repo_name | path | copies | size | content | license
---|---|---|---|---|---
mirnylab/cooler | cooler/util.py | 1 | 24268 |
# -*- coding: utf-8 -*-
from __future__ import division, print_function
from collections import OrderedDict, defaultdict
from contextlib import contextmanager
import six
import re
import os
from pandas.api.types import is_scalar, is_integer
import numpy as np
import pandas as pd
import h5py
def partition(start, stop, step):
"""Partition an integer interval into equally-sized subintervals.
Like builtin :py:func:`range`, but yields pairs of end points.
Examples
--------
>>> for lo, hi in partition(0, 9, 2):
...     print(lo, hi)
0 2
2 4
4 6
6 8
8 9
"""
return ((i, min(i + step, stop)) for i in range(start, stop, step))
def parse_cooler_uri(s):
"""
Parse a Cooler URI string
e.g. /path/to/mycoolers.cool::/path/to/cooler
"""
parts = s.split("::")
if len(parts) == 1:
file_path, group_path = parts[0], "/"
elif len(parts) == 2:
file_path, group_path = parts
if not group_path.startswith("/"):
group_path = "/" + group_path
else:
raise ValueError("Invalid Cooler URI string")
return file_path, group_path
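# Illustrative usage sketch (editor addition, not part of the original cooler
# module); wrapped in a helper so nothing runs on import. The file names are
# made up for the example.
def _example_parse_cooler_uri():
    assert parse_cooler_uri("test.cool") == ("test.cool", "/")
    assert parse_cooler_uri("test.mcool::resolutions/10000") == (
        "test.mcool", "/resolutions/10000"
    )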
def atoi(s):
return int(s.replace(",", ""))
def parse_humanized(s):
_NUMERIC_RE = re.compile("([0-9,.]+)")
_, value, unit = _NUMERIC_RE.split(s.replace(",", ""))
if not len(unit):
return int(value)
value = float(value)
unit = unit.upper().strip()
if unit in ("K", "KB"):
value *= 1000
elif unit in ("M", "MB"):
value *= 1000000
elif unit in ("G", "GB"):
value *= 1000000000
else:
raise ValueError("Unknown unit '{}'".format(unit))
return int(value)
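# Illustrative usage sketch (editor addition): humanized genomic sizes are
# expanded to integer base pairs, with commas and k/M/G suffixes accepted.
def _example_parse_humanized():
    assert parse_humanized("1,000,000") == 1000000
    assert parse_humanized("2.5kb") == 2500
    assert parse_humanized("3M") == 3000000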
def parse_region_string(s):
"""
Parse a UCSC-style genomic region string into a triple.
Parameters
----------
s : str
UCSC-style string, e.g. "chr5:10,100,000-30,000,000". Ensembl and FASTA
style sequence names are allowed. End coordinate must be greater than
or equal to start.
Returns
-------
(str, int or None, int or None)
"""
def _tokenize(s):
token_spec = [
("HYPHEN", r"-"),
("COORD", r"[0-9,]+(\.[0-9]*)?(?:[a-z]+)?"),
("OTHER", r".+"),
]
tok_regex = r"\s*" + r"|\s*".join(r"(?P<%s>%s)" % pair for pair in token_spec)
tok_regex = re.compile(tok_regex, re.IGNORECASE)
for match in tok_regex.finditer(s):
typ = match.lastgroup
yield typ, match.group(typ)
def _check_token(typ, token, expected):
if typ is None:
raise ValueError("Expected {} token missing".format(" or ".join(expected)))
else:
if typ not in expected:
raise ValueError('Unexpected token "{}"'.format(token))
def _expect(tokens):
typ, token = next(tokens, (None, None))
_check_token(typ, token, ["COORD"])
start = parse_humanized(token)
typ, token = next(tokens, (None, None))
_check_token(typ, token, ["HYPHEN"])
typ, token = next(tokens, (None, None))
if typ is None:
return start, None
_check_token(typ, token, ["COORD"])
end = parse_humanized(token)
if end < start:
raise ValueError("End coordinate less than start")
return start, end
parts = s.split(":")
chrom = parts[0].strip()
if not len(chrom):
raise ValueError("Chromosome name cannot be empty")
if len(parts) < 2:
return (chrom, None, None)
start, end = _expect(_tokenize(parts[1]))
return (chrom, start, end)
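# Illustrative usage sketch (editor addition): coordinates are optional and
# humanized numbers are accepted in the coordinate fields.
def _example_parse_region_string():
    assert parse_region_string("chr1") == ("chr1", None, None)
    assert parse_region_string("chr1:10,000-20,000") == ("chr1", 10000, 20000)
    assert parse_region_string("chr1:1M-2M") == ("chr1", 1000000, 2000000)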
def parse_region(reg, chromsizes=None):
"""
Genomic regions are represented as half-open intervals (0-based starts,
1-based ends) along the length coordinate of a contig/scaffold/chromosome.
Parameters
----------
reg : str or tuple
UCSC-style genomic region string, or
Triple (chrom, start, end), where ``start`` or ``end`` may be ``None``.
chromsizes : mapping, optional
Lookup table of scaffold lengths to check against ``chrom`` and the
``end`` coordinate. Required if ``end`` is not supplied.
Returns
-------
A well-formed genomic region triple (str, int, int)
"""
if isinstance(reg, six.string_types):
chrom, start, end = parse_region_string(reg)
else:
chrom, start, end = reg
start = int(start) if start is not None else start
end = int(end) if end is not None else end
try:
clen = chromsizes[chrom] if chromsizes is not None else None
except KeyError:
raise ValueError("Unknown sequence label: {}".format(chrom))
start = 0 if start is None else start
if end is None:
if clen is None: # TODO --- remove?
raise ValueError("Cannot determine end coordinate.")
end = clen
if end < start:
raise ValueError("End cannot be less than start")
if start < 0 or (clen is not None and end > clen):
raise ValueError("Genomic region out of bounds: [{}, {})".format(start, end))
return chrom, start, end
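# Illustrative usage sketch (editor addition): missing start/end coordinates
# are filled in from 0 and from the chromsizes lookup, respectively.
def _example_parse_region():
    chromsizes = pd.Series([1000, 500], index=["chr1", "chr2"])
    assert parse_region("chr1:100-200") == ("chr1", 100, 200)
    assert parse_region("chr2", chromsizes) == ("chr2", 0, 500)
    assert parse_region(("chr1", 10, None), chromsizes) == ("chr1", 10, 1000)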
def natsort_key(s, _NS_REGEX=re.compile(r"(\d+)", re.U)):
return tuple([int(x) if x.isdigit() else x for x in _NS_REGEX.split(s) if x])
def natsorted(iterable):
return sorted(iterable, key=natsort_key)
def argnatsort(array):
array = np.asarray(array)
if not len(array):
return np.array([], dtype=int)
cols = tuple(zip(*(natsort_key(x) for x in array)))
return np.lexsort(cols[::-1])
def read_chromsizes(
filepath_or,
name_patterns=(r"^chr[0-9]+$", r"^chr[XY]$", r"^chrM$"),
all_names=False,
**kwargs
):
"""
Parse a ``<db>.chrom.sizes`` or ``<db>.chromInfo.txt`` file from the UCSC
database, where ``db`` is a genome assembly name.
Parameters
----------
filepath_or : str or file-like
Path or url to text file, or buffer.
name_patterns : sequence, optional
Sequence of regular expressions to capture desired sequence names.
Each corresponding set of records will be sorted in natural order.
all_names : bool, optional
Whether to return all contigs listed in the file. Default is
``False``.
Returns
-------
:py:class:`pandas.Series`
Series of integer bp lengths indexed by sequence name.
References
----------
* `UCSC assembly terminology <http://genome.ucsc.edu/FAQ/FAQdownloads.html#download9>`_
* `GRC assembly terminology <https://www.ncbi.nlm.nih.gov/grc/help/definitions>`_
"""
if isinstance(filepath_or, six.string_types) and filepath_or.endswith(".gz"):
kwargs.setdefault("compression", "gzip")
chromtable = pd.read_csv(
filepath_or,
sep="\t",
usecols=[0, 1],
names=["name", "length"],
dtype={"name": str},
**kwargs
)
if not all_names:
parts = []
for pattern in name_patterns:
part = chromtable[chromtable["name"].str.contains(pattern)]
part = part.iloc[argnatsort(part["name"])]
parts.append(part)
chromtable = pd.concat(parts, axis=0)
chromtable.index = chromtable["name"].values
return chromtable["length"]
def fetch_chromsizes(db, **kwargs):
"""
Download chromosome sizes from UCSC as a :py:class:`pandas.Series`, indexed
by chromosome label.
"""
return read_chromsizes(
"http://hgdownload.cse.ucsc.edu/goldenPath/{}/database/chromInfo.txt.gz".format(
db
),
**kwargs
)
def load_fasta(names, *filepaths):
"""
Load lazy FASTA records from one or multiple files without reading them
into memory.
Parameters
----------
names : sequence of str
Names of sequence records in FASTA file or files.
filepaths : str
Paths to one or more FASTA files to gather records from.
Returns
-------
OrderedDict of sequence name -> sequence record
"""
import pyfaidx
if len(filepaths) == 0:
raise ValueError("Need at least one file")
if len(filepaths) == 1:
fa = pyfaidx.Fasta(filepaths[0], as_raw=True)
else:
fa = {}
for filepath in filepaths:
fa.update(pyfaidx.Fasta(filepath, as_raw=True).records)
records = OrderedDict((chrom, fa[chrom]) for chrom in names)
return records
def binnify(chromsizes, binsize):
"""
Divide a genome into evenly sized bins.
Parameters
----------
chromsizes : Series
pandas Series indexed by chromosome name with chromosome lengths in bp.
binsize : int
size of bins in bp
Returns
-------
bins : :py:class:`pandas.DataFrame`
Dataframe with columns: ``chrom``, ``start``, ``end``.
"""
def _each(chrom):
clen = chromsizes[chrom]
n_bins = int(np.ceil(clen / binsize))
binedges = np.arange(0, (n_bins + 1)) * binsize
binedges[-1] = clen
return pd.DataFrame(
{"chrom": [chrom] * n_bins, "start": binedges[:-1], "end": binedges[1:]},
columns=["chrom", "start", "end"],
)
bintable = pd.concat(map(_each, chromsizes.keys()), axis=0, ignore_index=True)
bintable["chrom"] = pd.Categorical(
bintable["chrom"], categories=list(chromsizes.index), ordered=True
)
return bintable
make_bintable = binnify
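# Illustrative usage sketch (editor addition): the last bin of each contig is
# truncated to the chromosome length.
def _example_binnify():
    chromsizes = pd.Series([25, 10], index=["chr1", "chr2"])
    bins = binnify(chromsizes, 10)
    assert list(bins["start"]) == [0, 10, 20, 0]
    assert list(bins["end"]) == [10, 20, 25, 10]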
def digest(fasta_records, enzyme):
"""
Divide a genome into restriction fragments.
Parameters
----------
fasta_records : OrderedDict
Dictionary of chromosome names to sequence records.
enzyme: str
Name of restriction enzyme (e.g., 'DpnII').
Returns
-------
frags : :py:class:`pandas.DataFrame`
Dataframe with columns: ``chrom``, ``start``, ``end``.
"""
try:
import Bio.Restriction as biorst
import Bio.Seq as bioseq
except ImportError:
raise ImportError("Biopython is required to find restriction fragments.")
# http://biopython.org/DIST/docs/cookbook/Restriction.html#mozTocId447698
chroms = fasta_records.keys()
try:
cut_finder = getattr(biorst, enzyme).search
except AttributeError:
raise ValueError("Unknown enzyme name: {}".format(enzyme))
def _each(chrom):
seq = bioseq.Seq(str(fasta_records[chrom]))
cuts = np.r_[0, np.array(cut_finder(seq)) + 1, len(seq)].astype(int)
n_frags = len(cuts) - 1
frags = pd.DataFrame(
{"chrom": [chrom] * n_frags, "start": cuts[:-1], "end": cuts[1:]},
columns=["chrom", "start", "end"],
)
return frags
return pd.concat(map(_each, chroms), axis=0, ignore_index=True)
def get_binsize(bins):
"""
Infer bin size from a bin DataFrame. Assumes that the last bin of each
contig is allowed to differ in size from the rest.
Returns
-------
int or None if bins are non-uniform
"""
sizes = set()
for chrom, group in bins.groupby("chrom"):
sizes.update((group["end"] - group["start"]).iloc[:-1].unique())
if len(sizes) > 1:
return None
if len(sizes) == 1:
return next(iter(sizes))
else:
return None
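# Illustrative usage sketch (editor addition): a uniform binning reports its
# bin size; a bin table with mixed sizes would return None instead.
def _example_get_binsize():
    chromsizes = pd.Series([25, 10], index=["chr1", "chr2"])
    assert get_binsize(binnify(chromsizes, 10)) == 10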
def get_chromsizes(bins):
"""
Infer chromsizes Series from a bin DataFrame. Assumes that the last bin of
each contig is allowed to differ in size from the rest.
Returns
-------
:py:class:`pandas.Series` of contig lengths (bp) indexed by contig name.
"""
chromtable = (
bins.drop_duplicates(["chrom"], keep="last")[["chrom", "end"]]
.reset_index(drop=True)
.rename(columns={"chrom": "name", "end": "length"})
)
chroms, lengths = list(chromtable["name"]), list(chromtable["length"])
return pd.Series(index=chroms, data=lengths)
def bedslice(grouped, chromsizes, region):
"""
Range query on a BED-like dataframe with non-overlapping intervals.
"""
chrom, start, end = parse_region(region, chromsizes)
result = grouped.get_group(chrom)
if start > 0 or end < chromsizes[chrom]:
lo = result["end"].values.searchsorted(start, side="right")
hi = lo + result["start"].values[lo:].searchsorted(end, side="left")
result = result.iloc[lo:hi]
return result
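# Illustrative usage sketch (editor addition): bedslice expects the dataframe
# to be pre-grouped by chromosome and returns the intervals overlapping the
# query region.
def _example_bedslice():
    chromsizes = pd.Series([100], index=["chr1"])
    bins = binnify(chromsizes, 10)
    grouped = bins.groupby("chrom")
    hits = bedslice(grouped, chromsizes, "chr1:15-35")
    assert list(hits["start"]) == [10, 20, 30]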
def asarray_or_dataset(x):
return x if isinstance(x, h5py.Dataset) else np.asarray(x)
def rlencode(array, chunksize=None):
"""
Run length encoding.
Based on http://stackoverflow.com/a/32681075, which is based on the rle
function from R.
Parameters
----------
array : 1D array_like
Input array to encode.
chunksize : int, optional
Process the input in chunks of this size, e.g. to limit memory use when
``array`` is an HDF5 dataset.
Returns
-------
start positions, run lengths, run values
"""
where = np.flatnonzero
array = asarray_or_dataset(array)
n = len(array)
if n == 0:
return (
np.array([], dtype=int),
np.array([], dtype=int),
np.array([], dtype=array.dtype),
)
if chunksize is None:
chunksize = n
starts, values = [], []
last_val = np.nan
for i in range(0, n, chunksize):
x = array[i : i + chunksize]
locs = where(x[1:] != x[:-1]) + 1
if x[0] != last_val:
locs = np.r_[0, locs]
starts.append(i + locs)
values.append(x[locs])
last_val = x[-1]
starts = np.concatenate(starts)
lengths = np.diff(np.r_[starts, n])
values = np.concatenate(values)
return starts, lengths, values
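# Illustrative usage sketch (editor addition): runs of equal values are
# reduced to (start, length, value) triples.
def _example_rlencode():
    starts, lengths, values = rlencode(np.array([1, 1, 2, 2, 2, 3]))
    assert list(starts) == [0, 2, 5]
    assert list(lengths) == [2, 3, 1]
    assert list(values) == [1, 2, 3]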
def cmd_exists(cmd):
return any(
os.access(os.path.join(path, cmd), os.X_OK)
for path in os.environ["PATH"].split(os.pathsep)
)
def mad(data, axis=None):
return np.median(np.abs(data - np.median(data, axis)), axis)
@contextmanager
def open_hdf5(fp, mode="r", *args, **kwargs):
"""
Context manager like ``h5py.File`` but accepts already open HDF5 file
handles which do not get closed on teardown.
Parameters
----------
fp : str or ``h5py.File`` object
If an open file object is provided, it passes through unchanged,
provided that the requested mode is compatible.
If a filepath is passed, the context manager will close the file on
tear down.
mode : str
* r Readonly, file must exist
* r+ Read/write, file must exist
* a Read/write if exists, create otherwise
* w Truncate if exists, create otherwise
* w- or x Fail if exists, create otherwise
"""
if isinstance(fp, six.string_types):
own_fh = True
fh = h5py.File(fp, mode, *args, **kwargs)
else:
own_fh = False
if mode == "r" and fp.file.mode == "r+":
# warnings.warn("File object provided is writeable but intent is read-only")
pass
elif mode in ("r+", "a") and fp.file.mode == "r":
raise ValueError("File object provided is not writeable")
elif mode == "w":
raise ValueError("Cannot truncate open file")
elif mode in ("w-", "x"):
raise ValueError("File exists")
fh = fp
try:
yield fh
finally:
if own_fh:
fh.close()
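# Illustrative usage sketch (editor addition): both a path and an already
# open h5py.File pass through open_hdf5; only a path-opened handle is closed
# on exit. The file name here is made up for the example.
def _example_open_hdf5(path="example.h5"):
    with open_hdf5(path, "w") as f:
        f.create_dataset("x", data=np.arange(5))
    with open_hdf5(path, "r") as f:
        assert f["x"][:].tolist() == [0, 1, 2, 3, 4]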
class closing_hdf5(h5py.Group):
def __init__(self, grp):
super(closing_hdf5, self).__init__(grp.id)
def __enter__(self):
return self
def __exit__(self, *exc_info):
return self.file.close()
def close(self):
self.file.close()
def attrs_to_jsonable(attrs):
out = dict(attrs)
for k, v in attrs.items():
try:
out[k] = v.item()
except ValueError:
out[k] = v.tolist()
except AttributeError:
out[k] = v
return out
def infer_meta(x, index=None): # pragma: no cover
"""
Extracted and modified from dask/dataframe/utils.py :
make_meta (BSD licensed)
Create an empty pandas object containing the desired metadata.
Parameters
----------
x : dict, tuple, list, pd.Series, pd.DataFrame, pd.Index, dtype, scalar
To create a DataFrame, provide a `dict` mapping of `{name: dtype}`, or
an iterable of `(name, dtype)` tuples. To create a `Series`, provide a
tuple of `(name, dtype)`. If a pandas object, names, dtypes, and index
should match the desired output. If a dtype or scalar, a scalar of the
same dtype is returned.
index : pd.Index, optional
Any pandas index to use in the metadata. If none provided, a
`RangeIndex` will be used.
Examples
--------
>>> make_meta([('a', 'i8'), ('b', 'O')])
Empty DataFrame
Columns: [a, b]
Index: []
>>> make_meta(('a', 'f8'))
Series([], Name: a, dtype: float64)
>>> make_meta('i8')
1
"""
_simple_fake_mapping = {
"b": np.bool_(True),
"V": np.void(b" "),
"M": np.datetime64("1970-01-01"),
"m": np.timedelta64(1),
"S": np.str_("foo"),
"a": np.str_("foo"),
"U": np.unicode_("foo"),
"O": "foo",
}
UNKNOWN_CATEGORIES = "__UNKNOWN_CATEGORIES__"
def _scalar_from_dtype(dtype):
if dtype.kind in ("i", "f", "u"):
return dtype.type(1)
elif dtype.kind == "c":
return dtype.type(complex(1, 0))
elif dtype.kind in _simple_fake_mapping:
o = _simple_fake_mapping[dtype.kind]
return o.astype(dtype) if dtype.kind in ("m", "M") else o
else:
raise TypeError("Can't handle dtype: {0}".format(dtype))
def _nonempty_scalar(x):
if isinstance(x, (pd.Timestamp, pd.Timedelta, pd.Period)):
return x
elif np.isscalar(x):
dtype = x.dtype if hasattr(x, "dtype") else np.dtype(type(x))
return _scalar_from_dtype(dtype)
else:
raise TypeError(
"Can't handle meta of type " "'{0}'".format(type(x).__name__)
)
def _empty_series(name, dtype, index=None):
if isinstance(dtype, str) and dtype == "category":
return pd.Series(
pd.Categorical([UNKNOWN_CATEGORIES]), name=name, index=index
).iloc[:0]
return pd.Series([], dtype=dtype, name=name, index=index)
if hasattr(x, "_meta"):
return x._meta
if isinstance(x, (pd.Series, pd.DataFrame)):
return x.iloc[0:0]
elif isinstance(x, pd.Index):
return x[0:0]
index = index if index is None else index[0:0]
if isinstance(x, dict):
return pd.DataFrame(
{c: _empty_series(c, d, index=index) for (c, d) in x.items()}, index=index
)
if isinstance(x, tuple) and len(x) == 2:
return _empty_series(x[0], x[1], index=index)
elif isinstance(x, (list, tuple)):
if not all(isinstance(i, tuple) and len(i) == 2 for i in x):
raise ValueError(
"Expected iterable of tuples of (name, dtype), " "got {0}".format(x)
)
return pd.DataFrame(
{c: _empty_series(c, d, index=index) for (c, d) in x},
columns=[c for c, d in x],
index=index,
)
elif not hasattr(x, "dtype") and x is not None:
# could be a string, a dtype object, or a python type. Skip `None`,
# because it is implictly converted to `dtype('f8')`, which we don't
# want here.
try:
dtype = np.dtype(x)
return _scalar_from_dtype(dtype)
except: # noqa
# Continue on to next check
pass
if is_scalar(x):
return _nonempty_scalar(x)
raise TypeError("Don't know how to create metadata from {0}".format(x))
def get_meta(
columns, dtype=None, index_columns=None, index_names=None, default_dtype=np.object
): # pragma: no cover
"""
Extracted and modified from pandas/io/parsers.py :
_get_empty_meta (BSD licensed).
"""
columns = list(columns)
# Convert `dtype` to a defaultdict of some kind.
# This will enable us to write `dtype[col_name]`
# without worrying about KeyError issues later on.
if not isinstance(dtype, dict):
# if dtype == None, default will be default_dtype.
dtype = defaultdict(lambda: dtype or default_dtype)
else:
# Save a copy of the dictionary.
_dtype = dtype.copy()
dtype = defaultdict(lambda: default_dtype)
# Convert column indexes to column names.
for k, v in six.iteritems(_dtype):
col = columns[k] if is_integer(k) else k
dtype[col] = v
if index_columns is None or index_columns is False:
index = pd.Index([])
else:
data = [pd.Series([], dtype=dtype[name]) for name in index_names]
if len(data) == 1:
index = pd.Index(data[0], name=index_names[0])
else:
index = pd.MultiIndex.from_arrays(data, names=index_names)
index_columns.sort()
for i, n in enumerate(index_columns):
columns.pop(n - i)
col_dict = {col_name: pd.Series([], dtype=dtype[col_name]) for col_name in columns}
return pd.DataFrame(col_dict, columns=columns, index=index)
def check_bins(bins, chromsizes):
is_cat = pd.api.types.is_categorical(bins["chrom"])
bins = bins.copy()
if not is_cat:
bins["chrom"] = pd.Categorical(
bins.chrom, categories=list(chromsizes.index), ordered=True
)
else:
assert (bins["chrom"].cat.categories == chromsizes.index).all()
return bins
def balanced_partition(gs, n_chunk_max, file_contigs, loadings=None):
# n_bins = len(gs.bins)
grouped = gs._bins_grouped
chrom_nbins = grouped.size()
if loadings is None:
loadings = chrom_nbins
chrmax = loadings.idxmax()
loadings = loadings / loadings.loc[chrmax]
const = chrom_nbins.loc[chrmax] / n_chunk_max
granges = []
for chrom, group in grouped:
if chrom not in file_contigs:
continue
clen = gs.chromsizes[chrom]
step = int(np.ceil(const / loadings.loc[chrom]))
anchors = group.start.values[::step]
if anchors[-1] != clen:
anchors = np.r_[anchors, clen]
granges.extend(
(chrom, start, end) for start, end in zip(anchors[:-1], anchors[1:])
)
return granges
class GenomeSegmentation(object):
def __init__(self, chromsizes, bins):
bins = check_bins(bins, chromsizes)
self._bins_grouped = bins.groupby("chrom", sort=False)
nbins_per_chrom = self._bins_grouped.size().values
self.chromsizes = chromsizes
self.binsize = get_binsize(bins)
self.contigs = list(chromsizes.keys())
self.bins = bins
self.idmap = pd.Series(index=chromsizes.keys(), data=range(len(chromsizes)))
self.chrom_binoffset = np.r_[0, np.cumsum(nbins_per_chrom)]
self.chrom_abspos = np.r_[0, np.cumsum(chromsizes.values)]
self.start_abspos = (
self.chrom_abspos[bins["chrom"].cat.codes] + bins["start"].values
)
def fetch(self, region):
chrom, start, end = parse_region(region, self.chromsizes)
result = self._bins_grouped.get_group(chrom)
if start > 0 or end < self.chromsizes[chrom]:
lo = result["end"].values.searchsorted(start, side="right")
hi = lo + result["start"].values[lo:].searchsorted(end, side="left")
result = result.iloc[lo:hi]
return result
def buffered(chunks, size=10000000):
"""
Take an incoming iterator of small data frame chunks and buffer them into
an outgoing iterator of larger chunks.
Parameters
----------
chunks : iterator of :py:class:`pandas.DataFrame`
Each chunk should have the same column names.
size : int
Minimum length of output chunks.
Yields
------
Larger outgoing :py:class:`pandas.DataFrame` chunks made from concatenating
the incoming ones.
"""
buf = []
n = 0
for chunk in chunks:
n += len(chunk)
buf.append(chunk)
if n > size:
yield pd.concat(buf, axis=0)
buf = []
n = 0
if len(buf):
yield pd.concat(buf, axis=0)
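# Illustrative usage sketch (editor addition): four 3-row chunks are merged
# into two 6-row chunks once the 5-row threshold is exceeded.
def _example_buffered():
    chunks = (pd.DataFrame({"x": range(3)}) for _ in range(4))
    assert [len(df) for df in buffered(chunks, size=5)] == [6, 6]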
| bsd-3-clause |

vivekmishra1991/scikit-learn | examples/tree/plot_iris.py | 271 | 2186 |
"""
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
plt.axis("tight")
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
| bsd-3-clause |

rhyolight/nupic.research | projects/sequence_prediction/continuous_sequence/run_tm_model.py | 4 | 16413 |
## ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import importlib
from optparse import OptionParser
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.model_factory import ModelFactory
from nupic.frameworks.opf.prediction_metrics_manager import MetricsManager
from nupic.frameworks.opf import metrics
# from htmresearch.frameworks.opf.clamodel_custom import CLAModel_custom
import nupic_output
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import yaml
from htmresearch.support.sequence_learning_utils import *
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
rcParams['pdf.fonttype'] = 42
plt.ion()
DATA_DIR = "./data"
MODEL_PARAMS_DIR = "./model_params"
def getMetricSpecs(predictedField, stepsAhead=5):
_METRIC_SPECS = (
MetricSpec(field=predictedField, metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': 'negativeLogLikelihood',
'window': 1000, 'steps': stepsAhead}),
MetricSpec(field=predictedField, metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': 'nrmse', 'window': 1000,
'steps': stepsAhead}),
)
return _METRIC_SPECS
def createModel(modelParams):
model = ModelFactory.create(modelParams)
model.enableInference({"predictedField": predictedField})
return model
def getModelParamsFromName(dataSet):
if (dataSet == "nyc_taxi" or
dataSet == "nyc_taxi_perturb" or
dataSet == "nyc_taxi_perturb_baseline"):
importedModelParams = yaml.safe_load(open('model_params/nyc_taxi_model_params.yaml'))
else:
raise Exception("No model params exist for {}".format(dataSet))
return importedModelParams
def _getArgs():
parser = OptionParser(usage="%prog PARAMS_DIR OUTPUT_DIR [options]"
"\n\nCompare TM performance with trivial predictor using "
"model outputs in prediction directory "
"and outputting results to result directory.")
parser.add_option("-d",
"--dataSet",
type=str,
default='nyc_taxi',
dest="dataSet",
help="DataSet Name, choose from rec-center-hourly, nyc_taxi")
parser.add_option("-p",
"--plot",
default=False,
dest="plot",
help="Set to True to plot result")
parser.add_option("--stepsAhead",
help="How many steps ahead to predict. [default: %default]",
default=5,
type=int)
parser.add_option("-c",
"--classifier",
type=str,
default='SDRClassifierRegion',
dest="classifier",
help="Classifier Type: SDRClassifierRegion or CLAClassifierRegion")
(options, remainder) = parser.parse_args()
print options
return options, remainder
def getInputRecord(df, predictedField, i):
inputRecord = {
predictedField: float(df[predictedField][i]),
"timeofday": float(df["timeofday"][i]),
"dayofweek": float(df["dayofweek"][i]),
}
return inputRecord
def printTPRegionParams(tpregion):
"""
Note: assumes we are using TemporalMemory/TPShim in the TPRegion
"""
tm = tpregion.getSelf()._tfdr
print "------------PY TemporalMemory Parameters ------------------"
print "numberOfCols =", tm.getColumnDimensions()
print "cellsPerColumn =", tm.getCellsPerColumn()
print "minThreshold =", tm.getMinThreshold()
print "activationThreshold =", tm.getActivationThreshold()
print "newSynapseCount =", tm.getMaxNewSynapseCount()
print "initialPerm =", tm.getInitialPermanence()
print "connectedPerm =", tm.getConnectedPermanence()
print "permanenceInc =", tm.getPermanenceIncrement()
print "permanenceDec =", tm.getPermanenceDecrement()
print "predictedSegmentDecrement=", tm.getPredictedSegmentDecrement()
print
def runMultiplePass(df, model, nMultiplePass, nTrain):
"""
Run the CLA model over data records 0:nTrain for nMultiplePass passes.
"""
predictedField = model.getInferenceArgs()['predictedField']
print "run TM through the train data multiple times"
for nPass in xrange(nMultiplePass):
for j in xrange(nTrain):
inputRecord = getInputRecord(df, predictedField, j)
result = model.run(inputRecord)
if j % 100 == 0:
print " pass %i, record %i" % (nPass, j)
# reset temporal memory
model._getTPRegion().getSelf()._tfdr.reset()
return model
def runMultiplePassSPonly(df, model, nMultiplePass, nTrain):
"""
Run only the sensor and SP stages of the CLA model over data records 0:nTrain for nMultiplePass passes.
"""
predictedField = model.getInferenceArgs()['predictedField']
print "run TM through the train data multiple times"
for nPass in xrange(nMultiplePass):
for j in xrange(nTrain):
inputRecord = getInputRecord(df, predictedField, j)
model._sensorCompute(inputRecord)
model._spCompute()
if j % 400 == 0:
print " pass %i, record %i" % (nPass, j)
return model
def movingAverage(a, n):
movingAverage = []
for i in xrange(len(a)):
start = max(0, i - n)
values = a[start:i+1]
movingAverage.append(sum(values) / float(len(values)))
return movingAverage
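# Illustrative check (editor addition): with n=2 each output averages up to
# the three most recent values a[i-2:i+1], so the window grows at the start.
def _exampleMovingAverage():
  assert movingAverage([0, 3, 6], 2) == [0.0, 1.5, 3.0]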
if __name__ == "__main__":
(_options, _args) = _getArgs()
dataSet = _options.dataSet
plot = _options.plot
classifierType = _options.classifier
if dataSet == "rec-center-hourly":
DATE_FORMAT = "%m/%d/%y %H:%M" # '7/2/10 0:00'
predictedField = "kw_energy_consumption"
elif dataSet == "nyc_taxi" or dataSet == "nyc_taxi_perturb" or dataSet =="nyc_taxi_perturb_baseline":
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
predictedField = "passenger_count"
else:
raise RuntimeError("un recognized dataset")
modelParams = getModelParamsFromName(dataSet)
modelParams['modelParams']['clParams']['steps'] = str(_options.stepsAhead)
modelParams['modelParams']['clParams']['regionName'] = classifierType
print "Creating model from %s..." % dataSet
# use customized CLA model
model = ModelFactory.create(modelParams)
model.enableInference({"predictedField": predictedField})
model.enableLearning()
model._spLearningEnabled = True
model._tpLearningEnabled = True
printTPRegionParams(model._getTPRegion())
inputData = "%s/%s.csv" % (DATA_DIR, dataSet.replace(" ", "_"))
sensor = model._getSensorRegion()
encoderList = sensor.getSelf().encoder.getEncoderList()
if sensor.getSelf().disabledEncoder is not None:
classifier_encoder = sensor.getSelf().disabledEncoder.getEncoderList()
classifier_encoder = classifier_encoder[0]
else:
classifier_encoder = None
_METRIC_SPECS = getMetricSpecs(predictedField, stepsAhead=_options.stepsAhead)
metric = metrics.getModule(_METRIC_SPECS[0])
metricsManager = MetricsManager(_METRIC_SPECS, model.getFieldInfo(),
model.getInferenceType())
if plot:
plotCount = 1
plotHeight = max(plotCount * 3, 6)
fig = plt.figure(figsize=(14, plotHeight))
gs = gridspec.GridSpec(plotCount, 1)
plt.title(predictedField)
plt.ylabel('Data')
plt.xlabel('Time')
plt.tight_layout()
plt.ion()
print "Load dataset: ", dataSet
df = pd.read_csv(inputData, header=0, skiprows=[1, 2])
nMultiplePass = 5
nTrain = 5000
print " run SP through the first %i samples %i passes " %(nMultiplePass, nTrain)
model = runMultiplePassSPonly(df, model, nMultiplePass, nTrain)
model._spLearningEnabled = False
maxBucket = classifier_encoder.n - classifier_encoder.w + 1
likelihoodsVecAll = np.zeros((maxBucket, len(df)))
prediction_nstep = None
time_step = []
actual_data = []
patternNZ_track = []
predict_data = np.zeros((_options.stepsAhead, 0))
predict_data_ML = []
negLL_track = []
activeCellNum = []
predCellNum = []
predSegmentNum = []
predictedActiveColumnsNum = []
trueBucketIndex = []
sp = model._getSPRegion().getSelf()._sfdr
spActiveCellsCount = np.zeros(sp.getColumnDimensions())
output = nupic_output.NuPICFileOutput([dataSet])
for i in xrange(len(df)):
inputRecord = getInputRecord(df, predictedField, i)
tp = model._getTPRegion()
tm = tp.getSelf()._tfdr
prePredictiveCells = tm.getPredictiveCells()
prePredictiveColumn = np.array(list(prePredictiveCells)) / tm.cellsPerColumn
result = model.run(inputRecord)
trueBucketIndex.append(model._getClassifierInputRecord(inputRecord).bucketIndex)
predSegmentNum.append(len(tm.activeSegments))
sp = model._getSPRegion().getSelf()._sfdr
spOutput = model._getSPRegion().getOutputData('bottomUpOut')
spActiveCellsCount[spOutput.nonzero()[0]] += 1
activeDutyCycle = np.zeros(sp.getColumnDimensions(), dtype=np.float32)
sp.getActiveDutyCycles(activeDutyCycle)
overlapDutyCycle = np.zeros(sp.getColumnDimensions(), dtype=np.float32)
sp.getOverlapDutyCycles(overlapDutyCycle)
if i % 100 == 0 and i > 0:
plt.figure(1)
plt.clf()
plt.subplot(2, 2, 1)
plt.hist(overlapDutyCycle)
plt.xlabel('overlapDutyCycle')
plt.subplot(2, 2, 2)
plt.hist(activeDutyCycle)
plt.xlabel('activeDutyCycle-1000')
plt.subplot(2, 2, 3)
plt.hist(spActiveCellsCount)
plt.xlabel('activeDutyCycle-Total')
plt.draw()
tp = model._getTPRegion()
tm = tp.getSelf()._tfdr
tpOutput = tm.infActiveState['t']
predictiveCells = tm.getPredictiveCells()
predCellNum.append(len(predictiveCells))
predColumn = np.array(list(predictiveCells))/ tm.cellsPerColumn
patternNZ = tpOutput.reshape(-1).nonzero()[0]
activeColumn = patternNZ / tm.cellsPerColumn
activeCellNum.append(len(patternNZ))
predictedActiveColumns = np.intersect1d(prePredictiveColumn, activeColumn)
predictedActiveColumnsNum.append(len(predictedActiveColumns))
result.metrics = metricsManager.update(result)
negLL = result.metrics["multiStepBestPredictions:multiStep:"
"errorMetric='negativeLogLikelihood':steps=%d:window=1000:"
"field=%s"%(_options.stepsAhead, predictedField)]
if i % 100 == 0 and i>0:
negLL = result.metrics["multiStepBestPredictions:multiStep:"
"errorMetric='negativeLogLikelihood':steps=%d:window=1000:"
"field=%s"%(_options.stepsAhead, predictedField)]
nrmse = result.metrics["multiStepBestPredictions:multiStep:"
"errorMetric='nrmse':steps=%d:window=1000:"
"field=%s"%(_options.stepsAhead, predictedField)]
numActiveCell = np.mean(activeCellNum[-100:])
numPredictiveCells = np.mean(predCellNum[-100:])
numCorrectPredicted = np.mean(predictedActiveColumnsNum[-100:])
print "After %i records, %d-step negLL=%f nrmse=%f ActiveCell %f PredCol %f CorrectPredCol %f" % \
(i, _options.stepsAhead, negLL, nrmse, numActiveCell,
numPredictiveCells, numCorrectPredicted)
last_prediction = prediction_nstep
prediction_nstep = \
result.inferences["multiStepBestPredictions"][_options.stepsAhead]
output.write([i], [inputRecord[predictedField]], [float(prediction_nstep)])
bucketLL = \
result.inferences['multiStepBucketLikelihoods'][_options.stepsAhead]
likelihoodsVec = np.zeros((maxBucket,))
if bucketLL is not None:
for (k, v) in bucketLL.items():
likelihoodsVec[k] = v
time_step.append(i)
actual_data.append(inputRecord[predictedField])
predict_data_ML.append(
result.inferences['multiStepBestPredictions'][_options.stepsAhead])
negLL_track.append(negLL)
likelihoodsVecAll[0:len(likelihoodsVec), i] = likelihoodsVec
if plot and i > 500:
# prepare data for display
if i > 100:
time_step_display = time_step[-500:-_options.stepsAhead]
actual_data_display = actual_data[-500+_options.stepsAhead:]
predict_data_ML_display = predict_data_ML[-500:-_options.stepsAhead]
likelihood_display = likelihoodsVecAll[:, i-499:i-_options.stepsAhead+1]
xl = [(i)-500, (i)]
else:
time_step_display = time_step
actual_data_display = actual_data
predict_data_ML_display = predict_data_ML
likelihood_display = likelihoodsVecAll[:, :i+1]
xl = [0, (i)]
plt.figure(2)
plt.clf()
plt.imshow(likelihood_display,
extent=(time_step_display[0], time_step_display[-1], 0, 40000),
interpolation='nearest', aspect='auto',
origin='lower', cmap='Reds')
plt.colorbar()
plt.plot(time_step_display, actual_data_display, 'k', label='Data')
plt.plot(time_step_display, predict_data_ML_display, 'b', label='Best Prediction')
plt.xlim(xl)
plt.xlabel('Time')
plt.ylabel('Prediction')
# plt.title('TM, useTimeOfDay='+str(True)+' '+dataSet+' test neg LL = '+str(np.nanmean(negLL)))
plt.xlim([17020, 17300])
plt.ylim([0, 30000])
plt.clim([0, 1])
plt.draw()
predData_TM_n_step = np.roll(np.array(predict_data_ML), _options.stepsAhead)
nTest = len(actual_data) - nTrain - _options.stepsAhead
NRMSE_TM = NRMSE(actual_data[nTrain:nTrain+nTest], predData_TM_n_step[nTrain:nTrain+nTest])
print "NRMSE on test data: ", NRMSE_TM
output.close()
# calculate neg-likelihood
predictions = np.transpose(likelihoodsVecAll)
truth = np.roll(actual_data, -5)
from nupic.encoders.scalar import ScalarEncoder as NupicScalarEncoder
encoder = NupicScalarEncoder(w=1, minval=0, maxval=40000, n=22, forced=True)
from plot import computeLikelihood, plotAccuracy
bucketIndex2 = []
negLL = []
minProb = 0.0001
for i in xrange(len(truth)):
bucketIndex2.append(np.where(encoder.encode(truth[i]))[0])
outOfBucketProb = 1 - sum(predictions[i,:])
prob = predictions[i, bucketIndex2[i]]
if prob == 0:
prob = outOfBucketProb
if prob < minProb:
prob = minProb
negLL.append( -np.log(prob))
negLL = computeLikelihood(predictions, truth, encoder)
negLL[:5000] = np.nan
x = range(len(negLL))
plt.figure()
plotAccuracy((negLL, x), truth, window=480, errorType='negLL')
np.save('./result/'+dataSet+classifierType+'TMprediction.npy', predictions)
np.save('./result/'+dataSet+classifierType+'TMtruth.npy', truth)
plt.figure()
activeCellNumAvg = movingAverage(activeCellNum, 100)
plt.plot(np.array(activeCellNumAvg)/tm.numberOfCells())
plt.xlabel('data records')
plt.ylabel('sparsity')
plt.xlim([0, 5000])
plt.savefig('result/sparsity_over_training.pdf')
plt.figure()
predCellNumAvg = movingAverage(predCellNum, 100)
predSegmentNumAvg = movingAverage(predSegmentNum, 100)
# plt.plot(np.array(predCellNumAvg))
plt.plot(np.array(predSegmentNumAvg),'r', label='NMDA spike')
plt.plot(activeCellNumAvg,'b', label='spikes')
plt.xlabel('data records')
plt.ylabel('NMDA spike #')
plt.legend()
plt.xlim([0, 5000])
plt.ylim([0, 42])
plt.savefig('result/nmda_spike_over_training.pdf')
| gpl-3.0 |

NelisVerhoef/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 |
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |

shankari/e-mission-server | emission/analysis/classification/inference/mode/seed/section_features.py | 2 | 16550 |
# Standard imports
import math
import logging
import numpy as np
import utm
from sklearn.cluster import DBSCAN
# Our imports
import emission.core.get_database as edb
from emission.core.get_database import get_section_db, get_mode_db, get_routeCluster_db,get_transit_db
from emission.core.common import calDistance, Include_place_2
from emission.analysis.modelling.tour_model.trajectory_matching.route_matching import getRoute,fullMatchDistance,matchTransitRoutes,matchTransitStops
Sections = get_section_db()
from pymongo import MongoClient
BackupSections = MongoClient(edb.url).Backup_database.Stage_Sections
Modes = get_mode_db()
# The speed is in m/s
def calSpeed(trackpoint1, trackpoint2):
from dateutil import parser
distanceDelta = calDistance(trackpoint1['track_location']['coordinates'],
trackpoint2['track_location']['coordinates'])
timeDelta = parser.parse(trackpoint2['time']) - parser.parse(trackpoint1['time'])
# logging.debug("while calculating speed form %s -> %s, distanceDelta = %s, timeDelta = %s" %
# (trackpoint1, trackpoint2, distanceDelta, timeDelta))
if timeDelta.total_seconds() != 0:
return distanceDelta / timeDelta.total_seconds()
else:
return None
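# Illustrative check (editor addition): trackpoints are dicts with a GeoJSON
# (lng, lat) 'track_location' and an ISO 'time' string; the exact value
# depends on calDistance, so we only check that a northward move over one
# minute yields a positive speed in m/s.
def _example_calSpeed():
    p1 = {'track_location': {'coordinates': [-122.25, 37.87]},
          'time': '2015-01-01T10:00:00'}
    p2 = {'track_location': {'coordinates': [-122.25, 37.88]},
          'time': '2015-01-01T10:01:00'}
    assert calSpeed(p1, p2) > 0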
# This formula is from:
# http://www.movable-type.co.uk/scripts/latlong.html
# It returns the initial heading (forward azimuth) between two points.
def calHeading(point1, point2):
# points are in GeoJSON format, ie (lng, lat)
phi1 = math.radians(point1[1])
phi2 = math.radians(point2[1])
lambda1 = math.radians(point1[0])
lambda2 = math.radians(point2[0])
y = math.sin(lambda2-lambda1) * math.cos(phi2)
x = math.cos(phi1)*math.sin(phi2) - \
math.sin(phi1)*math.cos(phi2)*math.cos(lambda2-lambda1)
brng = math.degrees(math.atan2(y, x))
return brng
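# Illustrative check (editor addition): from the origin, a point due north
# along the meridian gives a heading of ~0 degrees and a point due east along
# the equator gives ~90 degrees (GeoJSON order, so points are (lng, lat)).
def _example_calHeading():
    assert abs(calHeading([0, 0], [0, 1])) < 1e-6
    assert abs(calHeading([0, 0], [1, 0]) - 90) < 1e-6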
def calHC(point1, point2, point3):
HC = calHeading(point2, point3) - calHeading(point1, point2)
return HC
def calHCR(segment):
trackpoints = segment['track_points']
if len(trackpoints) < 3:
return 0
else:
HCNum = 0
for (i, point) in enumerate(trackpoints[:-2]):
currPoint = point
nextPoint = trackpoints[i+1]
nexNextPt = trackpoints[i+2]
HC = calHC(currPoint['track_location']['coordinates'], nextPoint['track_location']['coordinates'], \
nexNextPt['track_location']['coordinates'])
if HC >= 15:
HCNum += 1
segmentDist = segment['distance']
if segmentDist!= None and segmentDist != 0:
HCR = HCNum/segmentDist
return HCR
else:
return 0
def calSR(segment):
trackpoints = segment['track_points']
if len(trackpoints) < 2:
return 0
else:
stopNum = 0
for (i, point) in enumerate(trackpoints[:-1]):
currPoint = point
nextPoint = trackpoints[i+1]
currVelocity = calSpeed(currPoint, nextPoint)
if currVelocity != None and currVelocity <= 0.75:
stopNum += 1
segmentDist = segment['distance']
if segmentDist != None and segmentDist != 0:
return stopNum/segmentDist
else:
return 0
def calVCR(segment):
trackpoints = segment['track_points']
if len(trackpoints) < 3:
return 0
else:
Pv = 0
for (i, point) in enumerate(trackpoints[:-2]):
currPoint = point
nextPoint = trackpoints[i+1]
nexNextPt = trackpoints[i+2]
velocity1 = calSpeed(currPoint, nextPoint)
velocity2 = calSpeed(nextPoint, nexNextPt)
if velocity1 != None and velocity2 != None:
if velocity1 != 0:
VC = abs(velocity2 - velocity1)/velocity1
else:
VC = 0
else:
VC = 0
if VC > 0.7:
Pv += 1
segmentDist = segment['distance']
if segmentDist != None and segmentDist != 0:
return Pv/segmentDist
else:
return 0
def calSegmentDistance(segment):
return segment['distance']
def calSpeeds(segment):
trackpoints = segment['track_points']
if len(trackpoints) == 0:
return None
return calSpeedsForList(trackpoints)
def calSpeedsForList(trackpoints):
speeds = np.zeros(len(trackpoints) - 1)
for (i, point) in enumerate(trackpoints[:-1]):
currPoint = point
nextPoint = trackpoints[i+1]
currSpeed = calSpeed(currPoint, nextPoint)
if currSpeed != None:
speeds[i] = currSpeed
# logging.debug("Returning vector of length %s while calculating speeds for trackpoints of length %s " % (speeds.shape, len(trackpoints)))
return speeds
def calAvgSpeed(segment):
timeDelta = segment['section_end_datetime'] - segment['section_start_datetime']
if timeDelta.total_seconds() != 0:
return segment['distance'] / timeDelta.total_seconds()
else:
return None
# In order to calculate the acceleration, we do the following.
# point0: (loc0, t0), point1: (loc1, t1), point2: (loc2, t2), point3: (loc3, t3)
# becomes
# speed0: ((loc1 - loc0) / (t1 - t0)), speed1: ((loc2 - loc1) / (t2-t1)),
# speed2: ((loc3 - loc2) / (t3 - t2)
# becomes
# segment0: speed0 / (t1 - t0), segment1: (speed1 - speed0)/(t2-t1),
# segment2: (speed2 - speed1) / (t3-t2)
def calAccels(segment):
from dateutil import parser
speeds = calSpeeds(segment)
trackpoints = segment['track_points']
if speeds is None or len(speeds) == 0:
return None
accel = np.zeros(len(speeds) - 1)
prevSpeed = 0
for (i, speed) in enumerate(speeds[0:-1]):
currSpeed = speed # speed0
speedDelta = currSpeed - prevSpeed # (speed0 - 0)
# t1 - t0
timeDelta = parser.parse(trackpoints[i+1]['time']) - parser.parse(trackpoints[i]['time'])
# logging.debug("while calculating accels from %s -> %s, speedDelta = %s, timeDelta = %s" %
# (trackpoints[i+1], trackpoints[i], speedDelta, timeDelta))
if timeDelta.total_seconds() != 0:
accel[i] = speedDelta/(timeDelta.total_seconds())
# logging.debug("resulting acceleration is %s" % accel[i])
prevSpeed = currSpeed
return accel
def getIthMaxSpeed(segment, i):
# python does not appear to have a built-in mechanism for returning the top
# ith max. We would need to write our own, possibly by sorting. Since it is
# not clear whether we ever actually need this (the paper does not explain
# which i they used), we just return the max.
assert(i == 1)
speeds = calSpeeds(segment)
return np.amax(speeds)
def getIthMaxAccel(segment, i):
# python does not appear to have a built-in mechanism for returning the top
# ith max. We would need to write our own, possibly by sorting. Since it is
# not clear whether we ever actually need this (the paper does not explain
# which i they used), we just return the max.
assert(i == 1)
accels = calAccels(segment)
return np.amax(accels)
def calSpeedDistParams(speeds):
return (np.mean(speeds), np.std(speeds))
# def user_tran_mat(user):
# user_sections=[]
# # print(tran_mat)
# query = {"$and": [{'type': 'move'},{'user_id':user},\
# {'$or': [{'confirmed_mode':1}, {'confirmed_mode':3},\
# {'confirmed_mode':5},{'confirmed_mode':6},{'confirmed_mode':7}]}]}
# # print(Sections.count_documents(query))
# for section in Sections.find(query).sort("section_start_datetime",1):
# user_sections.append(section)
# if Sections.count_documents(query)>=2:
# tran_mat=np.zeros([Modes.estimated_document_count(), Modes.estimated_document_count()])
# for i in range(len(user_sections)-1):
# if (user_sections[i+1]['section_start_datetime']-user_sections[i]['section_end_datetime']).seconds<=60:
# # print(user_sections[i+1]['section_start_datetime'],user_sections[i]['section_end_datetime'])
# fore_mode=user_sections[i]["confirmed_mode"]
# after_mode=user_sections[i+1]["confirmed_mode"]
# tran_mat[fore_mode-1,after_mode-1]+=1
# row_sums = tran_mat.sum(axis=1)
# new_mat = tran_mat / row_sums[:, np.newaxis]
# return new_mat
# else:
# return None
#
# # all model
# def all_tran_mat():
# tran_mat=np.zeros([Modes.estimated_document_count(), Modes.estimated_document_count()])
# for user in Sections.distinct("user_id"):
# user_sections=[]
# # print(tran_mat)
# query = {"$and": [{'type': 'move'},{'user_id':user},\
# {'$or': [{'confirmed_mode':1}, {'confirmed_mode':3},\
# {'confirmed_mode':5},{'confirmed_mode':6},{'confirmed_mode':7}]}]}
# # print(Sections.count_documents(query))
# for section in Sections.find(query).sort("section_start_datetime",1):
# user_sections.append(section)
# if Sections.count_documents(query)>=2:
# for i in range(len(user_sections)-1):
# if (user_sections[i+1]['section_start_datetime']-user_sections[i]['section_end_datetime']).seconds<=60:
# # print(user_sections[i+1]['section_start_datetime'],user_sections[i]['section_end_datetime'])
# fore_mode=user_sections[i]["confirmed_mode"]
# after_mode=user_sections[i+1]["confirmed_mode"]
# tran_mat[fore_mode-1,after_mode-1]+=1
# row_sums = tran_mat.sum(axis=1)
# new_mat = tran_mat / row_sums[:, np.newaxis]
# return new_mat
def mode_cluster(mode,eps,sam):
mode_change_pnts=[]
# print(tran_mat)
query = {"$and": [{'type': 'move'},\
{'confirmed_mode':mode}]}
# print(Sections.count_documents(query))
logging.debug("Trying to find cluster locations for %s trips" % (Sections.count_documents(query)))
for section in Sections.find(query).sort("section_start_datetime",1):
try:
mode_change_pnts.append(section['section_start_point']['coordinates'])
mode_change_pnts.append(section['section_end_point']['coordinates'])
except:
logging.warn("Found trip %s with missing start and/or end points" % (section['_id']))
pass
for section in BackupSections.find(query).sort("section_start_datetime",1):
try:
mode_change_pnts.append(section['section_start_point']['coordinates'])
mode_change_pnts.append(section['section_end_point']['coordinates'])
except:
logging.warn("Found trip %s with missing start and/or end points" % (section['_id']))
pass
# print(user_change_pnts)
# print(len(mode_change_pnts))
if len(mode_change_pnts) == 0:
logging.debug("No points found in cluster input, nothing to fit..")
return np.zeros(0)
if len(mode_change_pnts)>=1:
# print(mode_change_pnts)
np_points=np.array(mode_change_pnts)
# print(np_points[:,0])
# fig, axes = plt.subplots(1, 1)
# axes.scatter(np_points[:,0], np_points[:,1])
# plt.show()
else:
pass
utm_x = []
utm_y = []
for row in mode_change_pnts:
# GEOJSON order is lng, lat
try:
utm_loc = utm.from_latlon(row[1],row[0])
utm_x = np.append(utm_x,utm_loc[0])
utm_y = np.append(utm_y,utm_loc[1])
except utm.error.OutOfRangeError as oore:
logging.warning("Found OutOfRangeError while converting=%s, swapping" % row)
utm_loc = utm.from_latlon(row[0],row[1])
utm_x = np.append(utm_x,utm_loc[1])
utm_y = np.append(utm_y,utm_loc[0])
utm_location = np.column_stack((utm_x,utm_y))
db = DBSCAN(eps=eps,min_samples=sam)
db_fit = db.fit(utm_location)
db_labels = db_fit.labels_
#print db_labels
new_db_labels = db_labels[db_labels!=-1]
new_location = np_points[db_labels!=-1]
# print len(new_db_labels)
# print len(new_location)
# print new_information
label_unique = np.unique(new_db_labels)
cluster_center = np.zeros((len(label_unique),2))
for label in label_unique:
sub_location = new_location[new_db_labels==label]
temp_center = np.mean(sub_location,axis=0)
cluster_center[int(label)] = temp_center
# print cluster_center
return cluster_center
#
# print(mode_cluster(6))
def mode_start_end_coverage(segment,cluster,eps):
mode_change_pnts=[]
# print(tran_mat)
num_sec=0
centers=cluster
# print(centers)
try:
if Include_place_2(centers,segment['section_start_point']['coordinates'],eps) and \
Include_place_2(centers,segment['section_end_point']['coordinates'],eps):
return 1
else:
return 0
except:
return 0
# print(mode_start_end_coverage(5,105,2))
# print(mode_start_end_coverage(6,600,2))
# This is currently only used in this file, so it is fine to use only really
# user confirmed modes. We don't want to learn on trips where we don't have
# ground truth.
def get_mode_share_by_count(lst):
# input here is a list of sections
displayModeList = getDisplayModes()
# logging.debug(displayModeList)
modeCountMap = {}
for mode in displayModeList:
modeCountMap[mode['mode_name']] = 0
for section in lst:
if section['confirmed_mode']==mode['mode_id']:
modeCountMap[mode['mode_name']] +=1
elif section['mode']==mode['mode_id']:
modeCountMap[mode['mode_name']] +=1
return modeCountMap
# This is currently only used in this file, so it is fine to use only really
# user confirmed modes. We don't want to learn on trips where we don't have
# ground truth.
def get_mode_share_by_count(list_idx):
Sections=get_section_db()
## takes a list of idx's
AllModeList = getAllModes()
MODE = {}
MODE2= {}
for mode in AllModeList:
MODE[mode['mode_id']]=0
for _id in list_idx:
section=Sections.find_one({'_id': _id})
if section is None:
section=BackupSections.find_one({'id': _id})
mode_id = section['confirmed_mode']
try:
MODE[mode_id] += 1
except KeyError:
MODE[mode_id] = 1
# print(sum(MODE.values()))
if sum(MODE.values())==0:
for mode in AllModeList:
MODE2[mode['mode_id']]=0
# print(MODE2)
else:
for mode in AllModeList:
MODE2[mode['mode_id']]=MODE[mode['mode_id']]/sum(MODE.values())
return MODE2
def cluster_route_match_score(segment,step1=100000,step2=100000,method='lcs',radius1=2000,threshold=0.5):
userRouteClusters=get_routeCluster_db().find_one({'$and':[{'user':segment['user_id']},{'method':method}]})['clusters']
route_seg = getRoute(segment['_id'])
dis=999999
medoid_ids=userRouteClusters.keys()
if len(medoid_ids)!=0:
choice=medoid_ids[0]
for idx in userRouteClusters.keys():
route_idx=getRoute(idx)
try:
dis_new=fullMatchDistance(route_seg,route_idx,step1,step2,method,radius1)
except RuntimeError:
dis_new=999999
if dis_new<dis:
dis=dis_new
choice=idx
# print(dis)
# print(userRouteClusters[choice])
if dis<=threshold:
cluster=userRouteClusters[choice]
cluster.append(choice)
ModePerc=get_mode_share_by_count(cluster)
else:
ModePerc=get_mode_share_by_count([])
return ModePerc
def transit_route_match_score(segment,step1=100000,step2=100000,method='lcs',radius1=2500,threshold=0.5):
Transits=get_transit_db()
transitMatch={}
route_seg=getRoute(segment['_id'])
for type in Transits.distinct('type'):
for entry in Transits.find({'type':type}):
transitMatch[type]=matchTransitRoutes(route_seg,entry['stops'],step1,step2,method,radius1,threshold)
if transitMatch[entry['type']]==1:
break
return transitMatch
def transit_stop_match_score(segment,radius1=300):
Transits=get_transit_db()
transitMatch={}
route_seg=getRoute(segment['_id'])
for type in Transits.distinct('type'):
for entry in Transits.find({'type':type}):
transitMatch[type]=matchTransitStops(route_seg,entry['stops'],radius1)
if transitMatch[entry['type']]==1:
break
return transitMatch
| bsd-3-clause |

bsipocz/bokeh | examples/plotting/file/unemployment.py | 46 | 1846 |
from collections import OrderedDict
import numpy as np
from bokeh.plotting import ColumnDataSource, figure, show, output_file
from bokeh.models import HoverTool
from bokeh.sampledata.unemployment1948 import data
# Read in the data with pandas. Convert the year column to string
data['Year'] = [str(x) for x in data['Year']]
years = list(data['Year'])
months = ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]
data = data.set_index('Year')
# this is the colormap from the original plot
colors = [
"#75968f", "#a5bab7", "#c9d9d3", "#e2e2e2", "#dfccce",
"#ddb7b1", "#cc7878", "#933b41", "#550b1d"
]
# Set up the data for plotting. We will need to have values for every
# pair of year/month names. Map the rate to a color.
month = []
year = []
color = []
rate = []
for y in years:
for m in months:
month.append(m)
year.append(y)
monthly_rate = data[m][y]
rate.append(monthly_rate)
color.append(colors[min(int(monthly_rate)-2, 8)])
source = ColumnDataSource(
data=dict(month=month, year=year, color=color, rate=rate)
)
output_file('unemployment.html')
TOOLS = "resize,hover,save,pan,box_zoom,wheel_zoom"
p = figure(title="US Unemployment (1948 - 2013)",
x_range=years, y_range=list(reversed(months)),
x_axis_location="above", plot_width=900, plot_height=400,
toolbar_location="left", tools=TOOLS)
p.rect("year", "month", 1, 1, source=source,
color="color", line_color=None)
p.grid.grid_line_color = None
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_text_font_size = "5pt"
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = np.pi/3
hover = p.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([
('date', '@month @year'),
('rate', '@rate'),
])
show(p) # show the plot
| bsd-3-clause |

michigraber/scikit-learn | sklearn/linear_model/tests/test_perceptron.py | 378 | 1815 |
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import Perceptron
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
X_csr.sort_indices()
class MyPerceptron(object):
def __init__(self, n_iter=1):
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
if self.predict(X[i])[0] != y[i]:
self.w += y[i] * X[i]
self.b += y[i]
def project(self, X):
return np.dot(X, self.w) + self.b
def predict(self, X):
X = np.atleast_2d(X)
return np.sign(self.project(X))
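# Quick illustrative check of the reference implementation above (editor
# addition): a linearly separable toy problem is fit within a few epochs.
def _example_my_perceptron():
    X_toy = np.array([[1.0, 1.0], [-1.0, -1.0], [2.0, 1.0], [-2.0, -1.0]])
    y_toy = np.array([1, -1, 1, -1])
    p = MyPerceptron(n_iter=5)
    p.fit(X_toy, y_toy)
    assert (p.predict(X_toy) == y_toy).all()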
def test_perceptron_accuracy():
for data in (X, X_csr):
clf = Perceptron(n_iter=30, shuffle=False)
clf.fit(data, y)
score = clf.score(data, y)
assert_true(score >= 0.7)
def test_perceptron_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
clf1 = MyPerceptron(n_iter=2)
clf1.fit(X, y_bin)
clf2 = Perceptron(n_iter=2, shuffle=False)
clf2.fit(X, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel())
def test_undefined_methods():
clf = Perceptron()
for meth in ("predict_proba", "predict_log_proba"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
| bsd-3-clause |

richardtran415/pymatgen | pymatgen/analysis/diffraction/core.py | 3 | 7292 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
This module implements core classes for calculation of diffraction patterns.
"""
import abc
import collections
import numpy as np
from pymatgen.core.spectrum import Spectrum
from pymatgen.util.plotting import add_fig_kwargs
class DiffractionPattern(Spectrum):
"""
A representation of a diffraction pattern
"""
XLABEL = "$2\\Theta$"
YLABEL = "Intensity"
def __init__(self, x, y, hkls, d_hkls):
"""
Args:
x: Two theta angles.
y: Intensities
hkls: [{"hkl": (h, k, l), "multiplicity": mult}],
where {"hkl": (h, k, l), "multiplicity": mult}
is a dict of Miller
indices for all diffracted lattice facets contributing to each
intensity.
d_hkls: List of interplanar spacings.
"""
super().__init__(x, y, hkls, d_hkls)
self.hkls = hkls
self.d_hkls = d_hkls
class AbstractDiffractionPatternCalculator(abc.ABC):
"""
Abstract base class for computing the diffraction pattern of a crystal.
"""
# Tolerance in which to treat two peaks as having the same two theta.
TWO_THETA_TOL = 1e-5
# Tolerance in which to treat a peak as effectively 0 if the scaled
# intensity is less than this number. Since the max intensity is 100,
# this means the peak must be less than 1e-5 of the max intensity to be
# considered as zero. This deals with numerical issues where systematic
# absences do not cancel exactly to zero.
SCALED_INTENSITY_TOL = 1e-3
@abc.abstractmethod
def get_pattern(self, structure, scaled=True, two_theta_range=(0, 90)):
"""
Calculates the diffraction pattern for a structure.
Args:
structure (Structure): Input structure
scaled (bool): Whether to return scaled intensities. The maximum
peak is set to a value of 100. Defaults to True. Use False if
you need the absolute values to combine XRD plots.
two_theta_range ([float of length 2]): Tuple for range of
two_thetas to calculate in degrees. Defaults to (0, 90). Set to
None if you want all diffracted beams within the limiting
sphere of radius 2 / wavelength.
Returns:
(DiffractionPattern)
"""
pass
def get_plot(
self,
structure,
two_theta_range=(0, 90),
annotate_peaks=True,
ax=None,
with_labels=True,
fontsize=16,
):
"""
Returns the diffraction plot as a matplotlib.pyplot.
Args:
structure: Input structure
two_theta_range ([float of length 2]): Tuple for range of
two_thetas to calculate in degrees. Defaults to (0, 90). Set to
None if you want all diffracted beams within the limiting
sphere of radius 2 / wavelength.
annotate_peaks: Whether to annotate the peaks with plane
information.
ax: matplotlib :class:`Axes` or None if a new figure should be created.
with_labels: True to add xlabels and ylabels to the plot.
fontsize: (int) fontsize for peak labels.
Returns:
(matplotlib.pyplot)
"""
if ax is None:
from pymatgen.util.plotting import pretty_plot
plt = pretty_plot(16, 10)
ax = plt.gca()
else:
# This is to maintain the type of the return value.
import matplotlib.pyplot as plt
xrd = self.get_pattern(structure, two_theta_range=two_theta_range)
for two_theta, i, hkls, d_hkl in zip(xrd.x, xrd.y, xrd.hkls, xrd.d_hkls):
if two_theta_range[0] <= two_theta <= two_theta_range[1]:
label = ", ".join([str(hkl["hkl"]) for hkl in hkls])
ax.plot([two_theta, two_theta], [0, i], color="k", linewidth=3, label=label)
if annotate_peaks:
ax.annotate(
label,
xy=[two_theta, i],
xytext=[two_theta, i],
fontsize=fontsize,
)
if with_labels:
ax.set_xlabel(r"$2\theta$ ($^\circ$)")
ax.set_ylabel("Intensities (scaled)")
if hasattr(ax, "tight_layout"):
ax.tight_layout()
return plt
def show_plot(self, structure, **kwargs):
"""
Shows the diffraction plot.
Args:
structure (Structure): Input structure
two_theta_range ([float of length 2]): Tuple for range of
two_thetas to calculate in degrees. Defaults to (0, 90). Set to
None if you want all diffracted beams within the limiting
sphere of radius 2 / wavelength.
annotate_peaks (bool): Whether to annotate the peaks with plane
information.
"""
self.get_plot(structure, **kwargs).show()
@add_fig_kwargs
def plot_structures(self, structures, fontsize=6, **kwargs):
"""
Plot diffraction patterns for multiple structures on the same figure.
Args:
structures (Structure): List of structures
two_theta_range ([float of length 2]): Tuple for range of
two_thetas to calculate in degrees. Defaults to (0, 90). Set to
None if you want all diffracted beams within the limiting
sphere of radius 2 / wavelength.
annotate_peaks (bool): Whether to annotate the peaks with plane
information.
fontsize: (int) fontsize for peak labels.
"""
import matplotlib.pyplot as plt
nrows = len(structures)
fig, axes = plt.subplots(nrows=nrows, ncols=1, sharex=True, squeeze=False)
for i, (ax, structure) in enumerate(zip(axes.ravel(), structures)):
self.get_plot(structure, fontsize=fontsize, ax=ax, with_labels=i == nrows - 1, **kwargs)
spg_symbol, spg_number = structure.get_space_group_info()
ax.set_title("{} {} ({}) ".format(structure.formula, spg_symbol, spg_number))
return fig
def get_unique_families(hkls):
"""
Returns unique families of Miller indices. Families must be permutations
of each other.
Args:
hkls ([h, k, l]): List of Miller indices.
Returns:
{hkl: multiplicity}: A dict with unique hkl and multiplicity.
"""
# TODO: Definitely can be sped up.
def is_perm(hkl1, hkl2):
h1 = np.abs(hkl1)
h2 = np.abs(hkl2)
return all(i == j for i, j in zip(sorted(h1), sorted(h2)))
unique = collections.defaultdict(list)
for hkl1 in hkls:
found = False
for hkl2 in unique.keys():
if is_perm(hkl1, hkl2):
found = True
unique[hkl2].append(hkl1)
break
if not found:
unique[hkl1].append(hkl1)
pretty_unique = {}
for k, v in unique.items():
pretty_unique[sorted(v)[-1]] = len(v)
return pretty_unique
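# Illustrative sketch (not part of the original module): an uncalled helper
# showing how get_unique_families groups Miller indices that are permutations
# of one another. The example indices are assumptions chosen for demonstration.
def _example_get_unique_families():
    families = get_unique_families([(1, 0, 0), (0, 1, 0), (0, 0, 1), (1, 1, 0)])
    # (1, 0, 0), (0, 1, 0) and (0, 0, 1) collapse into one family of
    # multiplicity 3, keyed by the lexicographically largest member.
    assert families == {(1, 0, 0): 3, (1, 1, 0): 1}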
|
mit
|
wangsharp/trading-with-python
|
lib/yahooFinance.py
|
76
|
8290
|
# -*- coding: utf-8 -*-
# Author: Jev Kuznetsov <jev.kuznetsov@gmail.com>
# License: BSD
"""
Toolset working with yahoo finance data
This module includes functions for easy access to YahooFinance data
Functions
----------
- `getHistoricData` get historic data for a single symbol
- `getQuote` get current quote for a symbol
- `getScreenerSymbols` load symbols from a yahoo stock screener file
Classes
---------
- `HistData` a class for working with multiple symbols
"""
from datetime import datetime, date
import urllib2
from pandas import DataFrame, Index, HDFStore, WidePanel
import numpy as np
import os
from extra import ProgressBar
def parseStr(s):
''' convert string to a float or string '''
f = s.strip()
if f[0] == '"':
return f.strip('"')
elif f=='N/A':
return np.nan
else:
try: # try float conversion
prefixes = {'M':1e6, 'B': 1e9}
prefix = f[-1]
if prefix in prefixes: # do we have a Billion/Million character?
return float(f[:-1])*prefixes[prefix]
else: # no, convert to float directly
return float(f)
except ValueError: # failed, return original string
return s
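# Illustrative sketch (not part of the original module): an uncalled helper
# showing how parseStr handles quoted strings, 'N/A' markers and the
# Million/Billion suffixes. The literals are assumptions chosen for demonstration.
def _example_parseStr():
    assert parseStr('"YHOO"') == 'YHOO'
    assert np.isnan(parseStr('N/A'))
    assert parseStr('1.5M') == 1.5e6
    assert parseStr('2B') == 2e9
    assert parseStr('12.3') == 12.3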
class HistData(object):
''' a class for working with yahoo finance data '''
def __init__(self, autoAdjust=True):
self.startDate = (2008,1,1)
self.autoAdjust=autoAdjust
self.wp = WidePanel()
def load(self,dataFile):
"""load data from HDF"""
if os.path.exists(dataFile):
store = HDFStore(dataFile)
symbols = [str(s).strip('/') for s in store.keys() ]
data = dict(zip(symbols,[store[symbol] for symbol in symbols]))
self.wp = WidePanel(data)
store.close()
else:
raise IOError('Data file does not exist')
def save(self,dataFile):
""" save data to HDF"""
print 'Saving data to', dataFile
store = HDFStore(dataFile)
for symbol in self.wp.items:
store[symbol] = self.wp[symbol]
store.close()
def downloadData(self,symbols='all'):
''' get data from yahoo '''
if symbols == 'all':
symbols = self.symbols
#store = HDFStore(self.dataFile)
p = ProgressBar(len(symbols))
for idx,symbol in enumerate(symbols):
try:
df = getSymbolData(symbol,sDate=self.startDate,verbose=False)
if self.autoAdjust:
df = _adjust(df,removeOrig=True)
if len(self.symbols)==0:
self.wp = WidePanel({symbol:df})
else:
self.wp[symbol] = df
except Exception,e:
print e
p.animate(idx+1)
def getDataFrame(self,field='close'):
''' return a slice on wide panel for a given field '''
return self.wp.minor_xs(field)
@property
def symbols(self):
return self.wp.items.tolist()
def __repr__(self):
return str(self.wp)
def getQuote(symbols):
''' get current yahoo quote, return a DataFrame '''
# for codes see: http://www.gummy-stuff.org/Yahoo-data.htm
if not isinstance(symbols,list):
symbols = [symbols]
header = ['symbol','last','change_pct','PE','time','short_ratio','prev_close','eps','market_cap']
request = str.join('', ['s', 'l1', 'p2' , 'r', 't1', 's7', 'p', 'e' , 'j1'])
data = dict(zip(header,[[] for i in range(len(header))]))
urlStr = 'http://finance.yahoo.com/d/quotes.csv?s=%s&f=%s' % (str.join('+',symbols), request)
try:
lines = urllib2.urlopen(urlStr).readlines()
except Exception, e:
s = "Failed to download:\n{0}".format(e);
print s
for line in lines:
fields = line.strip().split(',')
#print fields, len(fields)
for i,field in enumerate(fields):
data[header[i]].append( parseStr(field))
idx = data.pop('symbol')
return DataFrame(data,index=idx)
def _historicDataUrll(symbol, sDate=(1990,1,1),eDate=date.today().timetuple()[0:3]):
"""
generate url
symbol: Yahoo finance symbol
sDate: start date (y,m,d)
eDate: end date (y,m,d)
"""
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
return urlStr
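# Illustrative sketch (not part of the original module): an uncalled helper
# showing the query string produced by _historicDataUrll. The symbol and
# dates are arbitrary; note that Yahoo months are zero-based (a=0 is January).
def _example_historic_url():
    url = _historicDataUrll('spy', sDate=(2010, 1, 1), eDate=(2011, 1, 1))
    assert url == ('http://ichart.finance.yahoo.com/table.csv?'
                   's=SPY&a=0&b=1&c=2010&d=0&e=1&f=2011')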
def getHistoricData(symbols, **options):
'''
get data from Yahoo finance and return pandas dataframe
Will get an OHLCV data frame if a single symbol is provided.
If many symbols are provided, it will return a wide panel
Parameters
------------
symbols: Yahoo finance symbol or a list of symbols
sDate: start date (y,m,d)
eDate: end date (y,m,d)
adjust : T/[F] adjust data based on adj_close
'''
assert isinstance(symbols,(list,str)), 'Input must be a string symbol or a list of symbols'
if isinstance(symbols,str):
return getSymbolData(symbols,**options)
else:
data = {}
print 'Downloading data:'
p = ProgressBar(len(symbols))
for idx,symbol in enumerate(symbols):
p.animate(idx+1)
data[symbol] = getSymbolData(symbol,verbose=False,**options)
return WidePanel(data)
def getSymbolData(symbol, sDate=(1990,1,1),eDate=date.today().timetuple()[0:3], adjust=False, verbose=True):
"""
get data from Yahoo finance and return pandas dataframe
symbol: Yahoo finance symbol
sDate: start date (y,m,d)
eDate: end date (y,m,d)
"""
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
try:
lines = urllib2.urlopen(urlStr).readlines()
except Exception, e:
s = "Failed to download:\n{0}".format(e);
print s
return None
dates = []
data = [[] for i in range(6)]
#high
# header : Date,Open,High,Low,Close,Volume,Adj Close
for line in lines[1:]:
#print line
fields = line.rstrip().split(',')
dates.append(datetime.strptime( fields[0],'%Y-%m-%d'))
for i,field in enumerate(fields[1:]):
data[i].append(float(field))
idx = Index(dates)
data = dict(zip(['open','high','low','close','volume','adj_close'],data))
# create a pandas dataframe structure
df = DataFrame(data,index=idx).sort()
if verbose:
print 'Got %i days of data' % len(df)
if adjust:
return _adjust(df,removeOrig=True)
else:
return df
def _adjust(df, removeOrig=False):
'''
Adjust historical data based on the adj_close field.
'''
c = df['close']/df['adj_close']
df['adj_open'] = df['open']/c
df['adj_high'] = df['high']/c
df['adj_low'] = df['low']/c
if removeOrig:
df=df.drop(['open','close','high','low'],axis=1)
renames = dict(zip(['adj_open','adj_close','adj_high','adj_low'],['open','close','high','low']))
df=df.rename(columns=renames)
return df
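# Illustrative sketch (not part of the original module): an uncalled helper
# showing the effect of _adjust on a tiny hand-made frame. With
# adj_close = close / 2 (a 2:1 split-style adjustment), the OHLC columns
# should be halved as well. The numbers are assumptions chosen for demonstration.
def _example_adjust():
    raw = DataFrame({'open': [10.0], 'high': [12.0], 'low': [9.0],
                     'close': [11.0], 'adj_close': [5.5], 'volume': [100]})
    adj = _adjust(raw, removeOrig=True)
    assert adj['close'][0] == 5.5
    assert adj['open'][0] == 5.0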
def getScreenerSymbols(fileName):
''' read symbols from a .csv saved by yahoo stock screener '''
with open(fileName,'r') as fid:
lines = fid.readlines()
symbols = []
for line in lines[3:]:
fields = line.strip().split(',')
field = fields[0].strip()
if len(field) > 0:
symbols.append(field)
return symbols
|
bsd-3-clause
|
lesserwhirls/scipy-cwt
|
scipy/optimize/nonlin.py
|
2
|
45860
|
r"""
Nonlinear solvers
=================
.. currentmodule:: scipy.optimize
This is a collection of general-purpose nonlinear multidimensional
solvers. These solvers find *x* for which *F(x) = 0*. Both *x*
and *F* can be multidimensional.
Routines
--------
Large-scale nonlinear solvers:
.. autosummary::
newton_krylov
anderson
General nonlinear solvers:
.. autosummary::
broyden1
broyden2
Simple iterations:
.. autosummary::
excitingmixing
linearmixing
diagbroyden
Examples
========
Small problem
-------------
>>> def F(x):
... return np.cos(x) + x[::-1] - [1, 2, 3, 4]
>>> import scipy.optimize
>>> x = scipy.optimize.broyden1(F, [1,1,1,1], f_tol=1e-14)
>>> x
array([ 4.04674914, 3.91158389, 2.71791677, 1.61756251])
>>> np.cos(x) + x[::-1]
array([ 1., 2., 3., 4.])
Large problem
-------------
Suppose that we needed to solve the following integrodifferential
equation on the square :math:`[0,1]\times[0,1]`:
.. math::
\nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2
with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of
the square.
The solution can be found using the `newton_krylov` solver:
.. plot::
import numpy as np
from scipy.optimize import newton_krylov
from numpy import cosh, zeros_like, mgrid, zeros
# parameters
nx, ny = 75, 75
hx, hy = 1./(nx-1), 1./(ny-1)
P_left, P_right = 0, 0
P_top, P_bottom = 1, 0
def residual(P):
d2x = zeros_like(P)
d2y = zeros_like(P)
d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx
d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx
d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx
d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy
d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy
d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy
return d2x + d2y - 10*cosh(P).mean()**2
# solve
guess = zeros((nx, ny), float)
sol = newton_krylov(residual, guess, method='lgmres', verbose=1)
print 'Residual', abs(residual(sol)).max()
# visualize
import matplotlib.pyplot as plt
x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)]
plt.pcolor(x, y, sol)
plt.colorbar()
plt.show()
"""
# Copyright (C) 2009, Pauli Virtanen <pav@iki.fi>
# Distributed under the same license as Scipy.
import sys
import numpy as np
from scipy.linalg import norm, solve, inv, qr, svd, lstsq, LinAlgError
from numpy import asarray, dot, vdot
import scipy.sparse.linalg
import scipy.sparse
import scipy.lib.blas as blas
import inspect
from linesearch import scalar_search_wolfe1, scalar_search_armijo
__all__ = [
'broyden1', 'broyden2', 'anderson', 'linearmixing',
'diagbroyden', 'excitingmixing', 'newton_krylov',
# Deprecated functions:
'broyden_generalized', 'anderson2', 'broyden3']
#------------------------------------------------------------------------------
# Utility functions
#------------------------------------------------------------------------------
class NoConvergence(Exception):
pass
def maxnorm(x):
return np.absolute(x).max()
def _as_inexact(x):
"""Return `x` as an array, of either floats or complex floats"""
x = asarray(x)
if not np.issubdtype(x.dtype, np.inexact):
return asarray(x, dtype=np.float_)
return x
def _array_like(x, x0):
"""Return ndarray `x` as same array subclass and shape as `x0`"""
x = np.reshape(x, np.shape(x0))
wrap = getattr(x0, '__array_wrap__', x.__array_wrap__)
return wrap(x)
def _safe_norm(v):
if not np.isfinite(v).all():
return np.array(np.inf)
return norm(v)
#------------------------------------------------------------------------------
# Generic nonlinear solver machinery
#------------------------------------------------------------------------------
_doc_parts = dict(
params_basic="""
F : function(x) -> f
Function whose root to find; should take and return an array-like
object.
x0 : array-like
Initial guess for the solution
""".strip(),
params_extra="""
iter : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
verbose : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
f_tol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
f_rtol : float, optional
Relative tolerance for the residual. If omitted, not used.
x_tol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
x_rtol : float, optional
Relative minimum step size. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in the
direction given by the Jacobian approximation. Defaults to 'armijo'.
callback : function, optional
Optional callback function. It is called on every iteration as
``callback(x, f)`` where `x` is the current solution and `f`
the corresponding residual.
Returns
-------
sol : array-like
An array (of similar array type as `x0`) containing the final solution.
Raises
------
NoConvergence
When a solution was not found.
""".strip()
)
def _set_doc(obj):
if obj.__doc__:
obj.__doc__ = obj.__doc__ % _doc_parts
def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False,
maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None):
"""
Find a root of a function, in a way suitable for large-scale problems.
Parameters
----------
%(params_basic)s
jacobian : Jacobian
A Jacobian approximation: `Jacobian` object or something that
`asjacobian` can transform to one. Alternatively, a string specifying
which of the builtin Jacobian approximations to use:
krylov, broyden1, broyden2, anderson
diagbroyden, linearmixing, excitingmixing
%(params_extra)s
See Also
--------
asjacobian, Jacobian
Notes
-----
This algorithm implements the inexact Newton method, with
backtracking or full line searches. Several Jacobian
approximations are available, including Krylov and Quasi-Newton
methods.
References
----------
.. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear
Equations\". Society for Industrial and Applied Mathematics. (1995)
http://www.siam.org/books/kelley/
"""
condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol,
x_tol=x_tol, x_rtol=x_rtol,
iter=iter, norm=tol_norm)
x0 = _as_inexact(x0)
func = lambda z: _as_inexact(F(_array_like(z, x0))).flatten()
x = x0.flatten()
dx = np.inf
Fx = func(x)
Fx_norm = norm(Fx)
jacobian = asjacobian(jacobian)
jacobian.setup(x.copy(), Fx, func)
if maxiter is None:
if iter is not None:
maxiter = iter + 1
else:
maxiter = 100*(x.size+1)
if line_search is True:
line_search = 'armijo'
elif line_search is False:
line_search = None
if line_search not in (None, 'armijo', 'wolfe'):
raise ValueError("Invalid line search")
# Solver tolerance selection
gamma = 0.9
eta_max = 0.9999
eta_treshold = 0.1
eta = 1e-3
for n in xrange(maxiter):
if condition.check(Fx, x, dx):
break
# The tolerance, as computed for scipy.sparse.linalg.* routines
tol = min(eta, eta*Fx_norm)
dx = -jacobian.solve(Fx, tol=tol)
if norm(dx) == 0:
raise ValueError("Jacobian inversion yielded zero vector. "
"This indicates a bug in the Jacobian "
"approximation.")
# Line search, or Newton step
if line_search:
s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx,
line_search)
else:
s = 1.0
x += dx
Fx = func(x)
Fx_norm_new = norm(Fx)
jacobian.update(x.copy(), Fx)
if callback:
callback(x, Fx)
# Adjust forcing parameters for inexact methods
eta_A = gamma * Fx_norm_new**2 / Fx_norm**2
if gamma * eta**2 < eta_treshold:
eta = min(eta_max, eta_A)
else:
eta = min(eta_max, max(eta_A, gamma*eta**2))
Fx_norm = Fx_norm_new
# Print status
if verbose:
sys.stdout.write("%d: |F(x)| = %g; step %g; tol %g\n" % (
n, norm(Fx), s, eta))
sys.stdout.flush()
else:
raise NoConvergence(_array_like(x, x0))
return _array_like(x, x0)
_set_doc(nonlin_solve)
def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8,
smin=1e-2):
tmp_s = [0]
tmp_Fx = [Fx]
tmp_phi = [norm(Fx)**2]
s_norm = norm(x) / norm(dx)
def phi(s, store=True):
if s == tmp_s[0]:
return tmp_phi[0]
xt = x + s*dx
v = func(xt)
p = _safe_norm(v)**2
if store:
tmp_s[0] = s
tmp_phi[0] = p
tmp_Fx[0] = v
return p
def derphi(s):
ds = (abs(s) + s_norm + 1) * rdiff
return (phi(s+ds, store=False) - phi(s)) / ds
if search_type == 'wolfe':
s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0],
xtol=1e-2, amin=smin)
elif search_type == 'armijo':
s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0],
amin=smin)
if s is None:
# XXX: No suitable step length found. Take the full Newton step,
# and hope for the best.
s = 1.0
x = x + s*dx
if s == tmp_s[0]:
Fx = tmp_Fx[0]
else:
Fx = func(x)
Fx_norm = norm(Fx)
return s, x, Fx, Fx_norm
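# Illustrative sketch (not part of the original module): an uncalled helper
# showing the backtracking idea behind the 'armijo' option used above,
# written directly with numpy for the 1-D merit function phi(s) = |F(x+s*dx)|^2.
# The toy function and constants are assumptions chosen for demonstration.
def _example_armijo_backtracking():
    def F_toy(z):
        return np.array([z[0]**2 - 2.0])
    x = np.array([3.0])
    dx = np.array([-2.0])                # a descent direction for phi
    def phi(s):
        return float(np.sum(F_toy(x + s*dx)**2))
    phi0 = phi(0.0)
    dphi0 = (phi(1e-8) - phi0) / 1e-8    # crude directional derivative, < 0
    s, c = 1.0, 1e-4
    while phi(s) > phi0 + c*s*dphi0:     # Armijo sufficient-decrease condition
        s *= 0.5                         # backtrack
    assert phi(s) < phi0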
class TerminationCondition(object):
"""
Termination condition for an iteration. It is terminated if
- |F| < f_rtol*|F_0|, AND
- |F| < f_tol
AND
- |dx| < x_rtol*|x|, AND
- |dx| < x_tol
"""
def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
iter=None, norm=maxnorm):
if f_tol is None:
f_tol = np.finfo(np.float_).eps ** (1./3)
if f_rtol is None:
f_rtol = np.inf
if x_tol is None:
x_tol = np.inf
if x_rtol is None:
x_rtol = np.inf
self.x_tol = x_tol
self.x_rtol = x_rtol
self.f_tol = f_tol
self.f_rtol = f_rtol
self.norm = maxnorm if norm is None else norm
self.iter = iter
self.f0_norm = None
self.iteration = 0
def check(self, f, x, dx):
self.iteration += 1
f_norm = self.norm(f)
x_norm = self.norm(x)
dx_norm = self.norm(dx)
if self.f0_norm is None:
self.f0_norm = f_norm
if f_norm == 0:
return True
if self.iter is not None:
# backwards compatibility with Scipy 0.6.0
return self.iteration > self.iter
# NB: condition must succeed for rtol=inf even if norm == 0
return ((f_norm <= self.f_tol and f_norm/self.f_rtol <= self.f0_norm)
and (dx_norm <= self.x_tol and dx_norm/self.x_rtol <= x_norm))
#------------------------------------------------------------------------------
# Generic Jacobian approximation
#------------------------------------------------------------------------------
class Jacobian(object):
"""
Common interface for Jacobians or Jacobian approximations.
The optional methods come useful when implementing trust region
etc. algorithms that often require evaluating transposes of the
Jacobian.
Methods
-------
solve
Returns J^-1 * v
update
Updates Jacobian to point `x` (where the function has residual `Fx`)
matvec : optional
Returns J * v
rmatvec : optional
Returns J^H * v
rsolve : optional
Returns J^-H * v
matmat : optional
Returns J * V, where V is a dense matrix with dimensions (N,K).
todense : optional
Form the dense Jacobian matrix. Necessary for dense trust region
algorithms, and useful for testing.
Attributes
----------
shape
Matrix dimensions (M, N)
dtype
Data type of the matrix.
func : callable, optional
Function the Jacobian corresponds to
"""
def __init__(self, **kw):
names = ["solve", "update", "matvec", "rmatvec", "rsolve",
"matmat", "todense", "shape", "dtype"]
for name, value in kw.items():
if name not in names:
raise ValueError("Unknown keyword argument %s" % name)
if value is not None:
setattr(self, name, kw[name])
if hasattr(self, 'todense'):
self.__array__ = lambda: self.todense()
def aspreconditioner(self):
return InverseJacobian(self)
def solve(self, v, tol=0):
raise NotImplementedError
def update(self, x, F):
pass
def setup(self, x, F, func):
self.func = func
self.shape = (F.size, x.size)
self.dtype = F.dtype
if self.__class__.setup is Jacobian.setup:
# Call on the first point unless overridden
self.update(x, F)
class InverseJacobian(object):
def __init__(self, jacobian):
self.jacobian = jacobian
self.matvec = jacobian.solve
self.update = jacobian.update
if hasattr(jacobian, 'setup'):
self.setup = jacobian.setup
if hasattr(jacobian, 'rsolve'):
self.rmatvec = jacobian.rsolve
@property
def shape(self):
return self.jacobian.shape
@property
def dtype(self):
return self.jacobian.dtype
def asjacobian(J):
"""
Convert given object to one suitable for use as a Jacobian.
"""
spsolve = scipy.sparse.linalg.spsolve
if isinstance(J, Jacobian):
return J
elif inspect.isclass(J) and issubclass(J, Jacobian):
return J()
elif isinstance(J, np.ndarray):
if J.ndim > 2:
raise ValueError('array must have rank <= 2')
J = np.atleast_2d(np.asarray(J))
if J.shape[0] != J.shape[1]:
raise ValueError('array must be square')
return Jacobian(matvec=lambda v: dot(J, v),
rmatvec=lambda v: dot(J.conj().T, v),
solve=lambda v: solve(J, v),
rsolve=lambda v: solve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif scipy.sparse.isspmatrix(J):
if J.shape[0] != J.shape[1]:
raise ValueError('matrix must be square')
return Jacobian(matvec=lambda v: J*v,
rmatvec=lambda v: J.conj().T * v,
solve=lambda v: spsolve(J, v),
rsolve=lambda v: spsolve(J.conj().T, v),
dtype=J.dtype, shape=J.shape)
elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'):
return Jacobian(matvec=getattr(J, 'matvec'),
rmatvec=getattr(J, 'rmatvec'),
solve=J.solve,
rsolve=getattr(J, 'rsolve'),
update=getattr(J, 'update'),
setup=getattr(J, 'setup'),
dtype=J.dtype,
shape=J.shape)
elif callable(J):
# Assume it's a function J(x) that returns the Jacobian
class Jac(Jacobian):
def update(self, x, F):
self.x = x
def solve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m, v)
else:
raise ValueError("Unknown matrix type")
def matvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m, v)
elif scipy.sparse.isspmatrix(m):
return m*v
else:
raise ValueError("Unknown matrix type")
def rsolve(self, v, tol=0):
m = J(self.x)
if isinstance(m, np.ndarray):
return solve(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return spsolve(m.conj().T, v)
else:
raise ValueError("Unknown matrix type")
def rmatvec(self, v):
m = J(self.x)
if isinstance(m, np.ndarray):
return dot(m.conj().T, v)
elif scipy.sparse.isspmatrix(m):
return m.conj().T * v
else:
raise ValueError("Unknown matrix type")
return Jac()
elif isinstance(J, str):
return dict(broyden1=BroydenFirst,
broyden2=BroydenSecond,
anderson=Anderson,
diagbroyden=DiagBroyden,
linearmixing=LinearMixing,
excitingmixing=ExcitingMixing,
krylov=KrylovJacobian)[J]()
else:
raise TypeError('Cannot convert object to a Jacobian')
#------------------------------------------------------------------------------
# Broyden
#------------------------------------------------------------------------------
class GenericBroyden(Jacobian):
def setup(self, x0, f0, func):
Jacobian.setup(self, x0, f0, func)
self.last_f = f0
self.last_x = x0
if hasattr(self, 'alpha') and self.alpha is None:
# autoscale the initial Jacobian parameter
self.alpha = 0.5*max(norm(x0), 1) / norm(f0)
def _update(self, x, f, dx, df, dx_norm, df_norm):
raise NotImplementedError
def update(self, x, f):
df = f - self.last_f
dx = x - self.last_x
self._update(x, f, dx, df, norm(dx), norm(df))
self.last_f = f
self.last_x = x
class LowRankMatrix(object):
r"""
A matrix represented as
.. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger
However, if the rank of the matrix reaches the dimension of the vectors,
full matrix representation will be used thereon.
"""
def __init__(self, alpha, n, dtype):
self.alpha = alpha
self.cs = []
self.ds = []
self.n = n
self.dtype = dtype
self.collapsed = None
@staticmethod
def _matvec(v, alpha, cs, ds):
axpy, scal, dotc = blas.get_blas_funcs(['axpy', 'scal', 'dotc'],
cs[:1] + [v])
w = alpha * v
for c, d in zip(cs, ds):
a = dotc(d, v)
w = axpy(c, w, w.size, a)
return w
@staticmethod
def _solve(v, alpha, cs, ds):
"""Evaluate w = M^-1 v"""
if len(cs) == 0:
return v/alpha
# (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1
axpy, dotc = blas.get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v])
c0 = cs[0]
A = alpha * np.identity(len(cs), dtype=c0.dtype)
for i, d in enumerate(ds):
for j, c in enumerate(cs):
A[i,j] += dotc(d, c)
q = np.zeros(len(cs), dtype=c0.dtype)
for j, d in enumerate(ds):
q[j] = dotc(d, v)
q /= alpha
q = solve(A, q)
w = v/alpha
for c, qc in zip(cs, q):
w = axpy(c, w, w.size, -qc)
return w
def matvec(self, v):
"""Evaluate w = M v"""
if self.collapsed is not None:
return np.dot(self.collapsed, v)
return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds)
def rmatvec(self, v):
"""Evaluate w = M^H v"""
if self.collapsed is not None:
return np.dot(self.collapsed.T.conj(), v)
return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs)
def solve(self, v, tol=0):
"""Evaluate w = M^-1 v"""
if self.collapsed is not None:
return solve(self.collapsed, v)
return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds)
def rsolve(self, v, tol=0):
"""Evaluate w = M^-H v"""
if self.collapsed is not None:
return solve(self.collapsed.T.conj(), v)
return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs)
def append(self, c, d):
if self.collapsed is not None:
self.collapsed += c[:,None] * d[None,:].conj()
return
self.cs.append(c)
self.ds.append(d)
if len(self.cs) > c.size:
self.collapse()
def __array__(self):
if self.collapsed is not None:
return self.collapsed
Gm = self.alpha*np.identity(self.n, dtype=self.dtype)
for c, d in zip(self.cs, self.ds):
Gm += c[:,None]*d[None,:].conj()
return Gm
def collapse(self):
"""Collapse the low-rank matrix to a full-rank one."""
self.collapsed = np.array(self)
self.cs = None
self.ds = None
self.alpha = None
def restart_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping all vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
if len(self.cs) > rank:
del self.cs[:]
del self.ds[:]
def simple_reduce(self, rank):
"""
Reduce the rank of the matrix by dropping oldest vectors.
"""
if self.collapsed is not None:
return
assert rank > 0
while len(self.cs) > rank:
del self.cs[0]
del self.ds[0]
def svd_reduce(self, max_rank, to_retain=None):
"""
Reduce the rank of the matrix by retaining some SVD components.
This corresponds to the \"Broyden Rank Reduction Inverse\"
algorithm described in [vR]_.
Note that the SVD decomposition can be done by solving only a
problem whose size is the effective rank of this matrix, which
is viable even for large problems.
Parameters
----------
max_rank : int
Maximum rank of this matrix after reduction.
to_retain : int, optional
Number of SVD components to retain when reduction is done
(ie. rank > max_rank). Default is ``max_rank - 2``.
References
----------
.. [vR] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
if self.collapsed is not None:
return
p = max_rank
if to_retain is not None:
q = to_retain
else:
q = p - 2
if self.cs:
p = min(p, len(self.cs[0]))
q = max(0, min(q, p-1))
m = len(self.cs)
if m < p:
# nothing to do
return
C = np.array(self.cs).T
D = np.array(self.ds).T
D, R = qr(D, mode='qr', econ=True)
C = dot(C, R.T.conj())
U, S, WH = svd(C, full_matrices=False, compute_uv=True)
C = dot(C, inv(WH))
D = dot(D, WH.T.conj())
for k in xrange(q):
self.cs[k] = C[:,k].copy()
self.ds[k] = D[:,k].copy()
del self.cs[q:]
del self.ds[q:]
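# Illustrative sketch (not part of the original module): an uncalled helper
# checking with plain numpy the representation used by LowRankMatrix above:
# applying alpha*I + sum_n c_n d_n^H to a vector equals forming the dense
# matrix first. Sizes and the random seed are assumptions chosen for demonstration.
def _example_low_rank_identity():
    rng = np.random.RandomState(0)
    alpha, n = 0.7, 6
    cs = [rng.randn(n) for _ in range(3)]
    ds = [rng.randn(n) for _ in range(3)]
    v = rng.randn(n)
    dense = alpha * np.identity(n)
    for c, d in zip(cs, ds):
        dense += c[:, None] * d[None, :].conj()
    low_rank = alpha * v + sum(c * np.vdot(d, v) for c, d in zip(cs, ds))
    assert np.allclose(dense.dot(v), low_rank)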
_doc_parts['broyden_params'] = """
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden matrix
stays low. Can either be a string giving the name of the method,
or a tuple of the form ``(method, param1, param2, ...)``
that gives the name of the method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no extra parameters.
- ``simple``: drop oldest matrix column. Has no extra parameters.
- ``svd``: keep only the most significant SVD components.
Extra parameters:
- ``to_retain``: number of SVD components to retain when
rank reduction is done. Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
Default is infinity (ie., no rank reduction).
""".strip()
class BroydenFirst(GenericBroyden):
r"""
Find a root of a function, using Broyden's first Jacobian approximation.
This method is also known as \"Broyden's good method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df)
which corresponds to Broyden's first Jacobian update
.. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx
References
----------
.. [vR] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def __init__(self, alpha=None, reduction_method='restart', max_rank=None):
GenericBroyden.__init__(self)
self.alpha = alpha
self.Gm = None
if max_rank is None:
max_rank = np.inf
self.max_rank = max_rank
if isinstance(reduction_method, str):
reduce_params = ()
else:
reduce_params = reduction_method[1:]
reduction_method = reduction_method[0]
reduce_params = (max_rank - 1,) + reduce_params
if reduction_method == 'svd':
self._reduce = lambda: self.Gm.svd_reduce(*reduce_params)
elif reduction_method == 'simple':
self._reduce = lambda: self.Gm.simple_reduce(*reduce_params)
elif reduction_method == 'restart':
self._reduce = lambda: self.Gm.restart_reduce(*reduce_params)
else:
raise ValueError("Unknown rank reduction method '%s'" %
reduction_method)
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype)
def todense(self):
return inv(self.Gm)
def solve(self, f, tol=0):
r = self.Gm.matvec(f)
if not np.isfinite(r).all():
# singular; reset the Jacobian approximation
self.setup(self.last_x, self.last_f, self.func)
return self.Gm.matvec(f)
def matvec(self, f):
return self.Gm.solve(f)
def rsolve(self, f, tol=0):
return self.Gm.rmatvec(f)
def rmatvec(self, f):
return self.Gm.rsolve(f)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = self.Gm.rmatvec(dx)
c = dx - self.Gm.matvec(df)
d = v / vdot(df, v)
self.Gm.append(c, d)
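# Illustrative sketch (not part of the original module): an uncalled helper
# verifying with plain numpy that the "good Broyden" inverse-Jacobian update
# quoted in the docstring, H+ = H + (dx - H df) dx^H H / (dx^H H df),
# satisfies the secant condition H+ df = dx. Sizes and seed are assumptions.
def _example_broyden_good_update():
    rng = np.random.RandomState(1)
    n = 5
    H = rng.randn(n, n)
    dx = rng.randn(n)
    df = rng.randn(n)
    v = dx.dot(H)                        # dx^H H (real-valued case)
    H_new = H + np.outer(dx - H.dot(df), v) / v.dot(df)
    assert np.allclose(H_new.dot(df), dx)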
class BroydenSecond(BroydenFirst):
"""
Find a root of a function, using Broyden\'s second Jacobian approximation.
This method is also known as \"Broyden's bad method\".
Parameters
----------
%(params_basic)s
%(broyden_params)s
%(params_extra)s
Notes
-----
This algorithm implements the inverse Jacobian Quasi-Newton update
.. math:: H_+ = H + (dx - H df) df^\dagger / ( df^\dagger df)
corresponding to Broyden's second method.
References
----------
.. [vR] B.A. van der Rotten, PhD thesis,
\"A limited memory Broyden method to solve high-dimensional
systems of nonlinear equations\". Mathematisch Instituut,
Universiteit Leiden, The Netherlands (2003).
http://www.math.leidenuniv.nl/scripties/Rotten.pdf
"""
def _update(self, x, f, dx, df, dx_norm, df_norm):
self._reduce() # reduce first to preserve secant condition
v = df
c = dx - self.Gm.matvec(df)
d = v / df_norm**2
self.Gm.append(c, d)
#------------------------------------------------------------------------------
# Broyden-like (restricted memory)
#------------------------------------------------------------------------------
class Anderson(GenericBroyden):
"""
Find a root of a function, using (extended) Anderson mixing.
The Jacobian is formed by finding the 'best' solution in the space
spanned by the last `M` vectors. As a result, only an MxM matrix
inversion and MxN multiplications are required. [Ey]_
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
M : float, optional
Number of previous vectors to retain. Defaults to 5.
w0 : float, optional
Regularization parameter for numerical stability.
Compared to unity, good values of the order of 0.01.
%(params_extra)s
References
----------
.. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
"""
# Note:
#
# Anderson method maintains a rank M approximation of the inverse Jacobian,
#
# J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v
# A = W + dF^H dF
# W = w0^2 diag(dF^H dF)
#
# so that for w0 = 0 the secant condition applies for last M iterates, ie.,
#
# J^-1 df_j = dx_j
#
# for all j = 0 ... M-1.
#
# Moreover, (from Sherman-Morrison-Woodbury formula)
#
# J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v
# C = (dX + alpha dF) A^-1
# b = -1/alpha
#
# and after simplification
#
# J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v
#
def __init__(self, alpha=None, w0=0.01, M=5):
GenericBroyden.__init__(self)
self.alpha = alpha
self.M = M
self.dx = []
self.df = []
self.gamma = None
self.w0 = w0
def solve(self, f, tol=0):
dx = -self.alpha*f
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in xrange(n):
df_f[k] = vdot(self.df[k], f)
try:
gamma = solve(self.a, df_f)
except LinAlgError:
# singular; reset the Jacobian approximation
del self.dx[:]
del self.df[:]
return dx
for m in xrange(n):
dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m])
return dx
def matvec(self, f):
dx = -f/self.alpha
n = len(self.dx)
if n == 0:
return dx
df_f = np.empty(n, dtype=f.dtype)
for k in xrange(n):
df_f[k] = vdot(self.df[k], f)
b = np.empty((n, n), dtype=f.dtype)
for i in xrange(n):
for j in xrange(n):
b[i,j] = vdot(self.df[i], self.dx[j])
if i == j and self.w0 != 0:
b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha
gamma = solve(b, df_f)
for m in xrange(n):
dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha)
return dx
def _update(self, x, f, dx, df, dx_norm, df_norm):
if self.M == 0:
return
self.dx.append(dx)
self.df.append(df)
while len(self.dx) > self.M:
self.dx.pop(0)
self.df.pop(0)
n = len(self.dx)
a = np.zeros((n, n), dtype=f.dtype)
for i in xrange(n):
for j in xrange(i, n):
if i == j:
wd = self.w0**2
else:
wd = 0
a[i,j] = (1+wd)*vdot(self.df[i], self.df[j])
a += np.triu(a, 1).T.conj()
self.a = a
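# Illustrative sketch (not part of the original module): an uncalled helper
# checking with plain numpy the property stated in the note above: for w0 = 0
# the approximation J^-1 v ~ -alpha v + (dX + alpha dF) (dF^H dF)^-1 dF^H v
# satisfies the secant conditions J^-1 df_j = dx_j. Sizes and seed are assumptions.
def _example_anderson_secant():
    rng = np.random.RandomState(2)
    n, M = 7, 3
    dX = rng.randn(n, M)
    dF = rng.randn(n, M)
    alpha = 0.4
    A = dF.T.dot(dF)                     # w0 = 0, so no regularization term
    def inv_jac(v):
        return -alpha*v + (dX + alpha*dF).dot(np.linalg.solve(A, dF.T.dot(v)))
    for j in range(M):
        assert np.allclose(inv_jac(dF[:, j]), dX[:, j])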
#------------------------------------------------------------------------------
# Simple iterations
#------------------------------------------------------------------------------
class DiagBroyden(GenericBroyden):
"""
Find a root of a function, using diagonal Broyden Jacobian approximation.
The Jacobian approximation is derived from previous iterations, by
retaining only the diagonal of Broyden matrices.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
%(params_extra)s
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.d = np.ones((self.shape[0],), dtype=self.dtype) / self.alpha
def solve(self, f, tol=0):
return -f / self.d
def matvec(self, f):
return -f * self.d
def rsolve(self, f, tol=0):
return -f / self.d.conj()
def rmatvec(self, f):
return -f * self.d.conj()
def todense(self):
return np.diag(-self.d)
def _update(self, x, f, dx, df, dx_norm, df_norm):
self.d -= (df + self.d*dx)*dx/dx_norm**2
class LinearMixing(GenericBroyden):
"""
Find a root of a function, using a scalar Jacobian approximation.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
The Jacobian approximation is (-1/alpha).
%(params_extra)s
"""
def __init__(self, alpha=None):
GenericBroyden.__init__(self)
self.alpha = alpha
def solve(self, f, tol=0):
return -f*self.alpha
def matvec(self, f):
return -f/self.alpha
def rsolve(self, f, tol=0):
return -f*np.conj(self.alpha)
def rmatvec(self, f):
return -f/np.conj(self.alpha)
def todense(self):
return np.diag(-np.ones(self.shape[0])/self.alpha)
def _update(self, x, f, dx, df, dx_norm, df_norm):
pass
class ExcitingMixing(GenericBroyden):
"""
Find a root of a function, using a tuned diagonal Jacobian approximation.
The Jacobian matrix is diagonal and is tuned on each iteration.
.. warning::
This algorithm may be useful for specific problems, but whether
it will work may depend strongly on the problem.
Parameters
----------
%(params_basic)s
alpha : float, optional
Initial Jacobian approximation is (-1/alpha).
alphamax : float, optional
The entries of the diagonal Jacobian are kept in the range
``[alpha, alphamax]``.
%(params_extra)s
"""
def __init__(self, alpha=None, alphamax=1.0):
GenericBroyden.__init__(self)
self.alpha = alpha
self.alphamax = alphamax
self.beta = None
def setup(self, x, F, func):
GenericBroyden.setup(self, x, F, func)
self.beta = self.alpha * np.ones((self.shape[0],), dtype=self.dtype)
def solve(self, f, tol=0):
return -f*self.beta
def matvec(self, f):
return -f/self.beta
def rsolve(self, f, tol=0):
return -f*self.beta.conj()
def rmatvec(self, f):
return -f/self.beta.conj()
def todense(self):
return np.diag(-1/self.beta)
def _update(self, x, f, dx, df, dx_norm, df_norm):
incr = f*self.last_f > 0
self.beta[incr] += self.alpha
self.beta[~incr] = self.alpha
np.clip(self.beta, 0, self.alphamax, out=self.beta)
#------------------------------------------------------------------------------
# Iterative/Krylov approximated Jacobians
#------------------------------------------------------------------------------
class KrylovJacobian(Jacobian):
r"""
Find a root of a function, using Krylov approximation for inverse Jacobian.
This method is suitable for solving large-scale problems.
Parameters
----------
%(params_basic)s
rdiff : float, optional
Relative step size to use in numerical differentiation.
method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function
Krylov method to use to approximate the Jacobian.
Can be a string, or a function implementing the same interface as
the iterative solvers in `scipy.sparse.linalg`.
The default is `scipy.sparse.linalg.lgmres`.
inner_M : LinearOperator or InverseJacobian
Preconditioner for the inner Krylov iteration.
Note that you can use also inverse Jacobians as (adaptive)
preconditioners. For example,
>>> jac = BroydenFirst()
>>> kjac = KrylovJacobian(inner_M=jac.aspreconditioner())
If the preconditioner has a method named 'update', it will be called
as ``update(x, f)`` after each nonlinear step, with ``x`` giving
the current point, and ``f`` the current function value.
inner_tol, inner_maxiter, ...
Parameters to pass on to the \"inner\" Krylov solver.
See `scipy.sparse.linalg.gmres` for details.
outer_k : int, optional
Size of the subspace kept across LGMRES nonlinear iterations.
See `scipy.sparse.linalg.lgmres` for details.
%(params_extra)s
See Also
--------
scipy.sparse.linalg.gmres
scipy.sparse.linalg.lgmres
Notes
-----
This function implements a Newton-Krylov solver. The basic idea is
to compute the inverse of the Jacobian with an iterative Krylov
method. These methods require only evaluating the Jacobian-vector
products, which are conveniently approximated by numerical
differentiation:
.. math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega
Due to the use of iterative matrix inverses, these methods can
deal with large nonlinear problems.
Scipy's `scipy.sparse.linalg` module offers a selection of Krylov
solvers to choose from. The default here is `lgmres`, which is a
variant of restarted GMRES iteration that reuses some of the
information obtained in the previous Newton steps to invert
Jacobians in subsequent steps.
For a review on Newton-Krylov methods, see for example [KK]_,
and for the LGMRES sparse inverse method, see [BJM]_.
References
----------
.. [KK] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2003).
.. [BJM] A.H. Baker and E.R. Jessup and T. Manteuffel,
SIAM J. Matrix Anal. Appl. 26, 962 (2005).
"""
def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20,
inner_M=None, outer_k=10, **kw):
self.preconditioner = inner_M
self.rdiff = rdiff
self.method = dict(
bicgstab=scipy.sparse.linalg.bicgstab,
gmres=scipy.sparse.linalg.gmres,
lgmres=scipy.sparse.linalg.lgmres,
cgs=scipy.sparse.linalg.cgs,
minres=scipy.sparse.linalg.minres,
).get(method, method)
self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner)
if self.method is scipy.sparse.linalg.gmres:
# Replace GMRES's outer iteration with Newton steps
self.method_kw['restrt'] = inner_maxiter
self.method_kw['maxiter'] = 1
elif self.method is scipy.sparse.linalg.lgmres:
self.method_kw['outer_k'] = outer_k
# Replace LGMRES's outer iteration with Newton steps
self.method_kw['maxiter'] = 1
# Carry LGMRES's `outer_v` vectors across nonlinear iterations
self.method_kw.setdefault('outer_v', [])
# But don't carry the corresponding Jacobian*v products, in case
# the Jacobian changes a lot in the nonlinear step
#
# XXX: some trust-region inspired ideas might be more efficient...
# See eg. Brown & Saad. But needs to be implemented separately
# since it's not an inexact Newton method.
self.method_kw.setdefault('store_outer_Av', False)
for key, value in kw.items():
if not key.startswith('inner_'):
raise ValueError("Unknown parameter %s" % key)
self.method_kw[key[6:]] = value
def _update_diff_step(self):
mx = abs(self.x0).max()
mf = abs(self.f0).max()
self.omega = self.rdiff * max(1, mx) / max(1, mf)
def matvec(self, v):
nv = norm(v)
if nv == 0:
return 0*v
sc = self.omega / nv
r = (self.func(self.x0 + sc*v) - self.f0) / sc
if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)):
raise ValueError('Function returned non-finite results')
return r
def solve(self, rhs, tol=0):
sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw)
return sol
def update(self, x, f):
self.x0 = x
self.f0 = f
self._update_diff_step()
# Update also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'update'):
self.preconditioner.update(x, f)
def setup(self, x, f, func):
Jacobian.setup(self, x, f, func)
self.x0 = x
self.f0 = f
self.op = scipy.sparse.linalg.aslinearoperator(self)
if self.rdiff is None:
self.rdiff = np.finfo(x.dtype).eps ** (1./2)
self._update_diff_step()
# Setup also the preconditioner, if possible
if self.preconditioner is not None:
if hasattr(self.preconditioner, 'setup'):
self.preconditioner.setup(x, f, func)
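# Illustrative sketch (not part of the original module): an uncalled helper
# checking with plain numpy the finite-difference Jacobian-vector product
# that KrylovJacobian.matvec relies on, using a small toy function and an
# analytic Jacobian that are assumptions chosen for demonstration.
def _example_fd_jacobian_vector_product():
    def F_toy(z):
        return np.array([z[0]**2 + z[1], np.sin(z[0]) + 3.0*z[1]])
    def J_toy(z):
        return np.array([[2.0*z[0], 1.0],
                         [np.cos(z[0]), 3.0]])
    x0 = np.array([0.3, -1.2])
    v = np.array([0.7, 0.4])
    omega = 1e-7
    sc = omega / np.linalg.norm(v)       # same scaling as matvec() above
    jv_fd = (F_toy(x0 + sc*v) - F_toy(x0)) / sc
    assert np.allclose(jv_fd, J_toy(x0).dot(v), atol=1e-5)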
#------------------------------------------------------------------------------
# Wrapper functions
#------------------------------------------------------------------------------
def _nonlin_wrapper(name, jac):
"""
Construct a solver wrapper with given name and jacobian approx.
It inspects the keyword arguments of ``jac.__init__``, and allows to
use the same arguments in the wrapper function, in addition to the
keyword arguments of `nonlin_solve`
"""
import inspect
args, varargs, varkw, defaults = inspect.getargspec(jac.__init__)
kwargs = zip(args[-len(defaults):], defaults)
kw_str = ", ".join(["%s=%r" % (k, v) for k, v in kwargs])
if kw_str:
kw_str = ", " + kw_str
kwkw_str = ", ".join(["%s=%s" % (k, k) for k, v in kwargs])
if kwkw_str:
kwkw_str = kwkw_str + ", "
# Construct the wrapper function so that it's keyword arguments
# are visible in pydoc.help etc.
wrapper = """
def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None,
f_tol=None, f_rtol=None, x_tol=None, x_rtol=None,
tol_norm=None, line_search='armijo', callback=None, **kw):
jac = %(jac)s(%(kwkw)s **kw)
return nonlin_solve(F, xin, jac, iter, verbose, maxiter,
f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search,
callback)
"""
wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__,
kwkw=kwkw_str)
ns = {}
ns.update(globals())
exec wrapper in ns
func = ns[name]
func.__doc__ = jac.__doc__
_set_doc(func)
return func
broyden1 = _nonlin_wrapper('broyden1', BroydenFirst)
broyden2 = _nonlin_wrapper('broyden2', BroydenSecond)
anderson = _nonlin_wrapper('anderson', Anderson)
linearmixing = _nonlin_wrapper('linearmixing', LinearMixing)
diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden)
excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing)
newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian)
# Deprecated functions
@np.deprecate
def broyden_generalized(*a, **kw):
"""Use *anderson(..., w0=0)* instead"""
kw.setdefault('w0', 0)
return anderson(*a, **kw)
@np.deprecate
def broyden1_modified(*a, **kw):
"""Use `broyden1` instead"""
return broyden1(*a, **kw)
@np.deprecate
def broyden_modified(*a, **kw):
"""Use `anderson` instead"""
return anderson(*a, **kw)
@np.deprecate
def anderson2(*a, **kw):
"""Use `anderson` instead"""
return anderson(*a, **kw)
@np.deprecate
def broyden3(*a, **kw):
"""Use `broyden2` instead"""
return broyden2(*a, **kw)
@np.deprecate
def vackar(*a, **kw):
"""Use `diagbroyden` instead"""
return diagbroyden(*a, **kw)
|
bsd-3-clause
|
hsuantien/scikit-learn
|
sklearn/linear_model/tests/test_least_angle.py
|
44
|
17033
|
import tempfile
import shutil
import os.path as op
import warnings
from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.externals import joblib
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (i.e. a numerically rank-deficient design matrix) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=alphas_min)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=alphas_min)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
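# Illustrative sketch, not part of the original test suite: the Hilbert matrix
# built above is a classic ill-conditioned design. scipy also provides a ready
# made constructor (scipy.linalg.hilbert), which is convenient when probing how
# conditioning affects Lars. The helper name is invented for illustration only.
def _hilbert_conditioning_sketch(n=5):
    """Return the n x n Hilbert matrix and its condition number."""
    import numpy as np
    from scipy.linalg import hilbert
    H = hilbert(n)  # H[i, j] = 1 / (i + j + 1), same as the array built above
    return H, np.linalg.cond(H)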
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
    # This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
    # When automated memory mapping is used on large inputs, the
    # fold data ends up in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
temp_folder = tempfile.mkdtemp()
try:
fpath = op.join(temp_folder, 'data.pkl')
joblib.dump(splitted_data, fpath)
X_train, X_test, y_train, y_test = joblib.load(fpath, mmap_mode='r')
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
finally:
# try to release the mmap file handle in time to be able to delete
# the temporary folder under windows
del X_train, X_test, y_train, y_test
try:
shutil.rmtree(temp_folder)
except shutil.WindowsError:
warnings.warn("Could not delete temporary folder %s" % temp_folder)
|
bsd-3-clause
|
bigdataelephants/scikit-learn
|
sklearn/linear_model/least_angle.py
|
6
|
48722
|
"""
Least Angle Regression algorithm. See the documentation on the
Generalized Linear Model for a complete discussion.
"""
from __future__ import print_function
# Author: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux
#
# License: BSD 3 clause
from math import log
import sys
import warnings
from distutils.version import LooseVersion
import numpy as np
from scipy import linalg, interpolate
from scipy.linalg.lapack import get_lapack_funcs
from .base import LinearModel
from ..base import RegressorMixin
from ..utils import arrayfuncs, as_float_array, check_array, check_X_y
from ..cross_validation import _check_cv as check_cv
from ..utils import ConvergenceWarning
from ..externals.joblib import Parallel, delayed
from ..externals.six.moves import xrange
import scipy
solve_triangular_args = {}
if LooseVersion(scipy.__version__) >= LooseVersion('0.12'):
solve_triangular_args = {'check_finite': False}
def lars_path(X, y, Xy=None, Gram=None, max_iter=500,
alpha_min=0, method='lar', copy_X=True,
eps=np.finfo(np.float).eps,
copy_Gram=True, verbose=0, return_path=True,
return_n_iter=False):
"""Compute Least Angle Regression or Lasso path using LARS algorithm [1]
The optimization objective for the case method='lasso' is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
    in the case of method='lar', the objective function is only known in
the form of an implicit equation (see discussion in [1])
Parameters
-----------
X : array, shape: (n_samples, n_features)
Input data.
y : array, shape: (n_samples)
Input targets.
max_iter : integer, optional (default=500)
Maximum number of iterations to perform, set to infinity for no limit.
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features.
alpha_min : float, optional (default=0)
Minimum correlation along the path. It corresponds to the
regularization parameter alpha parameter in the Lasso.
method : {'lar', 'lasso'}, optional (default='lar')
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
eps : float, optional (default=``np.finfo(np.float).eps``)
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : bool, optional (default=True)
If ``False``, ``X`` is overwritten.
copy_Gram : bool, optional (default=True)
If ``False``, ``Gram`` is overwritten.
verbose : int (default=0)
Controls output verbosity.
return_path : bool, optional (default=True)
If ``return_path==True`` returns the entire path, else returns only the
last point of the path.
return_n_iter : bool, optional (default=False)
Whether to return the number of iterations.
Returns
--------
alphas : array, shape: [n_alphas + 1]
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter``, ``n_features`` or the
number of nodes in the path with ``alpha >= alpha_min``, whichever
is smaller.
active : array, shape [n_alphas]
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas + 1)
Coefficients along the path
n_iter : int
Number of iterations run. Returned only if return_n_iter is set
to True.
See also
--------
lasso_path
LassoLars
Lars
LassoLarsCV
LarsCV
sklearn.decomposition.sparse_encode
References
----------
.. [1] "Least Angle Regression", Effron et al.
http://www-stat.stanford.edu/~tibs/ftp/lars.pdf
.. [2] `Wikipedia entry on the Least-angle regression
<http://en.wikipedia.org/wiki/Least-angle_regression>`_
.. [3] `Wikipedia entry on the Lasso
<http://en.wikipedia.org/wiki/Lasso_(statistics)#Lasso_method>`_
"""
n_features = X.shape[1]
n_samples = y.size
max_features = min(max_iter, n_features)
if return_path:
coefs = np.zeros((max_features + 1, n_features))
alphas = np.zeros(max_features + 1)
else:
coef, prev_coef = np.zeros(n_features), np.zeros(n_features)
alpha, prev_alpha = np.array([0.]), np.array([0.]) # better ideas?
n_iter, n_active = 0, 0
active, indices = list(), np.arange(n_features)
# holds the sign of covariance
sign_active = np.empty(max_features, dtype=np.int8)
drop = False
# will hold the cholesky factorization. Only lower part is
# referenced.
# We are initializing this to "zeros" and not empty, because
# it is passed to scipy linalg functions and thus if it has NaNs,
    # even if they are in the upper part that is not used, we
# get errors raised.
# Once we support only scipy > 0.12 we can use check_finite=False and
# go back to "empty"
L = np.zeros((max_features, max_features), dtype=X.dtype)
swap, nrm2 = linalg.get_blas_funcs(('swap', 'nrm2'), (X,))
solve_cholesky, = get_lapack_funcs(('potrs',), (X,))
if Gram is None:
if copy_X:
# force copy. setting the array to be fortran-ordered
# speeds up the calculation of the (partial) Gram matrix
# and allows to easily swap columns
X = X.copy('F')
elif Gram == 'auto':
Gram = None
if X.shape[0] > X.shape[1]:
Gram = np.dot(X.T, X)
elif copy_Gram:
Gram = Gram.copy()
if Xy is None:
Cov = np.dot(X.T, y)
else:
Cov = Xy.copy()
if verbose:
if verbose > 1:
print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC")
else:
sys.stdout.write('.')
sys.stdout.flush()
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning
equality_tolerance = np.finfo(np.float32).eps
while True:
if Cov.size:
C_idx = np.argmax(np.abs(Cov))
C_ = Cov[C_idx]
C = np.fabs(C_)
else:
C = 0.
if return_path:
alpha = alphas[n_iter, np.newaxis]
coef = coefs[n_iter]
prev_alpha = alphas[n_iter - 1, np.newaxis]
prev_coef = coefs[n_iter - 1]
alpha[0] = C / n_samples
if alpha[0] <= alpha_min + equality_tolerance: # early stopping
if abs(alpha[0] - alpha_min) > equality_tolerance:
# interpolation factor 0 <= ss < 1
if n_iter > 0:
# In the first iteration, all alphas are zero, the formula
# below would make ss a NaN
ss = ((prev_alpha[0] - alpha_min) /
(prev_alpha[0] - alpha[0]))
coef[:] = prev_coef + ss * (coef - prev_coef)
alpha[0] = alpha_min
if return_path:
coefs[n_iter] = coef
break
if n_iter >= max_iter or n_active >= n_features:
break
if not drop:
##########################################################
# Append x_j to the Cholesky factorization of (Xa * Xa') #
# #
# ( L 0 ) #
# L -> ( ) , where L * w = Xa' x_j #
# ( w z ) and z = ||x_j|| #
# #
##########################################################
sign_active[n_active] = np.sign(C_)
m, n = n_active, C_idx + n_active
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
indices[n], indices[m] = indices[m], indices[n]
Cov_not_shortened = Cov
Cov = Cov[1:] # remove Cov[0]
if Gram is None:
X.T[n], X.T[m] = swap(X.T[n], X.T[m])
c = nrm2(X.T[n_active]) ** 2
L[n_active, :n_active] = \
np.dot(X.T[n_active], X.T[:n_active].T)
else:
# swap does only work inplace if matrix is fortran
# contiguous ...
Gram[m], Gram[n] = swap(Gram[m], Gram[n])
Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n])
c = Gram[n_active, n_active]
L[n_active, :n_active] = Gram[n_active, :n_active]
# Update the cholesky decomposition for the Gram matrix
if n_active:
linalg.solve_triangular(L[:n_active, :n_active],
L[n_active, :n_active],
trans=0, lower=1,
overwrite_b=True,
**solve_triangular_args)
v = np.dot(L[n_active, :n_active], L[n_active, :n_active])
diag = max(np.sqrt(np.abs(c - v)), eps)
L[n_active, n_active] = diag
if diag < 1e-7:
# The system is becoming too ill-conditioned.
# We have degenerate vectors in our active set.
# We'll 'drop for good' the last regressor added.
# Note: this case is very rare. It is no longer triggered by the
# test suite. The `equality_tolerance` margin added in 0.16.0 to
# get early stopping to work consistently on all versions of
# Python including 32 bit Python under Windows seems to make it
# very difficult to trigger the 'drop for good' strategy.
warnings.warn('Regressors in active set degenerate. '
'Dropping a regressor, after %i iterations, '
'i.e. alpha=%.3e, '
'with an active set of %i regressors, and '
'the smallest cholesky pivot element being %.3e'
% (n_iter, alpha, n_active, diag),
ConvergenceWarning)
# XXX: need to figure a 'drop for good' way
Cov = Cov_not_shortened
Cov[0] = 0
Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0])
continue
active.append(indices[n_active])
n_active += 1
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], '',
n_active, C))
if method == 'lasso' and n_iter > 0 and prev_alpha[0] < alpha[0]:
# alpha is increasing. This is because the updates of Cov are
            # bringing in too much numerical error that is greater
            # than the remaining correlation with the
# regressors. Time to bail out
warnings.warn('Early stopping the lars path, as the residues '
'are small and the current value of alpha is no '
'longer well controlled. %i iterations, alpha=%.3e, '
'previous alpha=%.3e, with an active set of %i '
'regressors.'
% (n_iter, alpha, prev_alpha, n_active),
ConvergenceWarning)
break
# least squares solution
least_squares, info = solve_cholesky(L[:n_active, :n_active],
sign_active[:n_active],
lower=True)
if least_squares.size == 1 and least_squares == 0:
# This happens because sign_active[:n_active] = 0
least_squares[...] = 1
AA = 1.
else:
# is this really needed ?
AA = 1. / np.sqrt(np.sum(least_squares * sign_active[:n_active]))
if not np.isfinite(AA):
# L is too ill-conditioned
i = 0
L_ = L[:n_active, :n_active].copy()
while not np.isfinite(AA):
L_.flat[::n_active + 1] += (2 ** i) * eps
least_squares, info = solve_cholesky(
L_, sign_active[:n_active], lower=True)
tmp = max(np.sum(least_squares * sign_active[:n_active]),
eps)
AA = 1. / np.sqrt(tmp)
i += 1
least_squares *= AA
if Gram is None:
# equiangular direction of variables in the active set
eq_dir = np.dot(X.T[:n_active].T, least_squares)
            # correlation between each inactive variable and the
            # equiangular vector
corr_eq_dir = np.dot(X.T[n_active:], eq_dir)
else:
# if huge number of features, this takes 50% of time, I
# think could be avoided if we just update it using an
# orthogonal (QR) decomposition of X
corr_eq_dir = np.dot(Gram[:n_active, n_active:].T,
least_squares)
g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny))
g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny))
gamma_ = min(g1, g2, C / AA)
# TODO: better names for these variables: z
drop = False
z = -coef[active] / (least_squares + tiny32)
z_pos = arrayfuncs.min_pos(z)
if z_pos < gamma_:
# some coefficients have changed sign
idx = np.where(z == z_pos)[0][::-1]
# update the sign, important for LAR
sign_active[idx] = -sign_active[idx]
if method == 'lasso':
gamma_ = z_pos
drop = True
n_iter += 1
if return_path:
if n_iter >= coefs.shape[0]:
del coef, alpha, prev_alpha, prev_coef
# resize the coefs and alphas array
add_features = 2 * max(1, (max_features - n_active))
coefs = np.resize(coefs, (n_iter + add_features, n_features))
alphas = np.resize(alphas, n_iter + add_features)
coef = coefs[n_iter]
prev_coef = coefs[n_iter - 1]
alpha = alphas[n_iter, np.newaxis]
prev_alpha = alphas[n_iter - 1, np.newaxis]
else:
# mimic the effect of incrementing n_iter on the array references
prev_coef = coef
prev_alpha[0] = alpha[0]
coef = np.zeros_like(coef)
coef[active] = prev_coef[active] + gamma_ * least_squares
# update correlations
Cov -= gamma_ * corr_eq_dir
# See if any coefficient has changed sign
if drop and method == 'lasso':
# handle the case when idx is not length of 1
[arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) for ii in
idx]
n_active -= 1
m, n = idx, n_active
# handle the case when idx is not length of 1
drop_idx = [active.pop(ii) for ii in idx]
if Gram is None:
# propagate dropped variable
for ii in idx:
for i in range(ii, n_active):
X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1])
# yeah this is stupid
indices[i], indices[i + 1] = indices[i + 1], indices[i]
# TODO: this could be updated
residual = y - np.dot(X[:, :n_active], coef[active])
temp = np.dot(X.T[n_active], residual)
Cov = np.r_[temp, Cov]
else:
for ii in idx:
for i in range(ii, n_active):
indices[i], indices[i + 1] = indices[i + 1], indices[i]
Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i+1])
Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i],
Gram[:, i + 1])
# Cov_n = Cov_j + x_j * X + increment(betas) TODO:
# will this still work with multiple drops ?
# recompute covariance. Probably could be done better
# wrong as Xy is not swapped with the rest of variables
# TODO: this could be updated
residual = y - np.dot(X, coef)
temp = np.dot(X.T[drop_idx], residual)
Cov = np.r_[temp, Cov]
sign_active = np.delete(sign_active, idx)
sign_active = np.append(sign_active, 0.) # just to maintain size
if verbose > 1:
print("%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, '', drop_idx,
n_active, abs(temp)))
if return_path:
# resize coefs in case of early stop
alphas = alphas[:n_iter + 1]
coefs = coefs[:n_iter + 1]
if return_n_iter:
return alphas, active, coefs.T, n_iter
else:
return alphas, active, coefs.T
else:
if return_n_iter:
return alpha, active, coef, n_iter
else:
return alpha, active, coef
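# Minimal usage sketch for ``lars_path`` (illustration only, not part of the
# module's public surface): build a small random regression problem and
# inspect the returned path. The helper name is invented for documentation
# purposes.
def _lars_path_usage_sketch(n_samples=50, n_features=10, seed=0):
    """Run lars_path on random data and return the shapes of its outputs."""
    rng = np.random.RandomState(seed)
    X = rng.randn(n_samples, n_features)
    w = np.zeros(n_features)
    w[:3] = rng.randn(3)  # sparse ground truth
    y = np.dot(X, w) + 0.01 * rng.randn(n_samples)
    alphas, active, coefs = lars_path(X, y, method='lasso')
    # alphas is decreasing; coefs has one column per knot of the path
    return alphas.shape, len(active), coefs.shape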
###############################################################################
# Estimator classes
class Lars(LinearModel, RegressorMixin):
"""Least Angle Regression model a.k.a. LAR
Parameters
----------
n_nonzero_coefs : int, optional
Target number of non-zero coefficients. Use ``np.inf`` for no limit.
fit_intercept : boolean
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If ``True``, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If True the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``n_nonzero_coefs`` or ``n_features``, \
whichever is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) \
| list of n_targets such arrays
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.Lars(n_nonzero_coefs=1)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
Lars(copy_X=True, eps=..., fit_intercept=True, fit_path=True,
n_nonzero_coefs=1, normalize=True, precompute='auto', verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
See also
--------
lars_path, LarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, fit_intercept=True, verbose=False, normalize=True,
precompute='auto', n_nonzero_coefs=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.method = 'lar'
self.precompute = precompute
self.n_nonzero_coefs = n_nonzero_coefs
self.eps = eps
self.copy_X = copy_X
self.fit_path = fit_path
def _get_gram(self):
# precompute if n_samples > n_features
precompute = self.precompute
if hasattr(precompute, '__array__'):
Gram = precompute
elif precompute == 'auto':
Gram = 'auto'
else:
Gram = None
return Gram
def fit(self, X, y, Xy=None):
"""Fit the model using X, y as training data.
        Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
        Xy : array-like, shape (n_features,) or (n_features, n_targets), \
optional
Xy = np.dot(X.T, y) that can be precomputed. It is useful
only when the Gram matrix is precomputed.
        Returns
-------
self : object
returns an instance of self.
"""
X = check_array(X)
y = np.asarray(y)
n_features = X.shape[1]
X, y, X_mean, y_mean, X_std = self._center_data(X, y,
self.fit_intercept,
self.normalize,
self.copy_X)
if y.ndim == 1:
y = y[:, np.newaxis]
n_targets = y.shape[1]
alpha = getattr(self, 'alpha', 0.)
if hasattr(self, 'n_nonzero_coefs'):
alpha = 0. # n_nonzero_coefs parametrization takes priority
max_iter = self.n_nonzero_coefs
else:
max_iter = self.max_iter
precompute = self.precompute
if not hasattr(precompute, '__array__') and (
precompute is True or
(precompute == 'auto' and X.shape[0] > X.shape[1]) or
(precompute == 'auto' and y.shape[1] > 1)):
Gram = np.dot(X.T, X)
else:
Gram = self._get_gram()
self.alphas_ = []
self.n_iter_ = []
if self.fit_path:
self.coef_ = []
self.active_ = []
self.coef_path_ = []
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, active, coef_path, n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=True,
return_n_iter=True)
self.alphas_.append(alphas)
self.active_.append(active)
self.n_iter_.append(n_iter_)
self.coef_path_.append(coef_path)
self.coef_.append(coef_path[:, -1])
if n_targets == 1:
self.alphas_, self.active_, self.coef_path_, self.coef_ = [
a[0] for a in (self.alphas_, self.active_, self.coef_path_,
self.coef_)]
self.n_iter_ = self.n_iter_[0]
else:
self.coef_ = np.empty((n_targets, n_features))
for k in xrange(n_targets):
this_Xy = None if Xy is None else Xy[:, k]
alphas, _, self.coef_[k], n_iter_ = lars_path(
X, y[:, k], Gram=Gram, Xy=this_Xy, copy_X=self.copy_X,
copy_Gram=True, alpha_min=alpha, method=self.method,
verbose=max(0, self.verbose - 1), max_iter=max_iter,
eps=self.eps, return_path=False, return_n_iter=True)
self.alphas_.append(alphas)
self.n_iter_.append(n_iter_)
if n_targets == 1:
self.alphas_ = self.alphas_[0]
self.n_iter_ = self.n_iter_[0]
self._set_intercept(X_mean, y_mean, X_std)
return self
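# Illustrative sketch, not part of the estimator API: fitting ``Lars`` with a
# cap on the number of non-zero coefficients and reading back the selected
# support. The helper name is invented for documentation purposes.
def _lars_estimator_sketch(seed=0):
    """Fit Lars on a tiny sparse problem and return the active feature indices."""
    rng = np.random.RandomState(seed)
    X = rng.randn(30, 8)
    y = np.dot(X, np.array([1.5, 0., 0., -2., 0., 0., 0., 0.]))
    est = Lars(n_nonzero_coefs=2, fit_intercept=False)
    est.fit(X, y)
    return np.flatnonzero(est.coef_)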
class LassoLars(Lars):
"""Lasso model fit with Least Angle Regression a.k.a. Lars
It is a Linear Model trained with an L1 prior as regularizer.
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Parameters
----------
alpha : float
Constant that multiplies the penalty term. Defaults to 1.0.
``alpha = 0`` is equivalent to an ordinary least square, solved
by :class:`LinearRegression`. For numerical reasons, using
``alpha = 0`` with the LassoLars object is not advised and you
should prefer the LinearRegression object.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
fit_path : boolean
If ``True`` the full path is stored in the ``coef_path_`` attribute.
If you compute the solution for a large problem or many targets,
setting ``fit_path`` to ``False`` will lead to a speedup, especially
with a small alpha.
Attributes
----------
alphas_ : array, shape (n_alphas + 1,) | list of n_targets such arrays
Maximum of covariances (in absolute value) at each iteration. \
``n_alphas`` is either ``max_iter``, ``n_features``, or the number of \
nodes in the path with correlation greater than ``alpha``, whichever \
is smaller.
active_ : list, length = n_alphas | list of n_targets such lists
Indices of active variables at the end of the path.
coef_path_ : array, shape (n_features, n_alphas + 1) or list
If a list is passed it's expected to be one of n_targets such arrays.
The varying values of the coefficients along the path. It is not
present if the ``fit_path`` parameter is ``False``.
coef_ : array, shape (n_features,) or (n_targets, n_features)
Parameter vector (w in the formulation formula).
intercept_ : float | array, shape (n_targets,)
Independent term in decision function.
n_iter_ : array-like or int.
The number of iterations taken by lars_path to find the
grid of alphas for each target.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLars(alpha=0.01)
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLars(alpha=0.01, copy_X=True, eps=..., fit_intercept=True,
fit_path=True, max_iter=500, normalize=True, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -0.963257...]
See also
--------
lars_path
lasso_path
Lasso
LassoCV
LassoLarsCV
sklearn.decomposition.sparse_encode
"""
def __init__(self, alpha=1.0, fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True, fit_path=True):
self.alpha = alpha
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.method = 'lasso'
self.precompute = precompute
self.copy_X = copy_X
self.eps = eps
self.fit_path = fit_path
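# Illustrative sketch, not part of the estimator API: ``LassoLars`` optimizes
# the same objective as the coordinate-descent Lasso, so for a fixed alpha the
# two coefficient vectors should agree closely. The helper name is invented for
# documentation purposes and assumes the usual ``sklearn.linear_model.Lasso``
# import is available.
def _lasso_lars_vs_cd_sketch(alpha=0.1, seed=0):
    """Fit LassoLars and coordinate-descent Lasso; return the largest coef gap."""
    from sklearn.linear_model import Lasso
    rng = np.random.RandomState(seed)
    X = rng.randn(40, 6)
    y = np.dot(X, np.array([2., 0., 0., -1., 0., 0.])) + 0.01 * rng.randn(40)
    lars = LassoLars(alpha=alpha, normalize=False).fit(X, y)
    cd = Lasso(alpha=alpha, tol=1e-8, normalize=False).fit(X, y)
    return np.max(np.abs(lars.coef_ - cd.coef_))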
###############################################################################
# Cross-validated estimator classes
def _lars_path_residues(X_train, y_train, X_test, y_test, Gram=None,
copy=True, method='lars', verbose=False,
fit_intercept=True, normalize=True, max_iter=500,
eps=np.finfo(np.float).eps):
"""Compute the residues on left-out data for a full LARS path
Parameters
-----------
X_train : array, shape (n_samples, n_features)
The data to fit the LARS on
y_train : array, shape (n_samples)
The target variable to fit LARS on
X_test : array, shape (n_samples, n_features)
The data to compute the residues on
y_test : array, shape (n_samples)
The target variable to compute the residues on
Gram : None, 'auto', array, shape: (n_features, n_features), optional
Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram
matrix is precomputed from the given X, if there are more samples
than features
copy : boolean, optional
Whether X_train, X_test, y_train and y_test should be copied;
if False, they may be overwritten.
method : 'lar' | 'lasso'
Specifies the returned model. Select ``'lar'`` for Least Angle
Regression, ``'lasso'`` for the Lasso.
verbose : integer, optional
Sets the amount of verbosity
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
max_iter : integer, optional
Maximum number of iterations to perform.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Returns
--------
alphas : array, shape (n_alphas,)
Maximum of covariances (in absolute value) at each iteration.
``n_alphas`` is either ``max_iter`` or ``n_features``, whichever
is smaller.
active : list
Indices of active variables at the end of the path.
coefs : array, shape (n_features, n_alphas)
Coefficients along the path
residues : array, shape (n_alphas, n_samples)
Residues of the prediction on the test data
"""
if copy:
X_train = X_train.copy()
y_train = y_train.copy()
X_test = X_test.copy()
y_test = y_test.copy()
if fit_intercept:
X_mean = X_train.mean(axis=0)
X_train -= X_mean
X_test -= X_mean
y_mean = y_train.mean(axis=0)
y_train = as_float_array(y_train, copy=False)
y_train -= y_mean
y_test = as_float_array(y_test, copy=False)
y_test -= y_mean
if normalize:
norms = np.sqrt(np.sum(X_train ** 2, axis=0))
nonzeros = np.flatnonzero(norms)
X_train[:, nonzeros] /= norms[nonzeros]
alphas, active, coefs = lars_path(
X_train, y_train, Gram=Gram, copy_X=False, copy_Gram=False,
method=method, verbose=max(0, verbose - 1), max_iter=max_iter, eps=eps)
if normalize:
coefs[nonzeros] /= norms[nonzeros][:, np.newaxis]
residues = np.dot(X_test, coefs) - y_test[:, np.newaxis]
return alphas, active, coefs, residues.T
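# Illustrative sketch, not part of the public API: the residues returned by
# ``_lars_path_residues`` can be turned into a per-alpha mean squared error
# curve for a single train/test split, which is close to what ``LarsCV.fit``
# below aggregates across folds. The helper name is invented for illustration.
def _residues_to_mse_sketch(seed=0):
    """Return (alphas, mse) for one random train/test split."""
    rng = np.random.RandomState(seed)
    X = rng.randn(60, 5)
    y = np.dot(X, np.array([1., 0., -2., 0., 0.])) + 0.01 * rng.randn(60)
    X_train, X_test = X[:40], X[40:]
    y_train, y_test = y[:40], y[40:]
    alphas, active, coefs, residues = _lars_path_residues(
        X_train, y_train, X_test, y_test, method='lasso')
    mse = np.mean(residues ** 2, axis=-1)  # one value per point on the path
    return alphas, mse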
class LarsCV(Lars):
"""Cross-validated Least Angle Regression model
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
    max_iter : integer, optional
Maximum number of iterations to perform.
cv : cross-validation generator, optional
see :mod:`sklearn.cross_validation`. If ``None`` is passed, default to
a 5-fold strategy
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
method = 'lar'
def __init__(self, fit_intercept=True, verbose=False, max_iter=500,
normalize=True, precompute='auto', cv=None,
max_n_alphas=1000, n_jobs=1, eps=np.finfo(np.float).eps,
copy_X=True):
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.copy_X = copy_X
self.cv = cv
self.max_n_alphas = max_n_alphas
self.n_jobs = n_jobs
self.eps = eps
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X, y = check_X_y(X, y)
# init cross-validation generator
cv = check_cv(self.cv, X, y, classifier=False)
Gram = 'auto' if self.precompute else None
cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)(
delayed(_lars_path_residues)(
X[train], y[train], X[test], y[test], Gram=Gram, copy=False,
method=self.method, verbose=max(0, self.verbose - 1),
normalize=self.normalize, fit_intercept=self.fit_intercept,
max_iter=self.max_iter, eps=self.eps)
for train, test in cv)
all_alphas = np.concatenate(list(zip(*cv_paths))[0])
# Unique also sorts
all_alphas = np.unique(all_alphas)
# Take at most max_n_alphas values
stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas))))
all_alphas = all_alphas[::stride]
mse_path = np.empty((len(all_alphas), len(cv_paths)))
for index, (alphas, active, coefs, residues) in enumerate(cv_paths):
alphas = alphas[::-1]
residues = residues[::-1]
if alphas[0] != 0:
alphas = np.r_[0, alphas]
residues = np.r_[residues[0, np.newaxis], residues]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
residues = np.r_[residues, residues[-1, np.newaxis]]
this_residues = interpolate.interp1d(alphas,
residues,
axis=0)(all_alphas)
this_residues **= 2
mse_path[:, index] = np.mean(this_residues, axis=-1)
mask = np.all(np.isfinite(mse_path), axis=-1)
all_alphas = all_alphas[mask]
mse_path = mse_path[mask]
# Select the alpha that minimizes left-out error
i_best_alpha = np.argmin(mse_path.mean(axis=-1))
best_alpha = all_alphas[i_best_alpha]
# Store our parameters
self.alpha_ = best_alpha
self.cv_alphas_ = all_alphas
self.cv_mse_path_ = mse_path
# Now compute the full model
        # it will call a lasso internally when self is LassoLarsCV
# as self.method == 'lasso'
Lars.fit(self, X, y)
return self
@property
def alpha(self):
# impedance matching for the above Lars.fit (should not be documented)
return self.alpha_
class LassoLarsCV(LarsCV):
"""Cross-validated Lasso, using the LARS algorithm
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
Parameters
----------
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform.
cv : cross-validation generator, optional
see sklearn.cross_validation module. If None is passed, default to
a 5-fold strategy
max_n_alphas : integer, optional
The maximum number of points on the path used to compute the
residuals in the cross-validation
n_jobs : integer, optional
Number of CPUs to use during the cross validation. If ``-1``, use
all the CPUs
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
coef_path_ : array, shape (n_features, n_alphas)
the varying values of the coefficients along the path
alpha_ : float
the estimated regularization parameter alpha
alphas_ : array, shape (n_alphas,)
the different values of alpha along the path
cv_alphas_ : array, shape (n_cv_alphas,)
all the values of alpha along the path for the different folds
cv_mse_path_ : array, shape (n_folds, n_cv_alphas)
the mean square error on left-out for each fold along the path
(alpha values given by ``cv_alphas``)
n_iter_ : array-like or int
the number of iterations run by Lars with the optimal alpha.
Notes
-----
The object solves the same problem as the LassoCV object. However,
    unlike the LassoCV, it finds the relevant alpha values by itself.
In general, because of this property, it will be more stable.
However, it is more fragile to heavily multicollinear datasets.
It is more efficient than the LassoCV if only a small number of
features are selected compared to the total number, for instance if
there are very few samples compared to the number of features.
See also
--------
lars_path, LassoLars, LarsCV, LassoCV
"""
method = 'lasso'
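# Illustrative sketch, not part of the estimator API: cross-validating the
# regularization strength with ``LassoLarsCV`` and reading back the selected
# alpha together with the CV error surface. The helper name is invented for
# documentation purposes.
def _lasso_lars_cv_sketch(seed=0):
    """Fit LassoLarsCV on random data; return (alpha_, cv_mse_path_.shape)."""
    rng = np.random.RandomState(seed)
    X = rng.randn(80, 10)
    w = np.zeros(10)
    w[:3] = [1.5, -2., 1.]
    y = np.dot(X, w) + 0.1 * rng.randn(80)
    model = LassoLarsCV(cv=4).fit(X, y)
    return model.alpha_, model.cv_mse_path_.shape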
class LassoLarsIC(LassoLars):
"""Lasso model fit with Lars using BIC or AIC for model selection
The optimization objective for Lasso is::
(1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1
AIC is the Akaike information criterion and BIC is the Bayes
Information criterion. Such criteria are useful to select the value
of the regularization parameter by making a trade-off between the
goodness of fit and the complexity of the model. A good model should
explain well the data while being simple.
Parameters
----------
criterion : 'bic' | 'aic'
The type of criterion to use.
fit_intercept : boolean
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
    normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up
calculations. If set to ``'auto'`` let us decide. The Gram
matrix can also be passed as argument.
max_iter : integer, optional
Maximum number of iterations to perform. Can be used for
early stopping.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the ``tol`` parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
Attributes
----------
coef_ : array, shape (n_features,)
parameter vector (w in the formulation formula)
intercept_ : float
independent term in decision function.
alpha_ : float
the alpha parameter chosen by the information criterion
n_iter_ : int
number of iterations run by lars_path to find the grid of
alphas.
criterion_ : array, shape (n_alphas,)
The value of the information criteria ('aic', 'bic') across all
alphas. The alpha which has the smallest information criteria
is chosen.
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.LassoLarsIC(criterion='bic')
>>> clf.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111])
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
LassoLarsIC(copy_X=True, criterion='bic', eps=..., fit_intercept=True,
max_iter=500, normalize=True, precompute='auto',
verbose=False)
>>> print(clf.coef_) # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
[ 0. -1.11...]
Notes
-----
The estimation of the number of degrees of freedom is given by:
"On the degrees of freedom of the lasso"
Hui Zou, Trevor Hastie, and Robert Tibshirani
Ann. Statist. Volume 35, Number 5 (2007), 2173-2192.
http://en.wikipedia.org/wiki/Akaike_information_criterion
http://en.wikipedia.org/wiki/Bayesian_information_criterion
See also
--------
lars_path, LassoLars, LassoLarsCV
"""
def __init__(self, criterion='aic', fit_intercept=True, verbose=False,
normalize=True, precompute='auto', max_iter=500,
eps=np.finfo(np.float).eps, copy_X=True):
self.criterion = criterion
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.copy_X = copy_X
self.precompute = precompute
self.eps = eps
def fit(self, X, y, copy_X=True):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples, n_features)
training data.
y : array-like, shape (n_samples,)
target values.
copy_X : boolean, optional, default True
If ``True``, X will be copied; else, it may be overwritten.
Returns
-------
self : object
returns an instance of self.
"""
self.fit_path = True
X = check_array(X)
y = np.asarray(y)
X, y, Xmean, ymean, Xstd = LinearModel._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
max_iter = self.max_iter
Gram = self._get_gram()
alphas_, active_, coef_path_, self.n_iter_ = lars_path(
X, y, Gram=Gram, copy_X=copy_X, copy_Gram=True, alpha_min=0.0,
method='lasso', verbose=self.verbose, max_iter=max_iter,
eps=self.eps, return_n_iter=True)
n_samples = X.shape[0]
if self.criterion == 'aic':
K = 2 # AIC
elif self.criterion == 'bic':
K = log(n_samples) # BIC
else:
raise ValueError('criterion should be either bic or aic')
R = y[:, np.newaxis] - np.dot(X, coef_path_) # residuals
mean_squared_error = np.mean(R ** 2, axis=0)
df = np.zeros(coef_path_.shape[1], dtype=np.int) # Degrees of freedom
for k, coef in enumerate(coef_path_.T):
mask = np.abs(coef) > np.finfo(coef.dtype).eps
if not np.any(mask):
continue
# get the number of degrees of freedom equal to:
# Xc = X[:, mask]
            # Trace(Xc * inv(Xc.T * Xc) * Xc.T), i.e. the number of non-zero coefs
df[k] = np.sum(mask)
self.alphas_ = alphas_
with np.errstate(divide='ignore'):
self.criterion_ = n_samples * np.log(mean_squared_error) + K * df
n_best = np.argmin(self.criterion_)
self.alpha_ = alphas_[n_best]
self.coef_ = coef_path_[:, n_best]
self._set_intercept(Xmean, ymean, Xstd)
return self
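# Illustrative sketch, not part of the public API: the criterion computed in
# ``LassoLarsIC.fit`` above reduces, for each point on the path, to
#     criterion = n_samples * log(mse) + K * df
# with K = 2 for AIC, K = log(n_samples) for BIC, and df the number of
# non-zero coefficients. A standalone helper for the same quantity could look
# like this (the function name is invented for illustration).
def _information_criterion_sketch(n_samples, mse, df, criterion='aic'):
    """Return AIC/BIC-style criterion values for arrays of mse and df."""
    mse = np.asarray(mse, dtype=float)
    df = np.asarray(df, dtype=float)
    if criterion == 'aic':
        K = 2.
    elif criterion == 'bic':
        K = log(n_samples)
    else:
        raise ValueError('criterion should be either bic or aic')
    return n_samples * np.log(mse) + K * df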
|
bsd-3-clause
|
laosiaudi/tensorflow
|
tensorflow/contrib/learn/python/learn/learn_io/__init__.py
|
8
|
2293
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to allow different io formats."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_data
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import extract_dask_labels
from tensorflow.contrib.learn.python.learn.learn_io.dask_io import HAS_DASK
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import _read_keyed_batch_examples_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import _read_keyed_batch_features_shared_queue
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import queue_parsed_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_batch_record_features
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_examples
from tensorflow.contrib.learn.python.learn.learn_io.graph_io import read_keyed_batch_features
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_data
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_labels
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import extract_pandas_matrix
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import HAS_PANDAS
from tensorflow.contrib.learn.python.learn.learn_io.pandas_io import pandas_input_fn
|
apache-2.0
|
simongibbons/numpy
|
numpy/lib/npyio.py
|
3
|
87121
|
import sys
import os
import re
import functools
import itertools
import warnings
import weakref
import contextlib
from operator import itemgetter, index as opindex
from collections.abc import Mapping
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core import overrides
from numpy.core.multiarray import packbits, unpackbits
from numpy.core.overrides import set_module
from numpy.core._internal import recursive
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like,
has_nested_fields, flatten_dtype, easy_dtype, _decode_line
)
from numpy.compat import (
asbytes, asstr, asunicode, bytes, os_fspath, os_PathLike,
pickle, contextlib_nullcontext
)
@set_module('numpy')
def loads(*args, **kwargs):
# NumPy 1.15.0, 2017-12-10
warnings.warn(
"np.loads is deprecated, use pickle.loads instead",
DeprecationWarning, stacklevel=2)
return pickle.loads(*args, **kwargs)
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
class BagObj:
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo:
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def __dir__(self):
"""
Enables dir(bagobj) to list the files in an NpzFile.
This also enables tab-completion in an interpreter or IPython.
"""
return list(object.__getattribute__(self, '_obj').keys())
def zipfile_factory(file, *args, **kwargs):
"""
Create a ZipFile.
Allows for Zip64, and the `file` argument can accept file, str, or
pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
constructor.
"""
if not hasattr(file, 'read'):
file = os_fspath(file)
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(file, *args, **kwargs)
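# Illustrative sketch, not part of the public API: an ``.npz`` archive is just
# a (Zip64-capable) zip file whose members are ``.npy`` files, and
# ``zipfile_factory`` is the hook used to create it. The helper below, with an
# invented name, shows how the factory could be exercised directly to write a
# one-member archive that ``np.load`` can read back.
def _zipfile_factory_sketch(path, arr):
    """Write ``arr`` into ``path`` as a single-member npz-style archive."""
    import io
    buf = io.BytesIO()
    format.write_array(buf, np.asanyarray(arr))
    with zipfile_factory(path, mode='w') as zf:
        zf.writestr('arr_0.npy', buf.getvalue())
    return path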
class NpzFile(Mapping):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ``.npy`` extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
        An object on which attribute lookup can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
Allow loading pickled data. Default: False
.. versionchanged:: 1.16.3
Made default False in response to CVE-2019-6446.
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
Python 2 when using Python 3.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> _ = outfile.seek(0)
>>> npz = np.load(outfile)
    >>> isinstance(npz, np.lib.npyio.NpzFile)
True
>>> sorted(npz.files)
['x', 'y']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False, allow_pickle=False,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
self.allow_pickle = allow_pickle
self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
# Implement the Mapping ABC
def __iter__(self):
return iter(self.files)
def __len__(self):
return len(self.files)
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = False
if key in self._files:
member = True
elif key in self.files:
member = True
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes,
allow_pickle=self.allow_pickle,
pickle_kwargs=self.pickle_kwargs)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
# deprecate the python 2 dict apis that we supported by accident in
# python 3. We forgot to implement itervalues() at all in earlier
    # versions of numpy, so no need to deprecate it here.
def iteritems(self):
# Numpy 1.15, 2018-02-20
warnings.warn(
"NpzFile.iteritems is deprecated in python 3, to match the "
"removal of dict.itertems. Use .items() instead.",
DeprecationWarning, stacklevel=2)
return self.items()
def iterkeys(self):
# Numpy 1.15, 2018-02-20
warnings.warn(
"NpzFile.iterkeys is deprecated in python 3, to match the "
"removal of dict.iterkeys. Use .keys() instead.",
DeprecationWarning, stacklevel=2)
return self.keys()
@set_module('numpy')
def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
.. warning:: Loading files that contain object arrays uses the ``pickle``
module, which is not secure against erroneous or maliciously
constructed data. Consider passing ``allow_pickle=False`` to
load data that is known not to contain object arrays for the
safer handling of untrusted sources.
Parameters
----------
file : file-like object, string, or pathlib.Path
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
allow_pickle : bool, optional
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
arrays will fail. Default: False
.. versionchanged:: 1.16.3
Made default False in response to CVE-2019-6446.
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
is True, pickle will try to map the old Python 2 names to the new names
used in Python 3.
encoding : str, optional
What encoding to use when reading Python 2 strings. Only useful when
loading Python 2 generated pickled files in Python 3, which includes
npy/npz files containing object arrays. Values other than 'latin1',
'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
data. Default: 'ASCII'
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
ValueError
The file contains an object array, but allow_pickle=False given.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of NumPy arrays is loaded
# in. Pickle does not pass on the encoding information to
# NumPy. The unpickling code in numpy.core.multiarray is
# written to assume that unicode data appearing where binary
# should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
#
# Other encoding values can corrupt binary data, and we
# purposefully disallow them. For the same reason, the errors=
# argument is not exposed, as values other than 'strict'
# result can similarly silently corrupt numerical data.
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
with contextlib.ExitStack() as stack:
if hasattr(file, 'read'):
fid = file
own_fid = False
else:
fid = stack.enter_context(open(os_fspath(file), "rb"))
own_fid = True
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = b'PK\x03\x04'
_ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
# If the file size is less than N, we need to make sure not
# to seek past the beginning of the file
fid.seek(-min(N, len(magic)), 1) # back-up
if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
# zip-file (assume .npz)
# Potentially transfer file ownership to NpzFile
stack.pop_all()
ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
return ret
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Try a pickle
if not allow_pickle:
raise ValueError("Cannot load file containing pickled data "
"when allow_pickle=False")
try:
return pickle.load(fid, **pickle_kwargs)
except Exception:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):
return (arr,)
@array_function_dispatch(_save_dispatcher)
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file, str, or pathlib.Path
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string or Path, a ``.npy``
extension will be appended to the filename if it does not already
have one.
arr : array_like
Array data to be saved.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for disallowing
pickles include security (loading pickled data can execute arbitrary
code) and portability (pickled objects may not be loadable on different
Python installations, for example if the stored objects require libraries
that are not available, and not all pickled data is compatible between
Python 2 and Python 3).
Default: True
fix_imports : bool, optional
Only useful in forcing objects in object arrays on Python 3 to be
pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
will try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
Any data saved to the file is appended to the end of the file.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> with open('test.npy', 'wb') as f:
... np.save(f, np.array([1, 2]))
... np.save(f, np.array([1, 3]))
>>> with open('test.npy', 'rb') as f:
... a = np.load(f)
... b = np.load(f)
>>> print(a, b)
# [1 2] [1 3]
"""
if hasattr(file, 'write'):
file_ctx = contextlib_nullcontext(file)
else:
file = os_fspath(file)
if not file.endswith('.npy'):
file = file + '.npy'
file_ctx = open(file, "wb")
with file_ctx as fid:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
pickle_kwargs=dict(fix_imports=fix_imports))
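# A minimal usage sketch (hypothetical helper, not part of NumPy's API):
# `save` leaves open file objects untouched, so several arrays can be written
# back to back and read again in the same order with `load`.
def _example_save_usage():
    import io
    import numpy as np

    buf = io.BytesIO()
    np.save(buf, np.array([1, 2, 3]))
    np.save(buf, np.array([[4.0, 5.0]]))   # written after the first array
    buf.seek(0)
    a = np.load(buf)
    b = np.load(buf)
    assert a.tolist() == [1, 2, 3]
    assert b.shape == (1, 2)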
def _savez_dispatcher(file, *args, **kwds):
yield from args
yield from kwds.values()
@array_function_dispatch(_savez_dispatcher)
def savez(file, *args, **kwds):
"""Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the filename (string) or an open file (file-like object)
where the data will be saved. If file is a string or a Path, the
``.npz`` extension will be appended to the filename if it is not
already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
When saving dictionaries, the dictionary keys become filenames
inside the ZIP archive. Therefore, keys should be valid filenames.
E.g., avoid keys that begin with ``/`` or contain ``.``.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_0', 'arr_1']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> _ = outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> sorted(npzfile.files)
['x', 'y']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
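# A minimal usage sketch (hypothetical helper, not part of NumPy's API):
# positional arguments to `savez` are stored as 'arr_0', 'arr_1', ... while
# keyword arguments are stored under their keyword names.
def _example_savez_usage():
    import os
    import tempfile
    import numpy as np

    path = os.path.join(tempfile.mkdtemp(), "demo.npz")
    x = np.arange(5)
    y = np.ones(3)

    np.savez(path, x, y=y)          # one positional, one keyword argument
    with np.load(path) as data:
        assert sorted(data.files) == ["arr_0", "y"]
        assert np.array_equal(data["arr_0"], x)
        assert np.array_equal(data["y"], y)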
def _savez_compressed_dispatcher(file, *args, **kwds):
yield from args
yield from kwds.values()
@array_function_dispatch(_savez_compressed_dispatcher)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored filenames are
arr_0, arr_1, etc.
Parameters
----------
file : str or file
Either the filename (string) or an open file (file-like object)
where the data will be saved. If file is a string or a Path, the
``.npz`` extension will be appended to the filename if it is not
already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
numpy.save : Save a single array to a binary file in NumPy format.
numpy.savetxt : Save an array to a file as plain text.
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is compressed with
``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
in ``.npy`` format. For a description of the ``.npy`` format, see
:py:mod:`numpy.lib.format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> test_array = np.random.rand(3, 2)
>>> test_vector = np.random.rand(4)
>>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
>>> loaded = np.load('/tmp/123.npz')
>>> print(np.array_equal(test_array, loaded['a']))
True
>>> print(np.array_equal(test_vector, loaded['b']))
True
"""
_savez(file, args, kwds, True)
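# A minimal usage sketch (hypothetical helper, not part of NumPy's API):
# for compressible data, `savez_compressed` yields a noticeably smaller
# archive than `savez`, and loading works exactly the same way.
def _example_savez_compressed_usage():
    import os
    import tempfile
    import numpy as np

    tmpdir = tempfile.mkdtemp()
    plain = os.path.join(tmpdir, "plain.npz")
    packed = os.path.join(tmpdir, "packed.npz")
    a = np.zeros((1000, 100))       # highly compressible payload

    np.savez(plain, a=a)
    np.savez_compressed(packed, a=a)
    assert os.path.getsize(packed) < os.path.getsize(plain)

    with np.load(packed) as data:
        assert np.array_equal(data["a"], a)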
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
if not hasattr(file, 'write'):
file = os_fspath(file)
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
if sys.version_info >= (3, 6):
# Since Python 3.6 it is possible to write directly to a ZIP file.
for key, val in namedict.items():
fname = key + '.npy'
val = np.asanyarray(val)
# always force zip64, gh-10776
with zipf.open(fname, 'w', force_zip64=True) as fid:
format.write_array(fid, val,
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Stage arrays in a temporary file on disk, before writing to zip.
# Import deferred for startup time improvement
import tempfile
# Since target file might be big enough to exceed capacity of a global
# temporary directory, create temp file side-by-side with the target file.
file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp')
fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir, suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val),
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
except IOError as exc:
raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
def floatconv(x):
        # use the lower-cased value so '0X'-prefixed hex literals are detected
        if '0x' in x.lower():
return float.fromhex(x)
return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.longdouble):
return np.longdouble
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, complex):
return lambda x: complex(asstr(x).replace('+-', '-'))
elif issubclass(typ, np.bytes_):
return asbytes
elif issubclass(typ, np.unicode_):
return asunicode
else:
return asstr
# number of lines loadtxt reads in one chunk, can be overridden for testing
_loadtxt_chunksize = 50000
@set_module('numpy')
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0, encoding='bytes', max_rows=None):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file, str, or pathlib.Path
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str or sequence of str, optional
The characters or list of characters used to indicate the start of a
comment. None implies no comments. For backwards compatibility, byte
strings will be decoded as 'latin1'. The default is '#'.
delimiter : str, optional
The string used to separate values. For backwards compatibility, byte
strings will be decoded as 'latin1'. The default is whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will parse the
column string into the desired value. E.g., if column 0 is a date
string: ``converters = {0: datestr2num}``. Converters can also be
used to provide a default value for missing data (but see also
`genfromtxt`): ``converters = {3: lambda s: float(s.strip() or 0)}``.
Default: None.
skiprows : int, optional
Skip the first `skiprows` lines, including comments; default: 0.
usecols : int or sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
.. versionchanged:: 1.11.0
When a single column has to be read it is possible to use
an integer instead of a tuple. E.g ``usecols = 3`` reads the
fourth column the same way as ``usecols = (3,)`` would.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
encoding : str, optional
        Encoding used to decode the input file. Does not apply to input streams.
        The special value 'bytes' enables backward compatibility workarounds
        that ensure you receive byte arrays as results if possible and pass
        'latin1' encoded strings to converters. Override this value to receive
unicode arrays and pass strings as input to converters. If set to None
the system default is used. The default value is 'bytes'.
.. versionadded:: 1.14.0
max_rows : int, optional
Read `max_rows` lines of content after `skiprows` lines. The default
is to read all the lines.
.. versionadded:: 1.16.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
.. versionadded:: 1.10.0
The strings produced by the Python float.hex method can be used as
input for floats.
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
>>> c = StringIO(u"0 1\\n2 3")
>>> np.loadtxt(c)
array([[0., 1.],
[2., 3.]])
>>> d = StringIO(u"M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([(b'M', 21, 72.), (b'F', 35, 58.)],
dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO(u"1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([1., 3.])
>>> y
array([2., 4.])
"""
# Type conversions for Py3 convenience
if comments is not None:
if isinstance(comments, (str, bytes)):
comments = [comments]
comments = [_decode_line(x) for x in comments]
# Compile regex for comments beforehand
comments = (re.escape(comment) for comment in comments)
regex_comments = re.compile('|'.join(comments))
if delimiter is not None:
delimiter = _decode_line(delimiter)
user_converters = converters
if encoding == 'bytes':
encoding = None
byte_converters = True
else:
byte_converters = False
if usecols is not None:
# Allow usecols to be a single int or a sequence of ints
try:
usecols_as_list = list(usecols)
except TypeError:
usecols_as_list = [usecols]
for col_idx in usecols_as_list:
try:
opindex(col_idx)
except TypeError as e:
e.args = (
"usecols must be an int or a sequence of ints but "
"it contains at least one element of type %s" %
type(col_idx),
)
raise
# Fall back to existing code
usecols = usecols_as_list
fown = False
try:
if isinstance(fname, os_PathLike):
fname = os_fspath(fname)
if _is_string_like(fname):
fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
fencoding = getattr(fh, 'encoding', 'latin1')
fh = iter(fh)
fown = True
else:
fh = iter(fname)
fencoding = getattr(fname, 'encoding', 'latin1')
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
# input may be a python2 io stream
if encoding is not None:
fencoding = encoding
# we must assume local encoding
# TODO emit portability warning?
elif fencoding is None:
import locale
fencoding = locale.getpreferredencoding()
# not to be confused with the flatten_dtype we import...
@recursive
def flatten_dtype_internal(self, dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = self(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if tp.ndim > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
@recursive
def pack_items(self, items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(self(items[start:start+length], subpacking))
start += length
return tuple(ret)
def split_line(line):
"""Chop off comments, strip, and split at delimiter. """
line = _decode_line(line, encoding=encoding)
if comments is not None:
line = regex_comments.split(line, maxsplit=1)[0]
line = line.strip('\r\n')
if line:
return line.split(delimiter)
else:
return []
def read_data(chunk_size):
"""Parse each line, including the first.
        The file handle, `fh`, is taken from the enclosing `loadtxt` scope.
Parameters
----------
chunk_size : int
At most `chunk_size` lines are read at a time, with iteration
until all lines are read.
"""
X = []
line_iter = itertools.chain([first_line], fh)
line_iter = itertools.islice(line_iter, max_rows)
for i, line in enumerate(line_iter):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[j] for j in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
% line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
if len(X) > chunk_size:
yield X
X = []
if X:
yield X
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname, stacklevel=2)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype_internal(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
if byte_converters:
# converters may use decode to workaround numpy's old behaviour,
# so encode the string again before passing to the user converter
def tobytes_first(x, conv):
if type(x) is bytes:
return conv(x)
return conv(x.encode("latin1"))
converters[i] = functools.partial(tobytes_first, conv=conv)
else:
converters[i] = conv
converters = [conv if conv is not bytes else
lambda x: x.encode(fencoding) for conv in converters]
# read data in chunks and fill it into an array via resize
# over-allocating and shrinking the array later may be faster but is
# probably not relevant compared to the cost of actually reading and
# converting the data
X = None
for x in read_data(_loadtxt_chunksize):
if X is None:
X = np.array(x, dtype)
else:
nshape = list(X.shape)
pos = nshape[0]
nshape[0] += len(x)
X.resize(nshape, refcheck=False)
X[pos:, ...] = x
finally:
if fown:
fh.close()
if X is None:
X = np.array([], dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
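# A minimal usage sketch (hypothetical helper, not part of NumPy's API):
# it exercises a few common `loadtxt` options -- comment skipping, `usecols`,
# `unpack` and `max_rows` -- on an in-memory text table.
def _example_loadtxt_usage():
    from io import StringIO
    import numpy as np

    text = u"# x y z\n1 2 3\n4 5 6\n7 8 9\n"

    # Whitespace-delimited data; the comment line is skipped automatically.
    full = np.loadtxt(StringIO(text))
    assert full.shape == (3, 3)

    # Restrict to two columns and unpack them into separate 1-D arrays.
    x, z = np.loadtxt(StringIO(text), usecols=(0, 2), unpack=True)
    assert x.tolist() == [1.0, 4.0, 7.0]
    assert z.tolist() == [3.0, 6.0, 9.0]

    # max_rows stops reading after the requested number of data lines.
    head = np.loadtxt(StringIO(text), max_rows=1)
    assert head.tolist() == [1.0, 2.0, 3.0]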
def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
header=None, footer=None, comments=None,
encoding=None):
return (X,)
@array_function_dispatch(_savetxt_dispatcher)
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# ', encoding=None):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : 1D or 2D array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
* a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
* a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
* a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
encoding : {None, str}, optional
        Encoding used to encode the output file. Does not apply to output
streams. If the encoding is something other than 'bytes' or 'latin1'
you will not be able to load the file in NumPy versions < 1.14. Default
is 'latin1'.
.. versionadded:: 1.14.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<https://docs.python.org/library/string.html#format-specification-mini-language>`_,
Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
class WriteWrap:
"""Convert to bytes on bytestream inputs.
"""
def __init__(self, fh, encoding):
self.fh = fh
self.encoding = encoding
self.do_write = self.first_write
def close(self):
self.fh.close()
def write(self, v):
self.do_write(v)
def write_bytes(self, v):
if isinstance(v, bytes):
self.fh.write(v)
else:
self.fh.write(v.encode(self.encoding))
def write_normal(self, v):
self.fh.write(asunicode(v))
def first_write(self, v):
try:
self.write_normal(v)
self.write = self.write_normal
except TypeError:
# input is probably a bytestream
self.write_bytes(v)
self.write = self.write_bytes
own_fh = False
if isinstance(fname, os_PathLike):
fname = os_fspath(fname)
if _is_string_like(fname):
# datasource doesn't support creating a new file ...
open(fname, 'wt').close()
fh = np.lib._datasource.open(fname, 'wt', encoding=encoding)
own_fh = True
elif hasattr(fname, 'write'):
# wrap to handle byte output streams
fh = WriteWrap(fname, encoding or 'latin1')
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 0 or X.ndim > 2:
raise ValueError(
"Expected 1D or 2D array, got %dD array instead" % X.ndim)
elif X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.names)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(comments + header + newline)
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
s = format % tuple(row2) + newline
fh.write(s.replace('+-', '-'))
else:
for row in X:
try:
v = format % tuple(row) + newline
except TypeError:
raise TypeError("Mismatch between array dtype ('%s') and "
"format specifier ('%s')"
% (str(X.dtype), format))
fh.write(v)
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(comments + footer + newline)
finally:
if own_fh:
fh.close()
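# A minimal usage sketch (hypothetical helper, not part of NumPy's API):
# it writes a small table with a custom format, delimiter and commented
# header, then reads it back with `loadtxt` to confirm the round trip.
def _example_savetxt_usage():
    from io import StringIO
    import numpy as np

    data = np.array([[1.0, 2.0], [3.0, 4.0]])
    buf = StringIO()
    np.savetxt(buf, data, fmt="%.2f", delimiter=",", header="col_a,col_b")

    text = buf.getvalue()
    assert text.splitlines()[0] == "# col_a,col_b"   # header is commented out

    buf.seek(0)
    back = np.loadtxt(buf, delimiter=",")            # comment line is skipped
    assert np.allclose(back, data)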
@set_module('numpy')
def fromregex(file, regexp, dtype, encoding=None):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
Filename or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
encoding : str, optional
        Encoding used to decode the input file. Does not apply to input streams.
.. versionadded:: 1.14.0
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> _ = f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')],
dtype=[('num', '<i8'), ('key', 'S3')])
>>> output['num']
array([1312, 1534, 444])
"""
own_fh = False
if not hasattr(file, "read"):
file = np.lib._datasource.open(file, 'rt', encoding=encoding)
own_fh = True
try:
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
content = file.read()
if isinstance(content, bytes) and isinstance(regexp, np.compat.unicode):
regexp = asbytes(regexp)
elif isinstance(content, np.compat.unicode) and isinstance(regexp, bytes):
regexp = asstr(regexp)
if not hasattr(regexp, 'match'):
regexp = re.compile(regexp)
seq = regexp.findall(content)
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
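# A minimal usage sketch (hypothetical helper, not part of NumPy's API):
# each group of the regular expression becomes one field of the structured
# array returned by `fromregex`.
def _example_fromregex_usage():
    from io import StringIO
    import numpy as np

    log = StringIO(u"GET /index.html 200\nPOST /login 403\n")
    records = np.fromregex(log, r"(\w+) (\S+) (\d+)",
                           [("method", "U4"), ("path", "U32"),
                            ("status", np.int64)])
    assert records["method"].tolist() == ["GET", "POST"]
    assert records["status"].tolist() == [200, 403]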
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
@set_module('numpy')
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None,
deletechars=''.join(sorted(NameValidator.defaultdeletechars)),
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None, encoding='bytes'):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file, str, pathlib.Path, list of str, generator
File, filename, list, or generator to read. If the filename
extension is `.gz` or `.bz2`, the file is first decompressed. Note
that generators must return byte strings. The strings
in a list or produced by a generator are treated as lines.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first line after
        the first `skip_header` lines. This line can optionally be preceded
by a comment delimiter. If `names` is a sequence or a single-string of
comma-separated names, the names will be used to define the field names
in a structured dtype. If `names` is None, the names of the dtype
fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names have an underscore appended:
        for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
encoding : str, optional
        Encoding used to decode the input file. Does not apply when `fname` is
        a file object. The special value 'bytes' enables backward compatibility
        workarounds that ensure that you receive byte arrays when possible
        and pass latin1 encoded strings to converters. Override this value to
receive unicode arrays and pass strings as input to converters. If set
to None the system default is used. The default value is 'bytes'.
.. versionadded:: 1.14.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] NumPy User Guide, section `I/O with NumPy
<https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
    --------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO(u"1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, b'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
Using dtype = None
>>> _ = s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, b'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
Specifying dtype and names
>>> _ = s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, b'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
An example with fixed-width columns
>>> s = StringIO(u"11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, b'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', 'S5')])
An example to show comments
>>> f = StringIO('''
... text,# of chars
... hello world,11
... numpy,5''')
>>> np.genfromtxt(f, dtype='S12,S12', delimiter=',')
array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')],
dtype=[('f0', 'S12'), ('f1', 'S12')])
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
if encoding == 'bytes':
encoding = None
byte_converters = True
else:
byte_converters = False
# Initialize the filehandle, the LineSplitter and the NameValidator
try:
if isinstance(fname, os_PathLike):
fname = os_fspath(fname)
if isinstance(fname, str):
fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)
fid_ctx = contextlib.closing(fid)
else:
fid = fname
fid_ctx = contextlib_nullcontext(fid)
fhd = iter(fid)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, list of strings, "
"or generator. Got %s instead." % type(fname))
with fid_ctx:
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip, encoding=encoding)
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
try:
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
while not first_values:
first_line = _decode_line(next(fhd), encoding)
if (names is True) and (comments is not None):
if comments in first_line:
first_line = (
''.join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = ''
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if comments is not None:
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([str(_.strip()) for _ in first_values])
first_line = ''
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
if isinstance(user_missing_values, bytes):
user_missing_values = user_missing_values.decode('latin1')
# Define the list of missing_values (one column: one list)
missing_values = [list(['']) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, str):
user_value = user_missing_values.split(",")
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
if conv is bytes:
user_conv = asbytes
elif byte_converters:
# converters may use decode to workaround numpy's old behaviour,
# so encode the string again before passing to the user converter
def tobytes_first(x, conv):
if type(x) is bytes:
return conv(x)
return conv(x.encode("latin1"))
user_conv = functools.partial(tobytes_first, conv=conv)
else:
user_conv = conv
converters[i].update(user_conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, user_conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
# miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning, stacklevel=2)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v == np.unicode_]
if byte_converters and strcolidx:
# convert strings back to bytes for backward compatibility
warnings.warn(
"Reading unicode strings without specifying the encoding "
"argument is deprecated. Set the encoding, use None for the "
"system default.",
np.VisibleDeprecationWarning, stacklevel=2)
def encode_unicode_cols(row_tup):
row = list(row_tup)
for i in strcolidx:
row[i] = row[i].encode('latin1')
return tuple(row)
try:
data = [encode_unicode_cols(r) for r in data]
except UnicodeEncodeError:
pass
else:
for i in strcolidx:
column_types[i] = np.bytes_
# Update string types to be the right length
sized_column_types = column_types[:]
for i, col_type in enumerate(column_types):
if np.issubdtype(col_type, np.character):
n_chars = max(len(row[i]) for row in data)
sized_column_types[i] = (col_type, n_chars)
if names is None:
# If the dtype is uniform (before sizing strings)
base = {
c_type
for c, c_type in zip(converters, column_types)
if c._checked}
if len(base) == 1:
uniform_type, = base
(ddtype, mdtype) = (uniform_type, bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(sized_column_types)]
if usemask:
mdtype = [(defaultfmt % i, bool)
for (i, dt) in enumerate(sized_column_types)]
else:
ddtype = list(zip(names, sized_column_types))
mdtype = list(zip(names, [bool] * len(sized_column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names is not None:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if np.issubdtype(ttype, np.character):
ttype = (ttype, max(len(row[i]) for row in data))
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names is not None:
mdtype = [(_, bool) for _ in dtype.names]
else:
mdtype = bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names, converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != '']
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
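# A minimal usage sketch (hypothetical helper, not part of NumPy's API):
# with ``dtype=None`` and ``names=True``, `genfromtxt` infers one dtype per
# column from the header row and the data, and `filling_values` replaces the
# missing (empty) fields.
def _example_genfromtxt_usage():
    from io import StringIO
    import numpy as np

    text = u"a,b,c\n1,2.5,\n4,,6\n"
    data = np.genfromtxt(StringIO(text), delimiter=",", names=True,
                         dtype=None, filling_values=-1, encoding="utf-8")
    assert data.dtype.names == ("a", "b", "c")
    assert data["a"].tolist() == [1, 4]
    # The empty fields were replaced by the filling value.
    assert data["c"].tolist() == [-1, 6]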
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
.. deprecated:: 1.17
        `ndfromtxt` is a deprecated alias of `genfromtxt` which
overwrites the ``usemask`` argument with `False` even when
explicitly called as ``ndfromtxt(..., usemask=True)``.
Use `genfromtxt` instead.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
# Numpy 1.17
warnings.warn(
"np.ndfromtxt is a deprecated alias of np.genfromtxt, "
"prefer the latter.",
DeprecationWarning, stacklevel=2)
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
.. deprecated:: 1.17
np.mafromtxt is a deprecated alias of `genfromtxt` which
overwrites the ``usemask`` argument with `True` even when
explicitly called as ``mafromtxt(..., usemask=False)``.
Use `genfromtxt` instead.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
# Numpy 1.17
warnings.warn(
"np.mafromtxt is a deprecated alias of np.genfromtxt, "
"prefer the latter.",
DeprecationWarning, stacklevel=2)
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
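# Hedged usage sketch (added; not from the original module). It feeds an
# in-memory byte stream instead of a file path, which genfromtxt accepts.
def _recfromtxt_example():
    from io import BytesIO
    buf = BytesIO(b"1 2.5 abc\n3 4.5 def")
    rec = recfromtxt(buf, names=['a', 'b', 'c'])
    # rec is a recarray, so the named fields are available as attributes.
    return rec.a, rec.b, rec.c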
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
# Set default kwargs for genfromtxt as relevant to csv import.
kwargs.setdefault("case_sensitive", "lower")
kwargs.setdefault("names", True)
kwargs.setdefault("delimiter", ",")
kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
|
bsd-3-clause
|
ThomasMiconi/htmresearch
|
projects/sequence_prediction/continuous_sequence/run_tm_model.py
|
3
|
16639
|
## ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import importlib
from optparse import OptionParser
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.modelfactory import ModelFactory
from nupic.frameworks.opf.predictionmetricsmanager import MetricsManager
from nupic.frameworks.opf import metrics
from htmresearch.frameworks.opf.clamodel_custom import CLAModel_custom
import nupic_output
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from htmresearch.support.sequence_learning_utils import *
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
rcParams['pdf.fonttype'] = 42
plt.ion()
DATA_DIR = "./data"
MODEL_PARAMS_DIR = "./model_params"
def getMetricSpecs(predictedField, stepsAhead=5):
_METRIC_SPECS = (
MetricSpec(field=predictedField, metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': 'negativeLogLikelihood',
'window': 1000, 'steps': stepsAhead}),
MetricSpec(field=predictedField, metric='multiStep',
inferenceElement='multiStepBestPredictions',
params={'errorMetric': 'nrmse', 'window': 1000,
'steps': stepsAhead}),
)
return _METRIC_SPECS
def createModel(modelParams):
model = ModelFactory.create(modelParams)
model.enableInference({"predictedField": predictedField})
return model
def getModelParamsFromName(dataSet):
importName = "model_params.%s_model_params" % (
dataSet.replace(" ", "_").replace("-", "_")
)
print "Importing model params from %s" % importName
try:
importedModelParams = importlib.import_module(importName).MODEL_PARAMS
except ImportError:
raise Exception("No model params exist for '%s'. Run swarm first!"
% dataSet)
return importedModelParams
def _getArgs():
parser = OptionParser(usage="%prog PARAMS_DIR OUTPUT_DIR [options]"
"\n\nCompare TM performance with trivial predictor using "
"model outputs in prediction directory "
"and outputting results to result directory.")
parser.add_option("-d",
"--dataSet",
type=str,
default='nyc_taxi',
dest="dataSet",
help="DataSet Name, choose from rec-center-hourly, nyc_taxi")
parser.add_option("-p",
"--plot",
default=False,
dest="plot",
help="Set to True to plot result")
parser.add_option("--stepsAhead",
help="How many steps ahead to predict. [default: %default]",
default=5,
type=int)
parser.add_option("-c",
"--classifier",
type=str,
default='SDRClassifierRegion',
dest="classifier",
help="Classifier Type: SDRClassifierRegion or CLAClassifierRegion")
(options, remainder) = parser.parse_args()
print options
return options, remainder
def getInputRecord(df, predictedField, i):
inputRecord = {
predictedField: float(df[predictedField][i]),
"timeofday": float(df["timeofday"][i]),
"dayofweek": float(df["dayofweek"][i]),
}
return inputRecord
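# Hedged sketch (added for illustration): getInputRecord() just pulls one row
# out of a pandas DataFrame; the toy frame below mimics the nyc_taxi columns.
def _getInputRecordExample():
  toy = pd.DataFrame({"passenger_count": [10.0],
                      "timeofday": [540.0],
                      "dayofweek": [2.0]})
  # -> {"passenger_count": 10.0, "timeofday": 540.0, "dayofweek": 2.0}
  return getInputRecord(toy, "passenger_count", 0)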
def printTPRegionParams(tpregion):
"""
Note: assumes we are using TemporalMemory/TPShim in the TPRegion
"""
tm = tpregion.getSelf()._tfdr
print "------------PY TemporalMemory Parameters ------------------"
print "numberOfCols =", tm.getColumnDimensions()
print "cellsPerColumn =", tm.getCellsPerColumn()
print "minThreshold =", tm.getMinThreshold()
print "activationThreshold =", tm.getActivationThreshold()
print "newSynapseCount =", tm.getMaxNewSynapseCount()
print "initialPerm =", tm.getInitialPermanence()
print "connectedPerm =", tm.getConnectedPermanence()
print "permanenceInc =", tm.getPermanenceIncrement()
print "permanenceDec =", tm.getPermanenceDecrement()
print "predictedSegmentDecrement=", tm.getPredictedSegmentDecrement()
print
def runMultiplePass(df, model, nMultiplePass, nTrain):
"""
run CLA model through data record 0:nTrain nMultiplePass passes
"""
predictedField = model.getInferenceArgs()['predictedField']
print "run TM through the train data multiple times"
for nPass in xrange(nMultiplePass):
for j in xrange(nTrain):
inputRecord = getInputRecord(df, predictedField, j)
result = model.run(inputRecord)
if j % 100 == 0:
print " pass %i, record %i" % (nPass, j)
# reset temporal memory
model._getTPRegion().getSelf()._tfdr.reset()
return model
def runMultiplePassSPonly(df, model, nMultiplePass, nTrain):
"""
run CLA model SP through data record 0:nTrain nMultiplePass passes
"""
predictedField = model.getInferenceArgs()['predictedField']
print "run TM through the train data multiple times"
for nPass in xrange(nMultiplePass):
for j in xrange(nTrain):
inputRecord = getInputRecord(df, predictedField, j)
model._sensorCompute(inputRecord)
model._spCompute()
if j % 400 == 0:
print " pass %i, record %i" % (nPass, j)
return model
def movingAverage(a, n):
movingAverage = []
for i in xrange(len(a)):
start = max(0, i - n)
values = a[start:i+1]
movingAverage.append(sum(values) / float(len(values)))
return movingAverage
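# Hedged usage sketch (added; values are arbitrary): movingAverage() keeps the
# output the same length as the input and averages over at most the previous
# n points plus the current one.
def _movingAverageExample():
  data = [1.0, 2.0, 3.0, 4.0, 5.0]
  smoothed = movingAverage(data, 2)
  assert len(smoothed) == len(data)
  return smoothed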
if __name__ == "__main__":
(_options, _args) = _getArgs()
dataSet = _options.dataSet
plot = _options.plot
classifierType = _options.classifier
if dataSet == "rec-center-hourly":
DATE_FORMAT = "%m/%d/%y %H:%M" # '7/2/10 0:00'
predictedField = "kw_energy_consumption"
elif dataSet == "nyc_taxi" or dataSet == "nyc_taxi_perturb" or dataSet =="nyc_taxi_perturb_baseline":
DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
predictedField = "passenger_count"
else:
raise RuntimeError("un recognized dataset")
if dataSet == "nyc_taxi" or dataSet == "nyc_taxi_perturb" or dataSet =="nyc_taxi_perturb_baseline":
modelParams = getModelParamsFromName("nyc_taxi")
else:
modelParams = getModelParamsFromName(dataSet)
modelParams['modelParams']['clParams']['steps'] = str(_options.stepsAhead)
modelParams['modelParams']['clParams']['regionName'] = classifierType
print "Creating model from %s..." % dataSet
# use customized CLA model
model = CLAModel_custom(**modelParams['modelParams'])
model.enableInference({"predictedField": predictedField})
model.enableLearning()
model._spLearningEnabled = True
model._tpLearningEnabled = True
printTPRegionParams(model._getTPRegion())
inputData = "%s/%s.csv" % (DATA_DIR, dataSet.replace(" ", "_"))
sensor = model._getSensorRegion()
encoderList = sensor.getSelf().encoder.getEncoderList()
if sensor.getSelf().disabledEncoder is not None:
classifier_encoder = sensor.getSelf().disabledEncoder.getEncoderList()
classifier_encoder = classifier_encoder[0]
else:
classifier_encoder = None
_METRIC_SPECS = getMetricSpecs(predictedField, stepsAhead=_options.stepsAhead)
metric = metrics.getModule(_METRIC_SPECS[0])
metricsManager = MetricsManager(_METRIC_SPECS, model.getFieldInfo(),
model.getInferenceType())
if plot:
plotCount = 1
plotHeight = max(plotCount * 3, 6)
fig = plt.figure(figsize=(14, plotHeight))
gs = gridspec.GridSpec(plotCount, 1)
plt.title(predictedField)
plt.ylabel('Data')
    plt.xlabel('Time')
plt.tight_layout()
plt.ion()
print "Load dataset: ", dataSet
df = pd.read_csv(inputData, header=0, skiprows=[1, 2])
nMultiplePass = 5
nTrain = 5000
print " run SP through the first %i samples %i passes " %(nMultiplePass, nTrain)
model = runMultiplePassSPonly(df, model, nMultiplePass, nTrain)
model._spLearningEnabled = False
maxBucket = classifier_encoder.n - classifier_encoder.w + 1
likelihoodsVecAll = np.zeros((maxBucket, len(df)))
prediction_nstep = None
time_step = []
actual_data = []
patternNZ_track = []
predict_data = np.zeros((_options.stepsAhead, 0))
predict_data_ML = []
negLL_track = []
activeCellNum = []
predCellNum = []
predSegmentNum = []
predictedActiveColumnsNum = []
trueBucketIndex = []
sp = model._getSPRegion().getSelf()._sfdr
spActiveCellsCount = np.zeros(sp.getColumnDimensions())
output = nupic_output.NuPICFileOutput([dataSet])
for i in xrange(len(df)):
inputRecord = getInputRecord(df, predictedField, i)
tp = model._getTPRegion()
tm = tp.getSelf()._tfdr
prePredictiveCells = tm.getPredictiveCells()
prePredictiveColumn = np.array(list(prePredictiveCells)) / tm.cellsPerColumn
result = model.run(inputRecord)
trueBucketIndex.append(model._getClassifierInputRecord(inputRecord).bucketIndex)
predSegmentNum.append(len(tm.activeSegments))
sp = model._getSPRegion().getSelf()._sfdr
spOutput = model._getSPRegion().getOutputData('bottomUpOut')
spActiveCellsCount[spOutput.nonzero()[0]] += 1
activeDutyCycle = np.zeros(sp.getColumnDimensions(), dtype=np.float32)
sp.getActiveDutyCycles(activeDutyCycle)
overlapDutyCycle = np.zeros(sp.getColumnDimensions(), dtype=np.float32)
sp.getOverlapDutyCycles(overlapDutyCycle)
if i % 100 == 0 and i > 0:
plt.figure(1)
plt.clf()
plt.subplot(2, 2, 1)
plt.hist(overlapDutyCycle)
plt.xlabel('overlapDutyCycle')
plt.subplot(2, 2, 2)
plt.hist(activeDutyCycle)
plt.xlabel('activeDutyCycle-1000')
plt.subplot(2, 2, 3)
plt.hist(spActiveCellsCount)
plt.xlabel('activeDutyCycle-Total')
plt.draw()
tp = model._getTPRegion()
tm = tp.getSelf()._tfdr
tpOutput = tm.infActiveState['t']
predictiveCells = tm.getPredictiveCells()
predCellNum.append(len(predictiveCells))
predColumn = np.array(list(predictiveCells))/ tm.cellsPerColumn
patternNZ = tpOutput.reshape(-1).nonzero()[0]
activeColumn = patternNZ / tm.cellsPerColumn
activeCellNum.append(len(patternNZ))
predictedActiveColumns = np.intersect1d(prePredictiveColumn, activeColumn)
predictedActiveColumnsNum.append(len(predictedActiveColumns))
result.metrics = metricsManager.update(result)
negLL = result.metrics["multiStepBestPredictions:multiStep:"
"errorMetric='negativeLogLikelihood':steps=%d:window=1000:"
"field=%s"%(_options.stepsAhead, predictedField)]
if i % 100 == 0 and i>0:
negLL = result.metrics["multiStepBestPredictions:multiStep:"
"errorMetric='negativeLogLikelihood':steps=%d:window=1000:"
"field=%s"%(_options.stepsAhead, predictedField)]
nrmse = result.metrics["multiStepBestPredictions:multiStep:"
"errorMetric='nrmse':steps=%d:window=1000:"
"field=%s"%(_options.stepsAhead, predictedField)]
numActiveCell = np.mean(activeCellNum[-100:])
numPredictiveCells = np.mean(predCellNum[-100:])
numCorrectPredicted = np.mean(predictedActiveColumnsNum[-100:])
print "After %i records, %d-step negLL=%f nrmse=%f ActiveCell %f PredCol %f CorrectPredCol %f" % \
(i, _options.stepsAhead, negLL, nrmse, numActiveCell,
numPredictiveCells, numCorrectPredicted)
last_prediction = prediction_nstep
prediction_nstep = \
result.inferences["multiStepBestPredictions"][_options.stepsAhead]
output.write([i], [inputRecord[predictedField]], [float(prediction_nstep)])
bucketLL = \
result.inferences['multiStepBucketLikelihoods'][_options.stepsAhead]
likelihoodsVec = np.zeros((maxBucket,))
if bucketLL is not None:
for (k, v) in bucketLL.items():
likelihoodsVec[k] = v
time_step.append(i)
actual_data.append(inputRecord[predictedField])
predict_data_ML.append(
result.inferences['multiStepBestPredictions'][_options.stepsAhead])
negLL_track.append(negLL)
likelihoodsVecAll[0:len(likelihoodsVec), i] = likelihoodsVec
if plot and i > 500:
# prepare data for display
if i > 100:
time_step_display = time_step[-500:-_options.stepsAhead]
actual_data_display = actual_data[-500+_options.stepsAhead:]
predict_data_ML_display = predict_data_ML[-500:-_options.stepsAhead]
likelihood_display = likelihoodsVecAll[:, i-499:i-_options.stepsAhead+1]
xl = [(i)-500, (i)]
else:
time_step_display = time_step
actual_data_display = actual_data
predict_data_ML_display = predict_data_ML
likelihood_display = likelihoodsVecAll[:, :i+1]
xl = [0, (i)]
plt.figure(2)
plt.clf()
plt.imshow(likelihood_display,
extent=(time_step_display[0], time_step_display[-1], 0, 40000),
interpolation='nearest', aspect='auto',
origin='lower', cmap='Reds')
plt.colorbar()
plt.plot(time_step_display, actual_data_display, 'k', label='Data')
plt.plot(time_step_display, predict_data_ML_display, 'b', label='Best Prediction')
plt.xlim(xl)
plt.xlabel('Time')
plt.ylabel('Prediction')
# plt.title('TM, useTimeOfDay='+str(True)+' '+dataSet+' test neg LL = '+str(np.nanmean(negLL)))
plt.xlim([17020, 17300])
plt.ylim([0, 30000])
plt.clim([0, 1])
plt.draw()
predData_TM_n_step = np.roll(np.array(predict_data_ML), _options.stepsAhead)
nTest = len(actual_data) - nTrain - _options.stepsAhead
NRMSE_TM = NRMSE(actual_data[nTrain:nTrain+nTest], predData_TM_n_step[nTrain:nTrain+nTest])
print "NRMSE on test data: ", NRMSE_TM
output.close()
# calculate neg-likelihood
predictions = np.transpose(likelihoodsVecAll)
truth = np.roll(actual_data, -5)
from nupic.encoders.scalar import ScalarEncoder as NupicScalarEncoder
encoder = NupicScalarEncoder(w=1, minval=0, maxval=40000, n=22, forced=True)
from plot import computeLikelihood, plotAccuracy
bucketIndex2 = []
negLL = []
minProb = 0.0001
for i in xrange(len(truth)):
bucketIndex2.append(np.where(encoder.encode(truth[i]))[0])
outOfBucketProb = 1 - sum(predictions[i,:])
prob = predictions[i, bucketIndex2[i]]
if prob == 0:
prob = outOfBucketProb
if prob < minProb:
prob = minProb
negLL.append( -np.log(prob))
negLL = computeLikelihood(predictions, truth, encoder)
negLL[:5000] = np.nan
x = range(len(negLL))
plt.figure()
plotAccuracy((negLL, x), truth, window=480, errorType='negLL')
np.save('./result/'+dataSet+classifierType+'TMprediction.npy', predictions)
np.save('./result/'+dataSet+classifierType+'TMtruth.npy', truth)
plt.figure()
activeCellNumAvg = movingAverage(activeCellNum, 100)
plt.plot(np.array(activeCellNumAvg)/tm.numberOfCells())
plt.xlabel('data records')
plt.ylabel('sparsity')
plt.xlim([0, 5000])
plt.savefig('result/sparsity_over_training.pdf')
plt.figure()
predCellNumAvg = movingAverage(predCellNum, 100)
predSegmentNumAvg = movingAverage(predSegmentNum, 100)
# plt.plot(np.array(predCellNumAvg))
plt.plot(np.array(predSegmentNumAvg),'r', label='NMDA spike')
plt.plot(activeCellNumAvg,'b', label='spikes')
plt.xlabel('data records')
plt.ylabel('NMDA spike #')
plt.legend()
plt.xlim([0, 5000])
plt.ylim([0, 42])
plt.savefig('result/nmda_spike_over_training.pdf')
|
agpl-3.0
|
fivejjs/ibis
|
ibis/config.py
|
16
|
20779
|
# This file has been adapted from pandas/core/config.py. pandas 3-clause BSD
# license. See LICENSES/pandas
#
# Further modifications:
#
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from collections import namedtuple
from contextlib import contextmanager
import pprint
import warnings
import sys
from six import StringIO
PY3 = (sys.version_info[0] >= 3)
if PY3:
def u(s):
return s
else:
def u(s):
return unicode(s, "unicode_escape")
DeprecatedOption = namedtuple('DeprecatedOption', 'key msg rkey removal_ver')
RegisteredOption = namedtuple(
'RegisteredOption', 'key defval doc validator cb')
_deprecated_options = {}  # holds deprecated option metadata
_registered_options = {}  # holds registered option metadata
_global_config = {} # holds the current values for registered options
_reserved_keys = ['all'] # keys which have a special meaning
class OptionError(AttributeError, KeyError):
"""Exception for ibis.options, backwards compatible with KeyError
checks"""
#
# User API
def _get_single_key(pat, silent):
keys = _select_options(pat)
if len(keys) == 0:
if not silent:
_warn_if_deprecated(pat)
        raise OptionError('No such key(s): %r' % pat)
if len(keys) > 1:
raise OptionError('Pattern matched multiple keys')
key = keys[0]
if not silent:
_warn_if_deprecated(key)
key = _translate_key(key)
return key
def _get_option(pat, silent=False):
key = _get_single_key(pat, silent)
# walk the nested dict
root, k = _get_root(key)
return root[k]
def _set_option(*args, **kwargs):
    # must have at least 1 arg; deal with constraints later
nargs = len(args)
if not nargs or nargs % 2 != 0:
raise ValueError("Must provide an even number of non-keyword "
"arguments")
# default to false
silent = kwargs.get('silent', False)
for k, v in zip(args[::2], args[1::2]):
key = _get_single_key(k, silent)
o = _get_registered_option(key)
if o and o.validator:
o.validator(v)
# walk the nested dict
root, k = _get_root(key)
root[k] = v
if o.cb:
o.cb(key)
def _describe_option(pat='', _print_desc=True):
keys = _select_options(pat)
if len(keys) == 0:
        raise OptionError('No such key(s)')
s = u('')
for k in keys: # filter by pat
s += _build_option_description(k)
if _print_desc:
print(s)
else:
return s
def _reset_option(pat, silent=False):
keys = _select_options(pat)
if len(keys) == 0:
        raise OptionError('No such key(s)')
if len(keys) > 1 and len(pat) < 4 and pat != 'all':
raise ValueError('You must specify at least 4 characters when '
'resetting multiple keys, use the special keyword '
'"all" to reset all the options to their default '
'value')
for k in keys:
_set_option(k, _registered_options[k].defval, silent=silent)
def get_default_val(pat):
key = _get_single_key(pat, silent=True)
return _get_registered_option(key).defval
class DictWrapper(object):
""" provide attribute-style access to a nested dict
"""
def __init__(self, d, prefix=""):
object.__setattr__(self, "d", d)
object.__setattr__(self, "prefix", prefix)
def __repr__(self):
buf = StringIO()
pprint.pprint(self.d, stream=buf)
return buf.getvalue()
def __setattr__(self, key, val):
prefix = object.__getattribute__(self, "prefix")
if prefix:
prefix += "."
prefix += key
# you can't set new keys
        # and you can't overwrite subtrees
if key in self.d and not isinstance(self.d[key], dict):
_set_option(prefix, val)
else:
raise OptionError("You can only set the value of existing options")
def __getattr__(self, key):
prefix = object.__getattribute__(self, "prefix")
if prefix:
prefix += "."
prefix += key
v = object.__getattribute__(self, "d")[key]
if isinstance(v, dict):
return DictWrapper(v, prefix)
else:
return _get_option(prefix)
def __dir__(self):
return list(self.d.keys())
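# Hedged sketch (added for illustration; the option name is hypothetical and
# must be registered first): DictWrapper turns nested dict keys into attribute
# access, so reads and writes are routed through _get_option()/_set_option().
def _dict_wrapper_example():
    register_option('display.width', 80, doc='hypothetical example option')
    options.display.width = 120      # attribute write -> _set_option()
    return options.display.width     # attribute read  -> _get_option()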
# For user convenience, we'd like to have the available options described
# in the docstring. For dev convenience we'd like to generate the docstrings
# dynamically instead of maintaining them by hand. To this end, we use the
# class below which wraps functions inside a callable, and converts
# __doc__ into a property function. The docstrings below are templates
# using the py2.6+ advanced formatting syntax to plug in a concise list
# of options, and option descriptions.
class CallableDynamicDoc(object):
def __init__(self, func, doc_tmpl):
self.__doc_tmpl__ = doc_tmpl
self.__func__ = func
def __call__(self, *args, **kwds):
return self.__func__(*args, **kwds)
@property
def __doc__(self):
opts_desc = _describe_option('all', _print_desc=False)
opts_list = pp_options_list(list(_registered_options.keys()))
return self.__doc_tmpl__.format(opts_desc=opts_desc,
opts_list=opts_list)
_get_option_tmpl = """
get_option(pat)
Retrieves the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
Returns
-------
result : the value of the option
Raises
------
OptionError : if no such option exists
Notes
-----
The available options with their descriptions:
{opts_desc}
"""
_set_option_tmpl = """
set_option(pat, value)
Sets the value of the specified option.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp which should match a single option.
Note: partial matches are supported for convenience, but unless you use the
full option name (e.g. x.y.z.option_name), your code may break in future
versions if new options with similar names are introduced.
value :
new value of option.
Returns
-------
None
Raises
------
OptionError if no such option exists
Notes
-----
The available options with their descriptions:
{opts_desc}
"""
_describe_option_tmpl = """
describe_option(pat, _print_desc=False)
Prints the description for one or more registered options.
Call with no arguments to get a listing for all registered options.
Available options:
{opts_list}
Parameters
----------
pat : str
Regexp pattern. All matching keys will have their description displayed.
_print_desc : bool, default True
If True (default) the description(s) will be printed to stdout.
Otherwise, the description(s) will be returned as a unicode string
(for testing).
Returns
-------
None by default, the description(s) as a unicode string if _print_desc
is False
Notes
-----
The available options with their descriptions:
{opts_desc}
"""
_reset_option_tmpl = """
reset_option(pat)
Reset one or more options to their default value.
Pass "all" as argument to reset all options.
Available options:
{opts_list}
Parameters
----------
pat : str/regex
If specified only options matching `prefix*` will be reset.
Note: partial matches are supported for convenience, but unless you
use the full option name (e.g. x.y.z.option_name), your code may break
in future versions if new options with similar names are introduced.
Returns
-------
None
Notes
-----
The available options with their descriptions:
{opts_desc}
"""
# bind the functions with their docstrings into a Callable
# and use that as the functions exposed in pd.api
get_option = CallableDynamicDoc(_get_option, _get_option_tmpl)
set_option = CallableDynamicDoc(_set_option, _set_option_tmpl)
reset_option = CallableDynamicDoc(_reset_option, _reset_option_tmpl)
describe_option = CallableDynamicDoc(_describe_option, _describe_option_tmpl)
options = DictWrapper(_global_config)
#
# Functions for use by pandas developers, in addition to User - api
class option_context(object):
"""
Context manager to temporarily set options in the `with` statement context.
You need to invoke as ``option_context(pat, val, [(pat, val), ...])``.
Examples
--------
>>> with option_context('display.max_rows', 10, 'display.max_columns', 5):
...
"""
def __init__(self, *args):
if not (len(args) % 2 == 0 and len(args) >= 2):
raise ValueError(
                'Need to invoke as '
                'option_context(pat, val, [(pat, val), ...]).'
)
self.ops = list(zip(args[::2], args[1::2]))
def __enter__(self):
undo = []
for pat, val in self.ops:
undo.append((pat, _get_option(pat, silent=True)))
self.undo = undo
for pat, val in self.ops:
_set_option(pat, val, silent=True)
def __exit__(self, *args):
if self.undo:
for pat, val in self.undo:
_set_option(pat, val, silent=True)
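# Hedged usage sketch (added; the option name is hypothetical and must be
# registered first): option_context restores the previous value on exit.
def _option_context_example():
    register_option('display.precision', 6, doc='hypothetical example option')
    with option_context('display.precision', 2):
        assert get_option('display.precision') == 2
    return get_option('display.precision')   # back to 6 after the block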
def register_option(key, defval, doc='', validator=None, cb=None):
"""Register an option in the package-wide ibis config object
Parameters
----------
key - a fully-qualified key, e.g. "x.y.option - z".
defval - the default value of the option
doc - a string description of the option
validator - a function of a single argument, should raise `ValueError` if
called with a value which is not a legal value for the option.
cb - a function of a single argument "key", which is called
immediately after an option value is set/reset. key is
the full name of the option.
Returns
-------
Nothing.
Raises
------
ValueError if `validator` is specified and `defval` is not a valid value.
"""
import tokenize
import keyword
key = key.lower()
if key in _registered_options:
raise OptionError("Option '%s' has already been registered" % key)
if key in _reserved_keys:
raise OptionError("Option '%s' is a reserved key" % key)
# the default value should be legal
if validator:
validator(defval)
# walk the nested dict, creating dicts as needed along the path
path = key.split('.')
for k in path:
if not bool(re.match('^' + tokenize.Name + '$', k)):
raise ValueError("%s is not a valid identifier" % k)
if keyword.iskeyword(k):
raise ValueError("%s is a python keyword" % k)
cursor = _global_config
for i, p in enumerate(path[:-1]):
if not isinstance(cursor, dict):
raise OptionError("Path prefix to option '%s' is already an option"
% '.'.join(path[:i]))
if p not in cursor:
cursor[p] = {}
cursor = cursor[p]
if not isinstance(cursor, dict):
raise OptionError("Path prefix to option '%s' is already an option"
% '.'.join(path[:-1]))
cursor[path[-1]] = defval # initialize
# save the option metadata
_registered_options[key] = RegisteredOption(key=key, defval=defval,
doc=doc, validator=validator,
cb=cb)
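# Hedged sketch (added; the key and default are hypothetical): the validator
# runs on the default value at registration time and again on every set.
def _register_option_example():
    register_option('sql.default_limit', 10000,
                    doc='hypothetical row limit', validator=is_int)
    set_option('sql.default_limit', 5000)    # accepted by is_int
    return get_option('sql.default_limit')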
def deprecate_option(key, msg=None, rkey=None, removal_ver=None):
"""
Mark option `key` as deprecated, if code attempts to access this option,
a warning will be produced, using `msg` if given, or a default message
if not.
if `rkey` is given, any access to the key will be re-routed to `rkey`.
    Neither the existence of `key` nor that of `rkey` is checked. If they
    do not exist, any subsequent access will fail as usual, after the
    deprecation warning is given.
Parameters
----------
key - the name of the option to be deprecated. must be a fully-qualified
option name (e.g "x.y.z.rkey").
msg - (Optional) a warning message to output when the key is referenced.
if no message is given a default message will be emitted.
rkey - (Optional) the name of an option to reroute access to.
If specified, any referenced `key` will be re-routed to `rkey`
including set/get/reset.
rkey must be a fully-qualified option name (e.g "x.y.z.rkey").
used by the default message if no `msg` is specified.
removal_ver - (Optional) specifies the version in which this option will
be removed. used by the default message if no `msg`
is specified.
Returns
-------
Nothing
Raises
------
OptionError - if key has already been deprecated.
"""
key = key.lower()
if key in _deprecated_options:
raise OptionError("Option '%s' has already been defined as deprecated."
% key)
_deprecated_options[key] = DeprecatedOption(key, msg, rkey, removal_ver)
#
# functions internal to the module
def _select_options(pat):
"""returns a list of keys matching `pat`
if pat=="all", returns all registered options
"""
# short-circuit for exact key
if pat in _registered_options:
return [pat]
# else look through all of them
keys = sorted(_registered_options.keys())
if pat == 'all': # reserved key
return keys
return [k for k in keys if re.search(pat, k, re.I)]
def _get_root(key):
path = key.split('.')
cursor = _global_config
for p in path[:-1]:
cursor = cursor[p]
return cursor, path[-1]
def _is_deprecated(key):
""" Returns True if the given option has been deprecated """
key = key.lower()
return key in _deprecated_options
def _get_deprecated_option(key):
"""
Retrieves the metadata for a deprecated option, if `key` is deprecated.
Returns
-------
DeprecatedOption (namedtuple) if key is deprecated, None otherwise
"""
try:
d = _deprecated_options[key]
except KeyError:
return None
else:
return d
def _get_registered_option(key):
"""
Retrieves the option metadata if `key` is a registered option.
Returns
-------
    RegisteredOption (namedtuple) if key is a registered option, None otherwise
"""
return _registered_options.get(key)
def _translate_key(key):
"""
    if key is deprecated and a replacement key is defined, will return the
    replacement key, otherwise returns `key` as-is
"""
d = _get_deprecated_option(key)
if d:
return d.rkey or key
else:
return key
def _warn_if_deprecated(key):
"""
Checks if `key` is a deprecated option and if so, prints a warning.
Returns
-------
bool - True if `key` is deprecated, False otherwise.
"""
d = _get_deprecated_option(key)
if d:
if d.msg:
print(d.msg)
warnings.warn(d.msg, DeprecationWarning)
else:
msg = "'%s' is deprecated" % key
if d.removal_ver:
msg += ' and will be removed in %s' % d.removal_ver
if d.rkey:
msg += ", please use '%s' instead." % d.rkey
else:
msg += ', please refrain from using it.'
warnings.warn(msg, DeprecationWarning)
return True
return False
def _build_option_description(k):
""" Builds a formatted description of a registered option and prints it """
o = _get_registered_option(k)
d = _get_deprecated_option(k)
s = u('%s ') % k
if o.doc:
s += '\n'.join(o.doc.strip().split('\n'))
else:
s += 'No description available.'
if o:
s += u('\n [default: %s] [currently: %s]') % (o.defval,
_get_option(k, True))
if d:
s += u('\n (Deprecated')
s += (u(', use `%s` instead.') % d.rkey if d.rkey else '')
s += u(')')
s += '\n\n'
return s
def pp_options_list(keys, width=80, _print=False):
""" Builds a concise listing of available options, grouped by prefix """
from textwrap import wrap
from itertools import groupby
def pp(name, ks):
pfx = ('- ' + name + '.[' if name else '')
ls = wrap(', '.join(ks), width, initial_indent=pfx,
subsequent_indent=' ', break_long_words=False)
if ls and ls[-1] and name:
ls[-1] = ls[-1] + ']'
return ls
ls = []
singles = [x for x in sorted(keys) if x.find('.') < 0]
if singles:
ls += pp('', singles)
keys = [x for x in keys if x.find('.') >= 0]
for k, g in groupby(sorted(keys), lambda x: x[:x.rfind('.')]):
ks = [x[len(k) + 1:] for x in list(g)]
ls += pp(k, ks)
s = '\n'.join(ls)
if _print:
print(s)
else:
return s
#
# helpers
@contextmanager
def config_prefix(prefix):
"""contextmanager for multiple invocations of API with a common prefix
    supported API functions: (register / get / set)_option
    Warning: This is not thread-safe, and won't work properly if you import
the API functions into your module using the "from x import y" construct.
Example:
import ibis.config as cf
with cf.config_prefix("display.font"):
cf.register_option("color", "red")
cf.register_option("size", " 5 pt")
cf.set_option(size, " 6 pt")
cf.get_option(size)
...
    etc.
will register options "display.font.color", "display.font.size", set the
value of "display.font.size"... and so on.
"""
# Note: reset_option relies on set_option, and on key directly
# it does not fit in to this monkey-patching scheme
global register_option, get_option, set_option, reset_option
def wrap(func):
def inner(key, *args, **kwds):
pkey = '%s.%s' % (prefix, key)
return func(pkey, *args, **kwds)
return inner
_register_option = register_option
_get_option = get_option
_set_option = set_option
set_option = wrap(set_option)
get_option = wrap(get_option)
register_option = wrap(register_option)
yield None
set_option = _set_option
get_option = _get_option
register_option = _register_option
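# Hedged sketch (added; option names are hypothetical): inside
# config_prefix("display.table"), register_option("max_rows", ...) actually
# registers "display.table.max_rows".
def _config_prefix_example():
    with config_prefix('display.table'):
        register_option('max_rows', 60, doc='hypothetical example option')
        set_option('max_rows', 20)
    return get_option('display.table.max_rows')   # full key outside the block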
# These factories and methods are handy for use as the validator
# arg in register_option
def is_type_factory(_type):
"""
Parameters
----------
`_type` - a type to be compared against (e.g. type(x) == `_type`)
Returns
-------
    validator - a function of a single argument x, which raises
        ValueError if type(x) is not equal to `_type`
"""
def inner(x):
if type(x) != _type:
raise ValueError("Value must have type '%s'" % str(_type))
return inner
def is_instance_factory(_type):
"""
Parameters
----------
`_type` - the type to be checked against
Returns
-------
    validator - a function of a single argument x, which raises
        ValueError if x is not an instance of `_type`
"""
if isinstance(_type, (tuple, list)):
_type = tuple(_type)
type_repr = "|".join(map(str, _type))
else:
type_repr = "'%s'" % _type
def inner(x):
if not isinstance(x, _type):
raise ValueError("Value must be an instance of %s" % type_repr)
return inner
def is_one_of_factory(legal_values):
def inner(x):
if x not in legal_values:
pp_values = map(str, legal_values)
raise ValueError("Value must be one of %s"
% str("|".join(pp_values)))
return inner
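# Hedged sketch (added): the validator factories return a function that raises
# ValueError for bad values and returns None otherwise.
def _validator_example():
    is_backend = is_one_of_factory(['pandas', 'impala'])
    is_backend('pandas')        # accepted, returns None
    try:
        is_backend('oracle')    # rejected
    except ValueError as exc:
        return str(exc)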
# common type validators, for convenience
# usage: register_option(... , validator = is_int)
is_int = is_type_factory(int)
is_bool = is_type_factory(bool)
is_float = is_type_factory(float)
is_str = is_type_factory(str)
# is_unicode = is_type_factory(compat.text_type)
is_text = is_instance_factory((str, bytes))
|
apache-2.0
|
reyoung/Paddle
|
python/paddle/v2/dataset/uci_housing.py
|
7
|
4064
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
UCI Housing dataset.
This module will download dataset from
https://archive.ics.uci.edu/ml/machine-learning-databases/housing/ and
parse training set and test set into paddle reader creators.
"""
import numpy as np
import os
import paddle.v2.dataset.common
from paddle.v2.parameters import Parameters
__all__ = ['train', 'test']
URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data'
MD5 = 'd4accdce7a25600298819f8e28e8d593'
feature_names = [
'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX',
'PTRATIO', 'B', 'LSTAT', 'convert'
]
UCI_TRAIN_DATA = None
UCI_TEST_DATA = None
URL_MODEL = 'https://github.com/PaddlePaddle/book/raw/develop/01.fit_a_line/fit_a_line.tar'
MD5_MODEL = '52fc3da8ef3937822fcdd87ee05c0c9b'
def feature_range(maximums, minimums):
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
feature_num = len(maximums)
ax.bar(range(feature_num), maximums - minimums, color='r', align='center')
ax.set_title('feature scale')
plt.xticks(range(feature_num), feature_names)
plt.xlim([-1, feature_num])
fig.set_figheight(6)
fig.set_figwidth(10)
if not os.path.exists('./image'):
os.makedirs('./image')
fig.savefig('image/ranges.png', dpi=48)
plt.close(fig)
def load_data(filename, feature_num=14, ratio=0.8):
global UCI_TRAIN_DATA, UCI_TEST_DATA
if UCI_TRAIN_DATA is not None and UCI_TEST_DATA is not None:
return
data = np.fromfile(filename, sep=' ')
data = data.reshape(data.shape[0] / feature_num, feature_num)
maximums, minimums, avgs = data.max(axis=0), data.min(axis=0), data.sum(
axis=0) / data.shape[0]
feature_range(maximums[:-1], minimums[:-1])
for i in xrange(feature_num - 1):
data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i])
offset = int(data.shape[0] * ratio)
UCI_TRAIN_DATA = data[:offset]
UCI_TEST_DATA = data[offset:]
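# Hedged sketch of the per-column normalization applied above (illustrative
# constants only): each feature is rescaled as (x - mean) / (max - min).
def _normalize_example():
    col = np.array([1.0, 2.0, 4.0])
    normalized = (col - col.mean()) / (col.max() - col.min())
    return normalized   # -> array([-0.4444..., -0.1111...,  0.5555...])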
def train():
"""
UCI_HOUSING training set creator.
It returns a reader creator, each sample in the reader is features after
normalization and price number.
:return: Training reader creator
:rtype: callable
"""
global UCI_TRAIN_DATA
load_data(paddle.v2.dataset.common.download(URL, 'uci_housing', MD5))
def reader():
for d in UCI_TRAIN_DATA:
yield d[:-1], d[-1:]
return reader
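# Hedged usage sketch (added; requires the dataset to be downloadable via
# paddle.v2.dataset.common.download): each sample is (13 features, 1 price).
def _train_reader_example():
    reader = train()
    features, price = next(iter(reader()))
    assert len(features) == 13 and len(price) == 1
    return features, price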
def test():
"""
UCI_HOUSING test set creator.
It returns a reader creator, each sample in the reader is features after
normalization and price number.
:return: Test reader creator
:rtype: callable
"""
global UCI_TEST_DATA
load_data(paddle.v2.dataset.common.download(URL, 'uci_housing', MD5))
def reader():
for d in UCI_TEST_DATA:
yield d[:-1], d[-1:]
return reader
def model():
tar_file = paddle.v2.dataset.common.download(URL_MODEL, 'fit_a_line.tar',
MD5_MODEL)
with open(tar_file, 'r') as f:
parameters = Parameters.from_tar(f)
return parameters
def fetch():
paddle.v2.dataset.common.download(URL, 'uci_housing', MD5)
def convert(path):
"""
Converts dataset to recordio format
"""
paddle.v2.dataset.common.convert(path, train(), 1000, "uci_housing_train")
    paddle.v2.dataset.common.convert(path, test(), 1000, "uci_housing_test")
|
apache-2.0
|
loli/sklearn-ensembletrees
|
sklearn/linear_model/stochastic_gradient.py
|
2
|
42996
|
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
import warnings
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (atleast2d_or_csr, check_arrays, check_random_state,
column_or_1d)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..externals import six
from .sgd_fast import plain_sgd
from ..utils.seq_dataset import ArrayDataset, CSRDataset
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
SPARSE_INTERCEPT_DECAY = 0.01
"""For sparse data intercept updates are scaled by this decay factor to avoid
intercept oscillation."""
DEFAULT_EPSILON = 0.1
"""Default value of ``epsilon`` parameter. """
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=False,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self._validate_params()
self.coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _init_t(self, loss_function):
"""Initialize iteration counter attr ``t_``.
If ``self.learning_rate=='optimal'`` initialize ``t_`` such that
``eta`` at first sample equals ``self.eta0``.
"""
self.t_ = 1.0
if self.learning_rate == "optimal":
typw = np.sqrt(1.0 / np.sqrt(self.alpha))
# computing eta0, the initial learning rate
eta0 = typw / max(1.0, loss_function.dloss(-typw, 1.0))
# initialize t such that eta at first sample equals eta0
self.t_ = 1.0 / (eta0 * self.alpha)
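    # Hedged worked example of the "optimal" initialization above (added for
    # clarity; numbers are illustrative). With alpha = 0.0001 and the default
    # hinge loss, dloss(-typw, 1.0) is -1.0, so the max(...) clamps to 1.0:
    #   typw = sqrt(1 / sqrt(0.0001)) = 10.0
    #   eta0 = 10.0 / 1.0             = 10.0
    #   t_   = 1 / (10.0 * 0.0001)    = 1000.0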
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided coef_ does not match dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features, dtype=np.float64, order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
def _check_fit_data(X, y):
"""Check if shape of input data matches. """
n_samples, _ = X.shape
if n_samples != y.shape[0]:
raise ValueError("Shapes of X and y do not match.")
def _make_dataset(X, y_i, sample_weight):
"""Create ``Dataset`` abstraction for sparse and dense inputs.
This also returns the ``intercept_decay`` which is different
for sparse datasets.
"""
if sp.issparse(X):
dataset = CSRDataset(X.data, X.indptr, X.indices, y_i, sample_weight)
intercept_decay = SPARSE_INTERCEPT_DECAY
else:
dataset = ArrayDataset(X, y_i, sample_weight)
intercept_decay = 1.0
return dataset, intercept_decay
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
if len(est.classes_) == 2:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.coef_[i]
intercept = est.intercept_[i]
return y_i, coef, intercept
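# Hedged illustration (added helper; toy values): _prepare_fit_binary encodes
# the i'th class as +1 and every other class as -1 for one-vs.-all training.
def _ova_target_example():
    y = np.array([0, 1, 2, 1])
    classes = np.array([0, 1, 2])
    y_i = np.ones(y.shape, dtype=np.float64)
    y_i[y != classes[1]] = -1.0   # "positive" class is classes[1] == 1
    return y_i                    # -> array([-1.,  1., -1.,  1.])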
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
y_i, coef, intercept = _prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = _make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=False, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X = atleast2d_or_csr(X, dtype=np.float64, order="C")
y = column_or_1d(y, warn=True)
n_samples, n_features = X.shape
_check_fit_data(X, y)
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
y_ind = np.searchsorted(self.classes_, y) # XXX use a LabelBinarizer?
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_,
y_ind)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self._init_t(self.loss_function)
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
self.t_ += n_iter * n_samples
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X = atleast2d_or_csr(X, dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
# need to be 2d
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of the training data
y : numpy array of shape [n_samples]
Subset of the target values
classes : array, shape = [n_classes]
Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None,
class_weight=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_classes,n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [n_classes]
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
    one sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'l2' or 'l1' or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept: bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter: int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle: bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to False.
random_state: int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose: integer, optional
The verbosity level
epsilon: float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs: integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0 / (t + t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label : weight} or "auto" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "auto" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies.
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
`coef_` : array, shape = [1, n_features] if n_classes == 2 else [n_classes,
n_features]
Weights assigned to the features.
`intercept_` : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, class_weight=None, epsilon=0.1, eta0=0.0,
fit_intercept=True, l1_ratio=0.15, learning_rate='optimal',
loss='hinge', n_iter=5, n_jobs=1, penalty='l2', power_t=0.5,
random_state=None, shuffle=False,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=False, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start)
def _check_proba(self):
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
array, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
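# Illustrative sketch (not part of scikit-learn): this reproduces, on made-up
# decision scores, the binary probability mapping that the ``predict_proba``
# docstring above describes for loss="modified_huber", i.e.
# (clip(decision_function(X), -1, 1) + 1) / 2.
def _demo_modified_huber_proba():
    scores = np.array([-2.5, -0.3, 0.0, 0.7, 3.1])   # hypothetical decision scores
    prob_pos = (np.clip(scores, -1, 1) + 1.0) / 2.0  # P(y == classes_[1])
    # Stack the complementary column to obtain the (n_samples, 2) shape
    # documented for binary problems.
    return np.column_stack([1.0 - prob_pos, prob_pos])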
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=False,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_arrays(X, y, sparse_format="csr", copy=False,
check_ccontiguous=True, dtype=np.float64)
y = column_or_1d(y, warn=True)
n_samples, n_features = X.shape
_check_fit_data(X, y)
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
self.t_ += n_iter * n_samples
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Subset of training data
y : numpy array of shape [n_samples]
Subset of target values
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : numpy array of shape [n_samples]
Target values
coef_init : array, shape = [n_features]
The initial coefficients to warm-start the optimization.
intercept_init : array, shape = [1]
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
array, shape = [n_samples]
Predicted target values per element in X.
"""
X = atleast2d_or_csr(X)
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
array, shape = [n_samples]
Predicted target values per element in X.
"""
return self.decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = _make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self._init_t(loss_function)
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
self.coef_, intercept = plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.intercept_ = np.atleast_1d(intercept)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated each sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'l2' or 'l1' or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
        'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept: bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter: int, optional
The number of passes over the training data (aka epochs).
Defaults to 5.
shuffle: bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to False.
random_state: int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose: integer, optional
The verbosity level.
epsilon: float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(t+t0)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Attributes
----------
`coef_` : array, shape = [n_features]
        Weights assigned to the features.
`intercept_` : array, shape = [1]
The intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
SGDRegressor(alpha=0.0001, epsilon=0.1, eta0=0.01, fit_intercept=True,
l1_ratio=0.15, learning_rate='invscaling', loss='squared_loss',
n_iter=5, penalty='l2', power_t=0.25, random_state=None,
shuffle=False, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=False,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start)
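# Illustrative sketch (not part of scikit-learn): the three learning-rate
# schedules named in the docstrings above, written out as plain arithmetic.
# ``t0`` is chosen internally by the 'optimal' schedule; the default used here
# is a placeholder for illustration only.
def _demo_learning_rate_schedules(t, eta0=0.01, power_t=0.25, t0=1.0):
    constant = eta0                     # learning_rate='constant'
    optimal = 1.0 / (t + t0)            # learning_rate='optimal' (SGDClassifier default)
    invscaling = eta0 / (t ** power_t)  # learning_rate='invscaling' (SGDRegressor default)
    return constant, optimal, invscaling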
|
bsd-3-clause
|
lacava/few
|
setup.py
|
1
|
3433
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
from setuptools.extension import Extension
from Cython.Build import cythonize
# the setup file relies on eigency to import its include paths for the
# extension modules. however eigency isn't known as a dependency until after
# setup is parsed; so we need to check for and install eigency before setup.
import importlib
try:
importlib.import_module('eigency')
except (ImportError, AttributeError):
try:
from pip._internal import main
main(['install', 'eigency'])
except ImportError:
raise ImportError('The eigency library must be installed before FEW. '
'Automatic install with pip failed.')
finally:
globals()['eigency'] = importlib.import_module('eigency')
def calculate_version():
initpy = open('few/_version.py').read().split('\n')
version = list(filter(lambda x: '__version__' in x, initpy))[0].split('\'')[1]
return version
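# Minimal sketch of the line format calculate_version() expects to find in
# few/_version.py; the version string below is hypothetical.
def _demo_parse_version_line():
    line = "__version__ = '0.1.0'"  # hypothetical contents of few/_version.py
    return line.split('\'')[1]      # -> '0.1.0'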
package_version = calculate_version()
# few_lib = Extension(name='few_lib',
# sources=['few/lib/epsilon_lexicase.cpp'],
# include_dirs = ['/usr/include/eigen3'],
# depends = ['Eigen/Dense.h'],
# extra_compile_args = ['-std=c++0x']
# )
# check if windows or *nix
from sys import platform
print('platform:',platform)
if platform == 'win32':
eca = ''
else:
eca = '-std=c++0x'
setup(
name='FEW',
version=package_version,
author='William La Cava',
author_email='lacava@upenn.edu',
packages=find_packages(),
url='https://github.com/lacava/few',
download_url='https://github.com/lacava/few/releases/tag/'+package_version,
license='GNU/GPLv3',
entry_points={'console_scripts': ['few=few:main', ]},
test_suite='nose.collector',
tests_require=['nose'],
description=('Feature Engineering Wrapper'),
long_description='''
A feature engineering wrapper for scikit-learn based on genetic programming.
Contact:
===
e-mail: lacava@upenn.edu
This project is hosted at https://github.com/lacava/few
''',
zip_safe=True,
install_requires=['numpy', 'scipy', 'pandas', 'scikit-learn',
'update_checker', 'tqdm', 'joblib','DistanceClassifier',
'scikit-mdr','Cython', 'eigency'],
setup_requires=['numpy', 'scipy', 'pandas', 'scikit-learn',
'update_checker', 'tqdm', 'joblib','DistanceClassifier',
'scikit-mdr','Cython', 'eigency'],
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
# 'Programming Language :: Python :: 2.7',
# 'Programming Language :: Python :: 3',
# 'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Scientific/Engineering :: Artificial Intelligence'
],
keywords=['data science', 'machine learning', 'classification'],
ext_modules=cythonize([Extension(name="few_lib",
sources=["few/lib/few_lib.pyx"],
include_dirs=[".", "./few/lib"] +
eigency.get_includes(),
extra_compile_args = [eca])],
language="c++")
)
|
gpl-3.0
|
tangi75/expfactory-python
|
expfactory/survey.py
|
2
|
29281
|
'''
survey.py: part of expfactory package
Functions to work with javascript surveys
'''
from expfactory.experiment import get_experiments
from exceptions import ValueError
from glob import glob
import pandas
import json
import uuid
import re
import os
def get_surveys(survey_repo=None,load=False,warning=True,repo_type="surveys"):
    '''get_surveys is a wrapper for "get_experiments" - the functionality is the same, but provided for users: return loaded json for all valid surveys from a surveys folder
:param survey_repo: full path to the surveys repo
:param load: if True, returns a list of loaded config.json objects. If False (default) returns the paths to the experiments
:param repo_type: tells the user what kind of task is being parsed, default is "experiments," but can also be "surveys" when called by get_surveys
'''
return get_experiments(experiment_repo=survey_repo,load=load,warning=warning,repo_type=repo_type)
def get_question_types():
'''get_question_types returns a list of possible question types
'''
return ["radio","checkbox","textfield","textarea","numeric","table","instruction"]
# CREATION FUNCTIONS ##############################################################################
def create_instruction(text,id_attribute,tag="h2"):
'''create_instruction creates a tag of type [tag] with some text inside, useful for description or instructions.
:param text: the text to give in the instruction.
:param id_attribute: the unique id for the question
:param tag: the html tag for the instruction (eg, p or h2)
'''
return "<%s>%s</%s><br><br><br><br>" %(tag,text,tag)
def format_options_values(options,values):
if isinstance(options,str):
options = [options]
if isinstance(values,str):
values = [values]
return options,values
def get_required_string(required_int):
required = ""
if required_int == 1:
required = "required"
return required
def parse_meta(text,options=None):
'''parse_meta returns fields to include in inputs for question text and options, for export
:param text: the text of the question (required)
:param options: options to include with the data (optional)
'''
text = text.replace('"',"'")
if options!= None:
options_joined = "|".join([x.replace('"',"'") for x in options])
return 'meta-options="%s" meta-text="%s"' %(options_joined,text)
return 'meta-text="%s"' %(text)
def add_classes(classes,new_classes):
    '''add_classes adds a string of new classes to current, if defined
:param classes: string of current classes, must be defined
:param new_classes: new classes to add, optional
'''
if new_classes != None:
classes = "%s %s" %(classes,new_classes)
return classes
def create_radio(text,id_attribute,options,values,classes="",required=0,validate=False):
'''create_radio generate a material lite radio button given a text field, and a set of options.
:param text: The text (content) of the question to ask
:param id_attribute: the unique id for the question
:param options: a list of text options for the user to select from (not the value of the field)
:param values: a list of values for corresponding options
:param classes: classes to add to the default, should be a string
:param required: is the question required? 0=False,1=True, default 0
:param validate: throw an error in the case that number of values != number of option (for testing)
'''
class_names = "mdl-radio mdl-js-radio mdl-js-ripple-effect"
options,values = format_options_values(options,values)
required = get_required_string(required)
meta = parse_meta(text,options)
# If going through validation, tell the user the question, etc.
if validate == True:
print "Testing question %s with text %s" %(id_attribute,text)
# If options provided are equal to values, parse the question
if len(options) == len(values):
radio_html = '<p id="%s_options">%s</p>' %(id_attribute,text)
for n in range(len(options)):
option_id = "%s_%s" %(id_attribute,n)
radio_html = '%s\n<label class="%s" for="option-%s">\n<input type="radio" id="option-%s" class="mdl-radio__button %s %s" name="%s_options" value="%s" %s>\n<span class="mdl-radio__label">%s</span>\n</label>' %(radio_html,class_names,option_id,option_id,required,classes,id_attribute,values[n],meta,options[n])
return "%s<br><br><br><br>" %(radio_html)
# Otherwise, we cannot include it
else:
error_message = "ERROR: %s options provided, and only %s values. Must define one option per value." %(len(options),len(values))
if validate == True:
raise ValueError(error_message)
else:
print error_message
return ""
def create_checkbox(text,id_attribute,options,classes="",required=0):
'''create_checkbox generate a material lite checkbox field given a text field, and a set of options.
:param text: The text (content) of the question to ask
:param options: a list of text options for the user to select from
:param id_attribute: the unique id for the question
:param classes: additional classes to add to the input, should be a string
:param required: is the question required? 0=False,1=True, default 0
'''
class_names = "mdl-checkbox mdl-js-checkbox mdl-js-ripple-effect"
required = get_required_string(required)
meta = parse_meta(text,options)
checkbox_html = '<p id="%s_options">%s</p>' %(id_attribute,text)
for n in range(len(options)):
option_id = "%s_%s" %(id_attribute,n)
checkbox_html = '%s\n<label class="%s" for="checkbox-%s">\n<input type="checkbox" id="checkbox-%s" %s class="mdl-checkbox__input %s %s" name="%s_options" value="%s">\n<span class="mdl-checkbox__label">%s</span>\n</label>' %(checkbox_html,class_names,option_id,option_id,meta,classes,required,option_id,options[n],options[n])
return "%s<br><br><br>" %(checkbox_html)
def base_textfield(text,id_attribute,box_text=None):
'''format_textfield parses input for a general textfield, returning base html, box_text, and id.
:param text: Any text content to precede the question field (default is None)
:param id_attribute: the unique id for the question
:param box_text: text content to go inside the box (default is None)
'''
if box_text == None:
box_text = ""
textfield_html = ""
if text != None:
textfield_html = '<p id="%s">%s</p>' %(id_attribute,text)
return textfield_html,box_text
def create_textfield(text,id_attribute,box_text=None,classes="",required=0):
'''create_textfield generates a material lite text field given a text prompt.
:param text: Any text content to precede the question field (default is None)
:param id_attribute: the unique id for the question
:param box_text: text content to go inside the box (default is None)
:param classes: additional classes to add to the input, should be a string
:param required: is the question required? 0=False,1=True, default 0
'''
class_names = "mdl-textfield mdl-js-textfield"
textfield_html,box_text = base_textfield(text,id_attribute,box_text)
required = get_required_string(required)
meta = parse_meta(text)
return '%s\n<div class="%s">\n<input class="mdl-textfield__input %s %s" name="%s" type="text" id="%s" %s>\n<label class="mdl-textfield__label" for="%s">%s</label>\n</div><br><br><br>' %(textfield_html,class_names,classes,required,id_attribute,id_attribute,meta,id_attribute,box_text)
def create_numeric_textfield(text,id_attribute,box_text=None,classes="",required=0):
    '''create_numeric_textfield generates a material lite numeric text field given a text prompt.
:param text: Any text content to precede the question field (default is None)
:param id_attribute: the unique id for the question
:param box_text: text content to go inside the box (default is None)
:param classes: classes to add to the input. Must be a string.
:param required: is the question required? 0=False,1=True, default 0
'''
class_names = "mdl-textfield mdl-js-textfield"
required = get_required_string(required)
textfield_html,box_text = base_textfield(text,id_attribute,box_text)
meta = parse_meta(text)
return '%s\n<div class="%s">\n<input class="mdl-textfield__input %s %s" type="number" id="%s" name="%s" %s>\n<label class="mdl-textfield__label" for="%s">%s</label>\n<span class="mdl-textfield__error">Input is not a number!</span>\n</div><br><br><br>' %(textfield_html,class_names,classes,required,id_attribute,id_attribute,meta,id_attribute,box_text)
def create_select_table(text,id_attribute,df,classes="",required=0):
'''create_select_table generates a material lite table from a pandas data frame.
:param df: A pandas data frame, with column names corresponding to columns, and rows
:param id_attribute: the unique id for the question
:param text: A text prompt to put before the table
:param classes: the classes to apply to the input. If none, default will be used.
:param required: is the question required? 0=False,1=True, default 0
'''
if isinstance(df,pandas.DataFrame):
class_names = "mdl-data-table mdl-js-data-table mdl-data-table--selectable mdl-shadow--2dp %s" %(required)
required = get_required_string(required)
table_html = '<p>%s</p>\n<table id="%s" class="%s %s">\n<thead>\n<tr>' %(text,id_attribute,class_names,classes)
meta = parse_meta(text)
# Parse column names
column_names = df.columns.tolist()
        for column_name in column_names:
table_html = '%s\n<th class="mdl-data-table__cell--non-numeric">%s</th>' %(table_html,column_name)
table_html = "%s\n</tr>\n</thead>\n<tbody>" %(table_html)
# Parse rows
for row in df.iterrows():
row_id = row[0]
table_html = "%s\n<tr>" %(table_html)
values = row[1].tolist()
for value in values:
if isinstance(value,str) or isinstance(value,unicode):
table_html = '%s\n<td class="mdl-data-table__cell--non-numeric">%s</td>' %(table_html,str(value))
else:
table_html = '%s\n<td>%s</td>' %(table_html,value)
table_html = "%s\n</tr>" %(table_html)
return "%s\n</tbody>\n</table><br><br><br>" %(table_html)
print "ERROR: DataFrame (df) must be a pandas.DataFrame"
def create_textarea(text,id_attribute,box_text=None,classes="",rows=3,required=0):
'''create_textarea generates a material lite multi line text field with a text prompt.
:param text: A text prompt to put before the text field
:param id_attribute: the unique id for the question
:param classes: classes to add to the text field (optional) should be string.
:param rows: number of rows to include in text field (default 3)
:param required: is the question required? 0=False,1=True, default 0
'''
textfield_html,box_text = base_textfield(text,id_attribute,box_text)
meta = parse_meta(text)
class_names = "mdl-textfield mdl-js-textfield"
return '%s\n<div class="%s"><textarea class="mdl-textfield__input %s %s" type="text" rows="%s" id="%s" name="%s" %s ></textarea>\n<label class="mdl-textfield__label" for="%s">%s</label></div><br><br><br>' %(textfield_html,class_names,classes,required,rows,id_attribute,id_attribute,meta,id_attribute,box_text)
# EXPORT FUNCTIONS ##############################################################################
def export_instruction(text,id_attribute,required=0):
return {"text":text,"id":id_attribute,"required":required}
def export_radio(text,id_attribute,options,values,required=0):
'''export_radio returns a json data structure of the question
:param text: The text (content) of the question to ask
:param id_attribute: the unique id for the question
:param options: a list of text options for the user to select from (not the value of the field)
:param values: a list of values for corresponding options
:param required: is the question required? 0=False,1=True, default 0
'''
options,values = format_options_values(options,values)
question_list = {}
if len(options) == len(values):
question_list["id"] = "%s_options" %(id_attribute)
question_list["required"] = required
question_list["text"] = text
option_list = []
for n in range(len(options)):
option_id = "%s_%s" %(id_attribute,n)
option_list.append({"id":option_id,"value":values[n],"text":options[n]})
question_list["options"] = option_list
return question_list
def export_checkbox(text,id_attribute,options,required=0):
'''export_checkbox returns json data structure to describe checkbox
:param text: The text (content) of the question to ask
:param options: a list of text options for the user to select from
:param id_attribute: the unique id for the question
:param required: is the question required? 0=False,1=True, default 0
'''
new_questions = []
option_list = []
for n in range(len(options)):
option_id = "%s_%s" %(id_attribute,n)
option_list.append({"id":option_id,"text":options[n]})
for n in range(len(options)):
option_id = "%s_%s" %(id_attribute,n)
option_entry = {"id":"%s_options" %(option_id),
"required":required,
"text":text,
"options":option_list,
"value":options[n]}
new_questions.append(option_entry)
return new_questions
def export_textfield(text,id_attribute,required=0):
'''create_textfield generates a material lite text field given a text prompt.
:param text: Any text content to precede the question field (default is None)
:param id_attribute: the unique id for the question
:param required: is the question required? 0=False,1=True, default 0
'''
question_list = {}
question_list["id"] = id_attribute
question_list["required"] = required
question_list["text"] = text
return question_list
# PARSING FUNCTIONS ############################################################################
def parse_validation(required_counts):
'''parse_validation parses code to validate each step
:param page_count: the total number of pages for the survey (called "steps")
'''
validation = ""
current_page = 1
pages = required_counts.keys()
pages.sort()
for page_number in pages:
if current_page == 1:
validation = "%s if ( state.stepIndex === %s ) {\n" %(validation,page_number)
else:
validation = "%s else if ( state.stepIndex === %s ) {\n" %(validation,page_number)
validation = '%s if (($.unique($(`.page%s.required[type=number],.page%s.required:text`).map(function(){return $(this).attr(`meta-text`)})).map(function() {return $(`[meta-text*="` + this + `"].required[type=number], [meta-text*="` + this + `"].required:text`).filter(function() { return $(this).val();}).length > 0}).get().indexOf(false) != -1) || ($.unique($(`.page%s.required:not([type=number]):not(:text)`).map(function(){return $(this).attr(`meta-text`)})).map(function() {return $(`[meta-text*="` + this + `"].required:checked`).length > 0}).get().indexOf(false) != -1)){\nis_required($(`.page%s.required:not(checked)`));\nreturn false;\n' % (validation, page_number, page_number, page_number, page_number)
# If we are at the last page, passing validation should enable the submit
if page_number == pages[-1]:
validation = '%s } else {\nexpfactory_finished=true;\n' %(validation)
validation = '%s}}' %(validation)
current_page+=1
return validation
def read_survey_file(question_file,delim="\t"):
    '''read_survey_file reads in a survey file, and returns a DataFrame with columns. If there is an error, None is returned, and the error is printed to the screen.
:param question_file: the survey.tsv (or other) questions file
:param delim: the delimiter of the question_file
'''
df = pandas.read_csv(question_file,sep=delim)
required_columns = ["question_type","question_text","page_number","option_text","option_values","required"]
optional_columns = ["variables"]
acceptable_types = get_question_types()
# Parse column names, ensure lower case, check that are valid
column_names = [x.lower() for x in df.columns.tolist()]
acceptable_columns = []
for column_name in column_names:
if column_name in required_columns + optional_columns:
acceptable_columns.append(column_name)
# Make sure all required columns are included
if len([x for x in required_columns if x in acceptable_columns]) == len(required_columns):
df.columns = acceptable_columns
return df
else:
missing_columns = [x for x in required_columns if x not in acceptable_columns]
print "Question file is missing required columns %s" %(",".join(missing_columns))
return None
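# Sketch of a minimal frame that satisfies the required columns checked above;
# the two questions are invented purely for illustration.
def _demo_minimal_survey_dataframe():
    return pandas.DataFrame({"question_type":["instruction","radio"],
                             "question_text":["Welcome to the survey.","Pick one option."],
                             "page_number":[1,1],
                             "option_text":[None,"Yes,No"],
                             "option_values":[None,"1,0"],
                             "required":[0,1]})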
def parse_questions(question_file,exp_id,delim="\t",return_requiredcount=True,validate=False):
'''parse_questions reads in a text file, separated by delim, into a pandas data frame, checking that all column names are provided.
:param question_file: a TAB separated file to be read with experiment questions. Will also be validated for columns names.
:param exp_id: the experiment unique id, to be used to generate question ids
:param return_requiredcount: if True, will return questions,page_count where page_count is a dictionary to look up the number of required questions on each page {1:10}
:param validate: throw an error in the case that number of values != number of option (for testing)
'''
df = read_survey_file(question_file,delim=delim)
acceptable_types = get_question_types()
required_counts = dict()
if isinstance(df,pandas.DataFrame):
# Each question will have id [exp_id][question_count] with appended _[count] for options
question_count = 0
questions = []
current_page_number = 1
current_page = '<div class="step">'
for question in df.iterrows():
question_type = question[1].question_type
question_text = question[1].question_text
page_number = question[1].page_number
page_class = "page%s" %(page_number)
options = question[1].option_text
values = question[1].option_values
required = int(question[1].required)
unique_id = "%s_%s" %(exp_id,question_count)
new_question = None
if required == 1:
if page_number not in required_counts:
required_counts[page_number] = 1
else:
required_counts[page_number] = required_counts[page_number] + 1
if question_type in acceptable_types:
# Instruction block / text
if question_type == "instruction":
new_question = create_instruction(question_text,tag="h3",id_attribute=unique_id)
# Radio button
elif question_type == "radio":
if not str(options) == "nan" and not str(values) == "nan":
new_question = create_radio(text=question_text,
options=options.split(","),
values = values.split(","),
required=required,
id_attribute=unique_id,
classes=page_class,
validate=validate)
else:
print "Radio question %s found null for options or values, skipping." %(question_text)
# Checkbox
elif question_type == "checkbox":
if not str(options) == "nan":
new_question = create_checkbox(text=question_text,
options=options.split(","),
required=required,
id_attribute=unique_id,
classes=page_class)
else:
print "Checkbox question %s found null for options, skipping." %(question_text)
# Textareas and Textfields, regular and numeric
elif question_type == "textarea":
new_question = create_textarea(question_text,
required=required,
id_attribute=unique_id,
classes=page_class)
elif question_type == "textfield":
new_question = create_textfield(question_text,
required=required,
id_attribute=unique_id,
classes=page_class)
elif question_type == "numeric":
new_question = create_numeric_textfield(question_text,
required=required,
id_attribute=unique_id,
classes=page_class)
# Table
elif question_type == "table":
print "Table option not yet supported! Coming soon."
question_count+=1
if new_question != None:
# Add the new question to the current page
if page_number == current_page_number:
current_page = "%s\n%s" %(current_page,new_question)
# Save the current page, add the current question to the next page
else:
questions.append("%s</div>" %current_page)
current_page = '<div class="step">\n%s' %new_question
current_page_number = page_number
# Add the last page
questions.append("%s</div>" %current_page)
if return_requiredcount == True:
return questions,required_counts
return questions
else:
return None
def generate_survey(experiment,experiment_folder,form_action="#",classes=None,survey_file="survey.tsv",get_validation=True,csrf_token=False):
'''generate_survey takes a list of questions and outputs html for an expfactory survey, and validation code
:param experiment: The experiment loaded config.json
:param experiment_folder: should contain survey.tsv, a TAB separated file with question data. Will be read into a pandas data frame, and columns must follow expfactory standard. Data within columns is separated by commas.
:param form_action: the form action to take at the bottom of the page
:param classes: the classes to apply to the outer content div. If none, default will be used
:param survey_file: the survey file, should be survey.tsv for a valid survey experiment
:param get_validation: get code for validation, default is True
:param csrf_token: if true, include django code for csrf_token ({% csrf_token %})
'''
if classes == None:
classes = "experiment-layout mdl-layout mdl-layout--fixed-header mdl-js-layout mdl-color--grey-100"
# We will generate unique ids for questions based on the exp_id
exp_id = experiment[0]["exp_id"]
question_file = "%s/%s" %(experiment_folder,survey_file)
questions,required_count = parse_questions(question_file,exp_id=exp_id)
# Get validation code based on maximum page value
validation = parse_validation(required_count)
token = ""
if csrf_token == True:
token = "{% csrf_token %}"
if questions != None:
survey = '<div class="%s">\n<div class="experiment-ribbon"></div>\n<main class="experiment-main mdl-layout__content">\n<div class="experiment-container mdl-grid">\n<div class="mdl-cell mdl-cell--2-col mdl-cell--hide-tablet mdl-cell--hide-phone">\n</div>\n<div class="experiment-content mdl-color--white mdl-shadow--4dp content mdl-color-text--grey-800 mdl-cell mdl-cell--8-col">\n\n<div id="questions">\n\n<form name="questions" action="%s", method="POST">%s' %(classes,form_action,token)
for question in questions:
survey = "%s\n%s" %(survey,question)
if get_validation == True:
return survey,validation
return survey
else:
print "ERROR: parsing input text file survey.tsv. Will not generate survey HTML"
def export_questions(experiment,experiment_folder,survey_file="survey.tsv",delim="\t"):
'''export_questions reads in a text file, separated by delim, and returns a json data structure with questions to look up
:param question_file: a TAB separated file to be read with experiment questions. Will also be validated for columns names.
:param exp_id: the experiment unique id, to be used to generate question ids
:param experiment_folder: should contain survey.tsv, a TAB separated file with question data. Will be read into a pandas data frame, and columns must follow expfactory standard. Data within columns is separated by commas.
:param survey_file: the survey file, should be survey.tsv for a valid survey experiment
'''
exp_id = experiment[0]["exp_id"]
question_file = "%s/%s" %(experiment_folder,survey_file)
df = read_survey_file(question_file,delim=delim)
acceptable_types = get_question_types()
if isinstance(df,pandas.DataFrame):
# Each question will have id [exp_id][question_count] with appended _[count] for options
question_count = 0
questions = dict()
for question in df.iterrows():
question_type = question[1].question_type
question_text = question[1].question_text
page_number = question[1].page_number
page_class = "page%s" %(page_number)
options = question[1].option_text
values = question[1].option_values
required = int(question[1].required)
unique_id = "%s_%s" %(exp_id,question_count)
new_question = None
if question_type in acceptable_types:
# Instruction block / text
if question_type == "instruction":
new_question = export_instruction(question_text,
id_attribute=unique_id,
required=required)
# Radio button
elif question_type == "radio":
if not str(options) == "nan" and not str(values) == "nan":
new_question = export_radio(text=question_text,
options=options.split(","),
values = values.split(","),
required=required,
id_attribute=unique_id)
else:
print "Radio question %s found null for options or values, skipping." %(question_text)
# Checkbox
elif question_type == "checkbox":
if not str(options) == "nan":
new_question = export_checkbox(text=question_text,
options=options.split(","),
required=required,
id_attribute=unique_id)
else:
print "Checkbox question %s found null for options, skipping." %(question_text)
# Textareas and Textfields, regular and numeric
elif question_type in ["textarea","textfield","numeric"]:
new_question = export_textfield(question_text,
required=required,
id_attribute=unique_id)
question_count+=1
if isinstance(new_question,dict):
questions[new_question["id"]] = new_question
elif isinstance(new_question,list):
for nq in new_question:
questions[nq["id"]] = nq
return questions
else:
return None
|
mit
|
raysteam/zeppelin
|
interpreter/lib/python/backend_zinline.py
|
61
|
11831
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file provides a static (non-interactive) matplotlib plotting backend
# for zeppelin notebooks for use with the python/pyspark interpreters
from __future__ import print_function
import sys
import uuid
import warnings
import base64
from io import BytesIO
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import mpl_config
import matplotlib
from matplotlib._pylab_helpers import Gcf
from matplotlib.backends.backend_agg import new_figure_manager, FigureCanvasAgg
from matplotlib.backend_bases import ShowBase, FigureManagerBase
from matplotlib.figure import Figure
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
class Show(ShowBase):
"""
A callable object that displays the figures to the screen. Valid kwargs
include figure width and height (in units supported by the div tag), block
(allows users to override blocking behavior regardless of whether or not
interactive mode is enabled, currently unused) and close (Implicitly call
matplotlib.pyplot.close('all') with each call to show()).
"""
def __call__(self, close=None, block=None, **kwargs):
if close is None:
close = mpl_config.get('close')
try:
managers = Gcf.get_all_fig_managers()
if not managers:
return
# Tell zeppelin that the output will be html using the %html magic
# We want to do this only once to avoid seeing "%html" printed
            # directly to the output when multiple figures are displayed from
# one paragraph.
if mpl_config.get('angular'):
print('%angular')
else:
print('%html')
# Show all open figures
for manager in managers:
manager.show(**kwargs)
finally:
# This closes all the figures if close is set to True.
if close and Gcf.get_all_fig_managers():
Gcf.destroy_all()
class FigureCanvasZInline(FigureCanvasAgg):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
"""
def get_bytes(self, **kwargs):
"""
Get the byte representation of the figure.
Should only be used with jpg/png formats.
"""
# Make sure format is correct
fmt = kwargs.get('format', mpl_config.get('format'))
if fmt == 'svg':
raise ValueError("get_bytes() does not support svg, use png or jpg")
# Express the image as bytes
buf = BytesIO()
self.print_figure(buf, **kwargs)
fmt = fmt.encode()
if sys.version_info >= (3, 4) and sys.version_info < (3, 5):
byte_str = bytes("data:image/%s;base64," %fmt, "utf-8")
else:
byte_str = b"data:image/%s;base64," %fmt
byte_str += base64.b64encode(buf.getvalue())
# Python3 forces all strings to default to unicode, but for raster image
# formats (eg png, jpg), we want to work with bytes. Thus this step is
        # needed to ensure compatibility for all python versions.
byte_str = byte_str.decode('ascii')
buf.close()
return byte_str
def get_svg(self, **kwargs):
"""
Get the svg representation of the figure.
Should only be used with svg format.
"""
# Make sure format is correct
fmt = kwargs.get('format', mpl_config.get('format'))
if fmt != 'svg':
raise ValueError("get_svg() does not support png or jpg, use svg")
# For SVG the data string has to be unicode, not bytes
buf = StringIO()
self.print_figure(buf, **kwargs)
svg_str = buf.getvalue()
buf.close()
return svg_str
def draw_idle(self, *args, **kwargs):
"""
Called when the figure gets updated (eg through a plotting command).
        This is overridden to allow open figures to be reshown after they
are updated when mpl_config.get('close') is False.
"""
if not self._is_idle_drawing:
with self._idle_draw_cntx():
self.draw(*args, **kwargs)
draw_if_interactive()
class FigureManagerZInline(FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
self.fig_id = "figure_{0}".format(uuid.uuid4().hex)
self._shown = False
def angular_bind(self, **kwargs):
"""
Bind figure data to Zeppelin's Angular Object Registry.
If mpl_config("angular") is True and PY4J is supported, this allows
for the possibility to interactively update a figure from a separate
paragraph without having to display it multiple times.
"""
# This doesn't work for SVG so make sure it's not our format
fmt = kwargs.get('format', mpl_config.get('format'))
if fmt == 'svg':
return
# Get the figure data as a byte array
src = self.canvas.get_bytes(**kwargs)
# Flag to determine whether or not to use
# zeppelin's angular display system
angular = mpl_config.get('angular')
# ZeppelinContext instance (requires PY4J)
context = mpl_config.get('context')
# Finally we must ensure that automatic closing is set to False,
# as otherwise using the angular display system is pointless
close = mpl_config.get('close')
# If above conditions are met, bind the figure data to
# the Angular Object Registry.
if not close and angular:
if hasattr(context, 'angularBind'):
# Binding is performed through figure ID to ensure this works
# if multiple figures are open
context.angularBind(self.fig_id, src)
# Zeppelin will automatically replace this value even if it
                # is updated from another paragraph thanks to the {{}} notation
src = "{{%s}}" %self.fig_id
else:
warnings.warn("Cannot bind figure to Angular Object Registry. "
"Check if PY4J is installed.")
return src
def angular_unbind(self):
"""
Unbind figure from angular display system.
"""
context = mpl_config.get('context')
if hasattr(context, 'angularUnbind'):
context.angularUnbind(self.fig_id)
def destroy(self):
"""
Called when close=True or implicitly by pyplot.close().
        Overridden to automatically clean up the angular object registry.
"""
self.angular_unbind()
def show(self, **kwargs):
if not self._shown:
zdisplay(self.canvas.figure, **kwargs)
else:
self.canvas.draw_idle()
self.angular_bind(**kwargs)
self._shown = True
def draw_if_interactive():
"""
If interactive mode is on, this allows for updating properties of
the figure when each new plotting command is called.
"""
manager = Gcf.get_active()
interactive = matplotlib.is_interactive()
angular = mpl_config.get('angular')
# Don't bother continuing if we aren't in interactive mode
# or if there are no active figures. Also pointless to continue
# in angular mode as we don't want to reshow the figure.
if not interactive or angular or manager is None:
return
# Allow for figure to be reshown if close is false since
# this function call implies that it has been updated
if not mpl_config.get('close'):
manager._shown = False
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this (and
# new_figure_manager_given_figure) is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
    # main-level app (e.g. backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasZInline(figure)
manager = FigureManagerZInline(canvas, num)
return manager
########################################################################
#
# Backend specific functions
#
########################################################################
def zdisplay(fig, **kwargs):
"""
Publishes a matplotlib figure to the notebook paragraph output.
"""
# kwargs can be width or height (in units supported by div tag)
width = kwargs.pop('width', 'auto')
height = kwargs.pop('height', 'auto')
fmt = kwargs.get('format', mpl_config.get('format'))
# Check if format is supported
supported_formats = mpl_config.get('supported_formats')
if fmt not in supported_formats:
raise ValueError("Unsupported format %s" %fmt)
# For SVG the data string has to be unicode, not bytes
if fmt == 'svg':
img = fig.canvas.get_svg(**kwargs)
# This is needed to ensure the SVG image is the correct size.
# We should find a better way to do this...
width = '{}px'.format(mpl_config.get('width'))
height = '{}px'.format(mpl_config.get('height'))
else:
# Express the image as bytes
src = fig.canvas.manager.angular_bind(**kwargs)
img = "<img src={src} style='width={width};height:{height}'>"
img = img.format(src=src, width=width, height=height)
# Print the image to the notebook paragraph via the %html magic
html = "<div style='width:{width};height:{height}'>{img}<div>"
print(html.format(width=width, height=height, img=img))
def displayhook():
"""
Called post paragraph execution if interactive mode is on
"""
if matplotlib.is_interactive():
show()
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
# Create a reference to the show function we are using. This is what actually
# gets called by matplotlib.pyplot.show().
show = Show()
# Default FigureCanvas and FigureManager classes to use from the backend
FigureCanvas = FigureCanvasZInline
FigureManager = FigureManagerZInline
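# Usage sketch: this assumes the module is importable as ``backend_zinline``
# inside a Zeppelin Python/PySpark paragraph; the plotted data is arbitrary.
def _demo_backend_usage():
    import matplotlib
    matplotlib.use('module://backend_zinline')
    import matplotlib.pyplot as plt
    plt.plot([0, 1, 2], [0, 1, 4])
    plt.show()  # routed through Show()/zdisplay() defined above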
|
apache-2.0
|
yufengg/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/estimator.py
|
9
|
55718
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base Estimator class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import copy
import os
import tempfile
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_args
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import model_fn as model_fn_lib
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import tensor_signature
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.learn_io import data_feeder
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
from tensorflow.contrib.training.python.training import evaluation
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import device_setter
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver
from tensorflow.python.training import summary_io
from tensorflow.python.util import compat
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
AS_ITERABLE_DATE = '2016-09-15'
AS_ITERABLE_INSTRUCTIONS = (
'The default behavior of predict() is changing. The default value for\n'
'as_iterable will change to True, and then the flag will be removed\n'
'altogether. The behavior of this flag is described below.')
SCIKIT_DECOUPLE_DATE = '2016-12-01'
SCIKIT_DECOUPLE_INSTRUCTIONS = (
'Estimator is decoupled from Scikit Learn interface by moving into\n'
'separate class SKCompat. Arguments x, y and batch_size are only\n'
'available in the SKCompat class, Estimator will only accept input_fn.\n'
'Example conversion:\n'
' est = Estimator(...) -> est = SKCompat(Estimator(...))')
def _verify_input_args(x, y, input_fn, feed_fn, batch_size):
"""Verifies validity of co-existence of input arguments."""
if input_fn is None:
if x is None:
raise ValueError('Either x or input_fn must be provided.')
if contrib_framework.is_tensor(x) or (y is not None and
contrib_framework.is_tensor(y)):
raise ValueError('Inputs cannot be tensors. Please provide input_fn.')
if feed_fn is not None:
raise ValueError('Can not provide both feed_fn and x or y.')
else:
if (x is not None) or (y is not None):
raise ValueError('Can not provide both input_fn and x or y.')
if batch_size is not None:
raise ValueError('Can not provide both input_fn and batch_size.')
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1):
"""Make inputs into input and feed functions.
Args:
x: Numpy, Pandas or Dask matrix or iterable.
y: Numpy, Pandas or Dask matrix or iterable.
input_fn: Pre-defined input function for training data.
feed_fn: Pre-defined data feeder function.
batch_size: Size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
Data input and feeder function based on training data.
Raises:
ValueError: Only one of `(x & y)` or `input_fn` must be provided.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if input_fn is not None:
return input_fn, feed_fn
df = data_feeder.setup_train_data_feeder(
x,
y,
n_classes=None,
batch_size=batch_size,
shuffle=shuffle,
epochs=epochs)
return df.input_builder, df.get_feed_dict_fn()
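# Sketch of exercising the helper above with small in-memory numpy data
# (shapes and values are arbitrary, chosen only for illustration).
def _demo_get_input_fn():
  x = np.random.rand(8, 3).astype(np.float32)
  y = np.random.randint(0, 2, size=8)
  return _get_input_fn(
      x=x, y=y, input_fn=None, feed_fn=None, batch_size=4, shuffle=False)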
def infer_real_valued_columns_from_input_fn(input_fn):
"""Creates `FeatureColumn` objects for inputs defined by `input_fn`.
This interprets all inputs as dense, fixed-length float values. This creates
a local graph in which it calls `input_fn` to build the tensors, then discards
it.
Args:
input_fn: Input function returning a tuple of:
features - Dictionary of string feature name to `Tensor` or `Tensor`.
labels - `Tensor` of label values.
Returns:
List of `FeatureColumn` objects.
"""
with ops.Graph().as_default():
features, _ = input_fn()
return layers.infer_real_valued_columns(features)
def infer_real_valued_columns_from_input(x):
"""Creates `FeatureColumn` objects for inputs defined by input `x`.
This interprets all inputs as dense, fixed-length float values.
Args:
x: Real-valued matrix of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features.
Returns:
List of `FeatureColumn` objects.
"""
input_fn, _ = _get_input_fn(
x=x, y=None, input_fn=None, feed_fn=None, batch_size=None)
return infer_real_valued_columns_from_input_fn(input_fn)
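# Minimal usage sketch for the two `infer_real_valued_columns_*` helpers above
# (illustrative only, not part of the original module; the toy matrix is
# hypothetical).
def _example_infer_real_valued_columns():
  """Sketch: infer dense float feature columns from a toy numpy matrix."""
  x = np.array([[0.1, 0.2], [0.3, 0.4]], dtype=np.float32)  # two features
  # Every input column is treated as a dense, fixed-length float feature.
  return infer_real_valued_columns_from_input(x)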
def _model_fn_args(fn):
"""Get argument names for function-like object.
Args:
fn: Function, or function-like object (e.g., result of `functools.partial`).
Returns:
`tuple` of string argument names.
Raises:
ValueError: if partial function has positionally bound arguments
"""
_, fn = tf_decorator.unwrap(fn)
if hasattr(fn, 'func') and hasattr(fn, 'keywords') and hasattr(fn, 'args'):
# Handle functools.partial and similar objects.
return tuple([
arg for arg in tf_inspect.getargspec(fn.func).args[len(fn.args):]
if arg not in set(fn.keywords.keys())
])
# Handle function.
return tuple(tf_inspect.getargspec(fn).args)
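# Illustrative sketch of what `_model_fn_args` reports (not part of the
# original module); `my_model_fn` below is hypothetical.
def _example_model_fn_args():
  """Sketch: argument introspection for plain functions and partials."""
  import functools

  def my_model_fn(features, labels, mode, params):  # hypothetical model_fn
    del features, labels, mode, params

  # Plain function: all declared argument names are returned.
  assert _model_fn_args(my_model_fn) == ('features', 'labels', 'mode', 'params')
  # functools.partial: keyword-bound arguments are excluded from the result.
  bound = functools.partial(my_model_fn, params={'learning_rate': 0.1})
  assert _model_fn_args(bound) == ('features', 'labels', 'mode')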
def _get_replica_device_setter(config):
"""Creates a replica device setter if required.
Args:
config: A RunConfig instance.
Returns:
A replica device setter, or None.
"""
ps_ops = [
'Variable', 'VariableV2', 'AutoReloadVariable', 'MutableHashTable',
'MutableHashTableV2', 'MutableHashTableOfTensors',
'MutableHashTableOfTensorsV2', 'MutableDenseHashTable',
'MutableDenseHashTableV2'
]
if config.task_type:
worker_device = '/job:%s/task:%d' % (config.task_type, config.task_id)
else:
worker_device = '/job:worker'
if config.num_ps_replicas > 0:
return device_setter.replica_device_setter(
ps_tasks=config.num_ps_replicas, worker_device=worker_device,
merge_devices=True, ps_ops=ps_ops, cluster=config.cluster_spec)
else:
return None
def _make_metrics_ops(metrics, features, labels, predictions):
"""Add metrics based on `features`, `labels`, and `predictions`.
  `metrics` contains a specification for how to run metrics. It is a dict
  whose keys are friendly names mapping to either `MetricSpec` objects or
  directly to a metric function (assuming that `predictions` and `labels` are
  single tensors). A key may also be a `(friendly_name, pred_name)` tuple, in
  which case `predictions[pred_name]` and `labels` are passed to the bare
  metric function (assuming `labels` is a single tensor).
Users are encouraged to use `MetricSpec` objects, which are more flexible and
cleaner. They also lead to clearer errors.
Args:
metrics: A dict mapping names to metrics specification, for example
`MetricSpec` objects.
features: A dict of tensors returned from an input_fn as features/inputs.
labels: A single tensor or a dict of tensors returned from an input_fn as
labels.
predictions: A single tensor or a dict of tensors output from a model as
predictions.
Returns:
    A dict mapping the friendly names given in `metrics` to the result of
    calling the given metric function.
Raises:
ValueError: If metrics specifications do not work with the type of
`features`, `labels`, or `predictions` provided. Mostly, a dict is given
but no pred_name specified.
"""
metrics = metrics or {}
# If labels is a dict with a single key, unpack into a single tensor.
labels_tensor_or_dict = labels
if isinstance(labels, dict) and len(labels) == 1:
labels_tensor_or_dict = labels[list(labels.keys())[0]]
result = {}
# Iterate in lexicographic order, so the graph is identical among runs.
for name, metric in sorted(six.iteritems(metrics)):
if isinstance(metric, metric_spec.MetricSpec):
result[name] = metric.create_metric_ops(features, labels, predictions)
continue
# TODO(b/31229024): Remove the rest of this loop
logging.warning('Please specify metrics using MetricSpec. Using bare '
'functions or (key, fn) tuples is deprecated and support '
'for it will be removed on Oct 1, 2016.')
if isinstance(name, tuple):
# Multi-head metrics.
if len(name) != 2:
raise ValueError('Invalid metric for {}. It returned a tuple with '
'len {}, expected 2.'.format(name, len(name)))
if not isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide (name, prediction), '
'but predictions are not dict. '
'Metrics: %s, Predictions: %s.' % (metrics, predictions))
# Here are two options: labels are single Tensor or a dict.
if isinstance(labels, dict) and name[1] in labels:
# If labels are dict and the prediction name is in it, apply metric.
result[name[0]] = metric(predictions[name[1]], labels[name[1]])
else:
# Otherwise pass the labels to the metric.
result[name[0]] = metric(predictions[name[1]], labels_tensor_or_dict)
else:
# Single head metrics.
if isinstance(predictions, dict):
raise ValueError(
'Metrics passed provide only name, no prediction, '
'but predictions are dict. '
'Metrics: %s, Labels: %s.' % (metrics, labels_tensor_or_dict))
result[name] = metric(predictions, labels_tensor_or_dict)
return result
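# Illustrative shapes for the `metrics` argument handled above (a sketch, not
# part of the original module; `my_streaming_metric` is hypothetical and would
# typically come from tf.contrib.metrics):
#
#   metrics = {
#       # Preferred: a MetricSpec naming which prediction/label keys to use.
#       'accuracy': metric_spec.MetricSpec(metric_fn=my_streaming_metric,
#                                          prediction_key='classes'),
#       # Deprecated: a bare function, used when `predictions` is a single
#       # tensor.
#       'mean_error': my_streaming_metric,
#       # Deprecated multi-head form: the *key* is a (friendly_name, pred_name)
#       # tuple and `predictions` must be a dict.
#       ('accuracy/head1', 'head1'): my_streaming_metric,
#   }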
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v) for k, v in sorted(dictionary.items()))
def _write_dict_to_summary(output_dir,
dictionary,
current_global_step):
"""Writes a `dict` into summary file in given output directory.
Args:
output_dir: `str`, directory to write the summary file in.
dictionary: the `dict` to be written to summary file.
current_global_step: `int`, the current global step.
"""
logging.info('Saving dict for global step %d: %s', current_global_step,
_dict_to_str(dictionary))
summary_writer = summary_io.SummaryWriterCache.get(output_dir)
summary_proto = summary_pb2.Summary()
for key in dictionary:
if dictionary[key] is None:
continue
if key == 'global_step':
continue
value = summary_proto.value.add()
value.tag = key
if (isinstance(dictionary[key], np.float32) or
isinstance(dictionary[key], float)):
value.simple_value = float(dictionary[key])
elif (isinstance(dictionary[key], np.int64) or
isinstance(dictionary[key], np.int32) or
isinstance(dictionary[key], int)):
value.simple_value = int(dictionary[key])
else:
logging.warn(
'Skipping summary for %s, must be a float, np.float32, np.int64, np.int32 or int.',
key)
summary_writer.add_summary(summary_proto, current_global_step)
summary_writer.flush()
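# Minimal usage sketch for `_write_dict_to_summary` (illustrative only; the
# output directory and values below are hypothetical):
#
#   _write_dict_to_summary(
#       output_dir='/tmp/eval',
#       dictionary={'loss': 0.25, 'accuracy': np.float32(0.9), 'tag': 'dev'},
#       current_global_step=100)
#
# Numeric values are written as simple_value summaries; non-numeric entries
# such as 'tag' above are skipped with a warning.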
class BaseEstimator(
sklearn.BaseEstimator, evaluable.Evaluable, trainable.Trainable):
"""Abstract BaseEstimator class to train and evaluate TensorFlow models.
Users should not instantiate or subclass this class. Instead, use `Estimator`.
"""
__metaclass__ = abc.ABCMeta
# Note that for Google users, this is overridden with
# learn_runner.EstimatorConfig.
# TODO(wicke): Remove this once launcher takes over config functionality
_Config = run_config.RunConfig # pylint: disable=invalid-name
def __init__(self, model_dir=None, config=None):
"""Initializes a BaseEstimator instance.
Args:
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model. If `None`, the
        model_dir in `config` will be used if set. If both are set, they must
        be the same.
config: A RunConfig instance.
"""
# Create a run configuration.
if config is None:
self._config = BaseEstimator._Config()
logging.info('Using default config.')
else:
self._config = config
if self._config.session_config is None:
self._session_config = config_pb2.ConfigProto(allow_soft_placement=True)
else:
self._session_config = self._config.session_config
# Model directory.
if (model_dir is not None) and (self._config.model_dir is not None):
if model_dir != self._config.model_dir:
# TODO(b/9965722): remove this suppression after it is no longer
# necessary.
# pylint: disable=g-doc-exception
raise ValueError(
"model_dir are set both in constructor and RunConfig, but with "
"different values. In constructor: '{}', in RunConfig: "
"'{}' ".format(model_dir, self._config.model_dir))
self._model_dir = model_dir or self._config.model_dir
if self._model_dir is None:
self._model_dir = tempfile.mkdtemp()
logging.warning('Using temporary folder as model directory: %s',
self._model_dir)
if self._config.model_dir is None:
self._config = self._config.replace(model_dir=self._model_dir)
logging.info('Using config: %s', str(vars(self._config)))
# Set device function depending if there are replicas or not.
self._device_fn = _get_replica_device_setter(self._config)
# Features and labels TensorSignature objects.
# TODO(wicke): Rename these to something more descriptive
self._features_info = None
self._labels_info = None
self._graph = None
@property
def config(self):
# TODO(wicke): make RunConfig immutable, and then return it without a copy.
return copy.deepcopy(self._config)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Trainable`.
Raises:
ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`.
ValueError: If both `steps` and `max_steps` are not `None`.
"""
if (steps is not None) and (max_steps is not None):
raise ValueError('Can not provide both steps and max_steps.')
_verify_input_args(x, y, input_fn, None, batch_size)
if x is not None:
SKCompat(self).fit(x, y, batch_size, steps, max_steps, monitors)
return self
if max_steps is not None:
try:
start_step = load_variable(self._model_dir, ops.GraphKeys.GLOBAL_STEP)
if max_steps <= start_step:
          logging.info(
              'Skipping training since max_steps has already been reached.')
return self
except: # pylint: disable=bare-except
pass
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
if steps is not None or max_steps is not None:
hooks.append(basic_session_run_hooks.StopAtStepHook(steps, max_steps))
loss = self._train_model(input_fn=input_fn, hooks=hooks)
logging.info('Loss for final step: %s.', loss)
return self
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def partial_fit(
self, x=None, y=None, input_fn=None, steps=1, batch_size=None,
monitors=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
    on different or the same chunks of the dataset. This can implement
    either iterative training or out-of-core/online training.
    This is especially useful when the whole dataset is too big to
    fit in memory at once, or when the model is taking a long time to
    converge and you want to split up training into subparts.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
returns arrays of features. The training input samples for fitting the
model. If set, `input_fn` must be `None`.
y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of labels. The training label values
(class labels in classification, real numbers in regression). If set,
`input_fn` must be `None`.
input_fn: Input function. If set, `x`, `y`, and `batch_size` must be
`None`.
steps: Number of steps for which to train model. If `None`, train forever.
batch_size: minibatch size to use on the input, defaults to first
dimension of `x`. Must be `None` if `input_fn` is provided.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
`self`, for chaining.
Raises:
ValueError: If at least one of `x` and `y` is provided, and `input_fn` is
provided.
"""
logging.warning('The current implementation of partial_fit is not optimized'
' for use in a loop. Consider using fit() instead.')
return self.fit(x=x, y=y, input_fn=input_fn, steps=steps,
batch_size=batch_size, monitors=monitors)
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('y', None), ('batch_size', None)
)
def evaluate(self,
x=None,
y=None,
input_fn=None,
feed_fn=None,
batch_size=None,
steps=None,
metrics=None,
name=None,
checkpoint_path=None,
hooks=None,
log_progress=True):
# pylint: disable=g-doc-args,g-doc-return-or-yield
"""See `Evaluable`.
Raises:
ValueError: If at least one of `x` or `y` is provided, and at least one of
`input_fn` or `feed_fn` is provided.
        Or if `metrics` is neither `None` nor a `dict`.
"""
_verify_input_args(x, y, input_fn, feed_fn, batch_size)
if x is not None:
return SKCompat(self).score(x, y, batch_size, steps, metrics, name)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name,
checkpoint_path=checkpoint_path,
hooks=hooks,
log_progress=log_progress)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
@deprecated_args(
SCIKIT_DECOUPLE_DATE, SCIKIT_DECOUPLE_INSTRUCTIONS, ('x', None),
('batch_size', None), ('as_iterable', True)
)
def predict(
self, x=None, input_fn=None, batch_size=None, outputs=None,
as_iterable=True):
"""Returns predictions for given features.
Args:
x: Matrix of shape [n_samples, n_features...]. Can be iterator that
         returns arrays of features. The input samples on which to compute
         predictions. If set, `input_fn` must be `None`.
input_fn: Input function. If set, `x` and 'batch_size' must be `None`.
batch_size: Override default batch size. If set, 'input_fn' must be
'None'.
outputs: list of `str`, name of the output to predict.
If `None`, returns all.
as_iterable: If True, return an iterable which keeps yielding predictions
for each example until inputs are exhausted. Note: The inputs must
terminate if you want the iterable to terminate (e.g. be sure to pass
num_epochs=1 if you are using something like read_batch_features).
Returns:
A numpy array of predicted classes or regression values if the
constructor's `model_fn` returns a `Tensor` for `predictions` or a `dict`
of numpy arrays if `model_fn` returns a `dict`. Returns an iterable of
predictions if as_iterable is True.
Raises:
ValueError: If x and input_fn are both provided or both `None`.
"""
_verify_input_args(x, None, input_fn, None, batch_size)
if x is not None and not as_iterable:
return SKCompat(self).predict(x, batch_size)
input_fn, feed_fn = _get_input_fn(x, None, input_fn, None, batch_size)
return self._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=as_iterable)
def get_variable_value(self, name):
"""Returns value of the variable given by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return load_variable(self.model_dir, name)
def get_variable_names(self):
"""Returns list of all variable names in this model.
Returns:
List of names.
"""
return [name for name, _ in list_variables(self.model_dir)]
@property
def model_dir(self):
return self._model_dir
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def export(self,
export_dir,
input_fn=export._default_input_fn, # pylint: disable=protected-access
input_feature_key=None,
use_deprecated_input_fn=True,
signature_fn=None,
prediction_key=None,
default_batch_size=1,
exports_to_keep=None,
checkpoint_path=None):
"""Exports inference graph into given dir.
Args:
export_dir: A string containing a directory to write the exported graph
and checkpoints.
input_fn: If `use_deprecated_input_fn` is true, then a function that given
`Tensor` of `Example` strings, parses it into features that are then
passed to the model. Otherwise, a function that takes no argument and
returns a tuple of (features, labels), where features is a dict of
string key to `Tensor` and labels is a `Tensor` that's currently not
used (and so can be `None`).
input_feature_key: Only used if `use_deprecated_input_fn` is false. String
        key into the features dict returned by `input_fn` that corresponds to
        the raw `Example` strings `Tensor` that the exported model will take as
input. Can only be `None` if you're using a custom `signature_fn` that
does not use the first arg (examples).
use_deprecated_input_fn: Determines the signature format of `input_fn`.
signature_fn: Function that returns a default signature and a named
signature map, given `Tensor` of `Example` strings, `dict` of `Tensor`s
for features and `Tensor` or `dict` of `Tensor`s for predictions.
prediction_key: The key for a tensor in the `predictions` dict (output
from the `model_fn`) to use as the `predictions` input to the
`signature_fn`. Optional. If `None`, predictions will pass to
`signature_fn` without filtering.
default_batch_size: Default batch size of the `Example` placeholder.
exports_to_keep: Number of exports to keep.
checkpoint_path: the checkpoint path of the model to be exported. If it is
`None` (which is default), will use the latest checkpoint in
export_dir.
Returns:
The string path to the exported directory. NB: this functionality was
added ca. 2016/09/25; clients that depend on the return value may need
to handle the case where this function returns None because subclasses
are not returning a value.
"""
# pylint: disable=protected-access
return export._export_estimator(
estimator=self,
export_dir=export_dir,
signature_fn=signature_fn,
prediction_key=prediction_key,
input_fn=input_fn,
input_feature_key=input_feature_key,
use_deprecated_input_fn=use_deprecated_input_fn,
default_batch_size=default_batch_size,
exports_to_keep=exports_to_keep,
checkpoint_path=checkpoint_path)
@abc.abstractproperty
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
@abc.abstractproperty
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
A `ModelFnOps` object.
"""
pass
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
A `ModelFnOps` object.
"""
raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator')
@deprecated(
'2016-09-23',
'The signature of the input_fn accepted by export is changing to be '
'consistent with what\'s used by tf.Learn Estimator\'s train/evaluate, '
'which makes this function useless. This will be removed after the '
'deprecation date.')
def _get_feature_ops_from_example(self, examples_batch):
"""Returns feature parser for given example batch using features info.
This function requires `fit()` has been called.
Args:
examples_batch: batch of tf.Example
Returns:
features: `Tensor` or `dict` of `Tensor` objects.
Raises:
ValueError: If `_features_info` attribute is not available (usually
because `fit()` has not been called).
"""
if self._features_info is None:
raise ValueError('Features information missing, was fit() ever called?')
return tensor_signature.create_example_parser_from_signatures(
self._features_info, examples_batch)
def _check_inputs(self, features, labels):
if self._features_info is not None:
logging.debug('Given features: %s, required signatures: %s.',
str(features), str(self._features_info))
if not tensor_signature.tensors_compatible(features, self._features_info):
raise ValueError('Features are incompatible with given information. '
'Given features: %s, required signatures: %s.' %
(str(features), str(self._features_info)))
else:
self._features_info = tensor_signature.create_signatures(features)
logging.debug('Setting feature info to %s.', str(self._features_info))
if labels is not None:
if self._labels_info is not None:
logging.debug('Given labels: %s, required signatures: %s.',
str(labels), str(self._labels_info))
if not tensor_signature.tensors_compatible(labels, self._labels_info):
raise ValueError('Labels are incompatible with given information. '
'Given labels: %s, required signatures: %s.' %
(str(labels), str(self._labels_info)))
else:
self._labels_info = tensor_signature.create_signatures(labels)
logging.debug('Setting labels info to %s', str(self._labels_info))
def _extract_metric_update_ops(self, eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
for name, metric_ops in six.iteritems(eval_dict):
if isinstance(metric_ops, (list, tuple)):
if len(metric_ops) == 2:
value_ops[name] = metric_ops[0]
update_ops.append(metric_ops[1])
else:
logging.warning(
'Ignoring metric {}. It returned a list|tuple with len {}, '
'expected 2'.format(name, len(metric_ops)))
value_ops[name] = metric_ops
else:
value_ops[name] = metric_ops
if update_ops:
update_ops = control_flow_ops.group(*update_ops)
else:
update_ops = None
return update_ops, value_ops
def _evaluate_model(self,
input_fn,
steps,
feed_fn=None,
metrics=None,
name='',
checkpoint_path=None,
hooks=None,
log_progress=True):
# TODO(wicke): Remove this once Model and associated code are gone.
if (hasattr(self._config, 'execution_mode') and
self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')):
return None, None
# Check that model has been trained (if nothing has been set explicitly).
if not checkpoint_path:
latest_path = saver.latest_checkpoint(self._model_dir)
if not latest_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
checkpoint_path = latest_path
# Setup output directory.
eval_dir = os.path.join(self._model_dir, 'eval' if not name else
'eval_' + name)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_results = self._get_eval_ops(features, labels, metrics)
eval_dict = model_fn_results.eval_metric_ops
update_op, eval_dict = self._extract_metric_update_ops(eval_dict)
# We need to copy the hook array as we modify it, thus [:].
hooks = hooks[:] if hooks else []
if feed_fn:
hooks.append(basic_session_run_hooks.FeedFnHook(feed_fn))
if steps:
hooks.append(
evaluation.StopAfterNEvalsHook(
steps, log_progress=log_progress))
global_step_key = 'global_step'
while global_step_key in eval_dict:
global_step_key = '_' + global_step_key
eval_dict[global_step_key] = global_step
eval_results = evaluation.evaluate_once(
checkpoint_path=checkpoint_path,
master=self._config.evaluation_master,
scaffold=model_fn_results.scaffold,
eval_ops=update_op,
final_ops=eval_dict,
hooks=hooks,
config=self._session_config)
current_global_step = eval_results[global_step_key]
_write_dict_to_summary(eval_dir, eval_results, current_global_step)
return eval_results, current_global_step
def _get_features_from_input_fn(self, input_fn):
result = input_fn()
if isinstance(result, (list, tuple)):
return result[0]
return result
def _infer_model(self,
input_fn,
feed_fn=None,
outputs=None,
as_iterable=True,
iterate_batches=False):
# Check that model has been trained.
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
with ops.Graph().as_default() as g:
random_seed.set_random_seed(self._config.tf_random_seed)
contrib_framework.create_global_step(g)
features = self._get_features_from_input_fn(input_fn)
infer_ops = self._get_predict_ops(features)
predictions = self._filter_predictions(infer_ops.predictions, outputs)
mon_sess = monitored_session.MonitoredSession(
session_creator=monitored_session.ChiefSessionCreator(
checkpoint_filename_with_path=checkpoint_path,
scaffold=infer_ops.scaffold,
config=self._session_config))
if not as_iterable:
with mon_sess:
if not mon_sess.should_stop():
return mon_sess.run(predictions, feed_fn() if feed_fn else None)
else:
return self._predict_generator(mon_sess, predictions, feed_fn,
iterate_batches)
def _predict_generator(self, mon_sess, predictions, feed_fn, iterate_batches):
with mon_sess:
while not mon_sess.should_stop():
preds = mon_sess.run(predictions, feed_fn() if feed_fn else None)
if iterate_batches:
yield preds
elif not isinstance(predictions, dict):
for pred in preds:
yield pred
else:
first_tensor = list(preds.values())[0]
if isinstance(first_tensor, sparse_tensor.SparseTensorValue):
batch_length = first_tensor.dense_shape[0]
else:
batch_length = first_tensor.shape[0]
for i in range(batch_length):
yield {key: value[i] for key, value in six.iteritems(preds)}
if self._is_input_constant(feed_fn, mon_sess.graph):
return
def _is_input_constant(self, feed_fn, graph):
# If there are no queue_runners, the input `predictions` is a
# constant, and we should stop after the first epoch. If,
# instead, there are queue_runners, eventually they should throw
# an `OutOfRangeError`.
if graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS):
return False
# data_feeder uses feed_fn to generate `OutOfRangeError`.
if feed_fn is not None:
return False
return True
def _filter_predictions(self, predictions, outputs):
if not outputs:
return predictions
if not isinstance(predictions, dict):
raise ValueError(
'outputs argument is not valid in case of non-dict predictions.')
existing_keys = predictions.keys()
predictions = {
key: value
for key, value in six.iteritems(predictions) if key in outputs
}
if not predictions:
raise ValueError('Expected to run at least one output from %s, '
'provided %s.' % (existing_keys, outputs))
return predictions
def _train_model(self, input_fn, hooks):
all_hooks = []
self._graph = ops.Graph()
with self._graph.as_default() as g, g.device(self._device_fn):
random_seed.set_random_seed(self._config.tf_random_seed)
global_step = contrib_framework.create_global_step(g)
features, labels = input_fn()
self._check_inputs(features, labels)
model_fn_ops = self._get_train_ops(features, labels)
ops.add_to_collection(ops.GraphKeys.LOSSES, model_fn_ops.loss)
all_hooks.extend(hooks)
all_hooks.extend([
basic_session_run_hooks.NanTensorHook(model_fn_ops.loss),
basic_session_run_hooks.LoggingTensorHook(
{
'loss': model_fn_ops.loss,
'step': global_step
},
every_n_iter=100)
])
scaffold = model_fn_ops.scaffold or monitored_session.Scaffold()
if not (scaffold.saver or ops.get_collection(ops.GraphKeys.SAVERS)):
ops.add_to_collection(
ops.GraphKeys.SAVERS,
saver.Saver(
sharded=True,
max_to_keep=self._config.keep_checkpoint_max,
defer_build=True,
save_relative_paths=True))
chief_hooks = []
if (self._config.save_checkpoints_secs or
self._config.save_checkpoints_steps):
saver_hook_exists = any([
isinstance(h, basic_session_run_hooks.CheckpointSaverHook)
for h in (all_hooks + model_fn_ops.training_hooks + chief_hooks +
model_fn_ops.training_chief_hooks)
])
if not saver_hook_exists:
chief_hooks = [
basic_session_run_hooks.CheckpointSaverHook(
self._model_dir,
save_secs=self._config.save_checkpoints_secs,
save_steps=self._config.save_checkpoints_steps,
scaffold=scaffold)
]
with monitored_session.MonitoredTrainingSession(
master=self._config.master,
is_chief=self._config.is_chief,
checkpoint_dir=self._model_dir,
scaffold=scaffold,
hooks=all_hooks + model_fn_ops.training_hooks,
chief_only_hooks=chief_hooks + model_fn_ops.training_chief_hooks,
save_checkpoint_secs=0, # Saving is handled by a hook.
save_summaries_steps=self._config.save_summary_steps,
config=self._session_config
) as mon_sess:
loss = None
while not mon_sess.should_stop():
_, loss = mon_sess.run([model_fn_ops.train_op, model_fn_ops.loss])
summary_io.SummaryWriterCache.clear()
return loss
def _identity_feature_engineering_fn(features, labels):
return features, labels
class Estimator(BaseEstimator):
"""Estimator class is the basic TensorFlow model trainer/evaluator.
"""
def __init__(self,
model_fn=None,
model_dir=None,
config=None,
params=None,
feature_engineering_fn=None):
"""Constructs an `Estimator` instance.
Args:
model_fn: Model function. Follows the signature:
* Args:
* `features`: single `Tensor` or `dict` of `Tensor`s
(depending on data passed to `fit`),
* `labels`: `Tensor` or `dict` of `Tensor`s (for multi-head
models). If mode is `ModeKeys.INFER`, `labels=None` will be
passed. If the `model_fn`'s signature does not accept
`mode`, the `model_fn` must still be able to handle
`labels=None`.
          * `mode`: Optional. Specifies if this is training, evaluation or
             prediction. See `ModeKeys`.
          * `params`: Optional `dict` of hyperparameters. Will receive what
             is passed to Estimator in `params` parameter. This allows
             configuring Estimators from hyper parameter tuning.
* `config`: Optional configuration object. Will receive what is passed
to Estimator in `config` parameter, or the default `config`.
Allows updating things in your model_fn based on configuration
such as `num_ps_replicas`.
* `model_dir`: Optional directory where model parameters, graph etc
are saved. Will receive what is passed to Estimator in
`model_dir` parameter, or the default `model_dir`. Allows
updating things in your model_fn that expect model_dir, such as
training hooks.
* Returns:
`ModelFnOps`
Also supports a legacy signature which returns tuple of:
* predictions: `Tensor`, `SparseTensor` or dictionary of same.
Can also be any type that is convertible to a `Tensor` or
`SparseTensor`, or dictionary of same.
* loss: Scalar loss `Tensor`.
* train_op: Training update `Tensor` or `Operation`.
        Supports the following signatures for the function:
* `(features, labels) -> (predictions, loss, train_op)`
* `(features, labels, mode) -> (predictions, loss, train_op)`
* `(features, labels, mode, params) -> (predictions, loss, train_op)`
* `(features, labels, mode, params, config) ->
(predictions, loss, train_op)`
* `(features, labels, mode, params, config, model_dir) ->
(predictions, loss, train_op)`
      model_dir: Directory to save model parameters, graph, etc. This can
        also be used to load checkpoints from the directory into an estimator
        to continue training a previously saved model.
config: Configuration object.
params: `dict` of hyper parameters that will be passed into `model_fn`.
Keys are names of parameters, values are basic python types.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into `model_fn`. Please check `model_fn` for
a definition of features and labels.
Raises:
ValueError: parameters of `model_fn` don't match `params`.
"""
super(Estimator, self).__init__(model_dir=model_dir, config=config)
if model_fn is not None:
# Check number of arguments of the given function matches requirements.
model_fn_args = _model_fn_args(model_fn)
if params is not None and 'params' not in model_fn_args:
raise ValueError('Estimator\'s model_fn (%s) does not have a params '
'argument, but params (%s) were passed to the '
'Estimator\'s constructor.' %
(model_fn, params))
if params is None and 'params' in model_fn_args:
logging.warning('Estimator\'s model_fn (%s) includes params '
'argument, but params are not passed to Estimator.',
model_fn)
self._model_fn = model_fn
self.params = params
self._feature_engineering_fn = (
feature_engineering_fn or _identity_feature_engineering_fn)
def _call_model_fn(self, features, labels, mode):
"""Calls model function with support of 2, 3 or 4 arguments.
Args:
features: features dict.
labels: labels dict.
mode: ModeKeys
Returns:
A `ModelFnOps` object. If model_fn returns a tuple, wraps them up in a
`ModelFnOps` object.
Raises:
ValueError: if model_fn returns invalid objects.
"""
features, labels = self._feature_engineering_fn(features, labels)
model_fn_args = _model_fn_args(self._model_fn)
kwargs = {}
if 'mode' in model_fn_args:
kwargs['mode'] = mode
if 'params' in model_fn_args:
kwargs['params'] = self.params
if 'config' in model_fn_args:
kwargs['config'] = self.config
if 'model_dir' in model_fn_args:
kwargs['model_dir'] = self.model_dir
model_fn_results = self._model_fn(features, labels, **kwargs)
if isinstance(model_fn_results, model_fn_lib.ModelFnOps):
return model_fn_results
# Here model_fn_results should be a tuple with 3 elements.
if len(model_fn_results) != 3:
raise ValueError('Unrecognized value returned by model_fn, '
'please return ModelFnOps.')
return model_fn_lib.ModelFnOps(
mode=mode,
predictions=model_fn_results[0],
loss=model_fn_results[1],
train_op=model_fn_results[2])
def _get_train_ops(self, features, labels):
"""Method that builds model graph and returns trainer ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.TRAIN)
def _get_eval_ops(self, features, labels, metrics):
"""Method that builds model graph and returns evaluation ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
labels: `Tensor` or `dict` of `Tensor` objects.
metrics: Dict of metrics to run. If None, the default metric functions
are used; if {}, no metrics are used. Otherwise, `metrics` should map
friendly names for the metric to a `MetricSpec` object defining which
model outputs to evaluate against which labels with which metric
function. Metric ops should support streaming, e.g., returning
update_op and value tensors. See more details in
`../../../../metrics/python/metrics/ops/streaming_metrics.py` and
`../metric_spec.py`.
Returns:
`ModelFnOps` object.
Raises:
ValueError: if `metrics` don't match `labels`.
"""
model_fn_ops = self._call_model_fn(
features, labels, model_fn_lib.ModeKeys.EVAL)
features, labels = self._feature_engineering_fn(features, labels)
# Custom metrics should overwrite defaults.
if metrics:
model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
metrics, features, labels, model_fn_ops.predictions))
if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
metrics_lib.streaming_mean(model_fn_ops.loss))
return model_fn_ops
def _get_predict_ops(self, features):
"""Method that builds model graph and returns prediction ops.
Expected to be overridden by sub-classes that require custom support.
This implementation uses `model_fn` passed as parameter to constructor to
build model.
Args:
features: `Tensor` or `dict` of `Tensor` objects.
Returns:
`ModelFnOps` object.
"""
labels = tensor_signature.create_placeholders_from_signatures(
self._labels_info)
return self._call_model_fn(features, labels, model_fn_lib.ModeKeys.INFER)
def export_savedmodel(
self, export_dir_base, serving_input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
checkpoint_path=None):
"""Exports inference graph as a SavedModel into given dir.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
serving_input_fn: A function that takes no argument and
returns an `InputFnOps`.
default_output_alternative_key: the name of the head to serve when none is
specified. Not needed for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
path (including the filename) relative to the assets.extra directory.
The corresponding value gives the full path of the source file to be
copied. For example, the simple case of copying a single file without
renaming it is specified as
`{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format.
checkpoint_path: The checkpoint path to export. If None (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The string path to the exported directory.
Raises:
      ValueError: if `serving_input_fn` is not provided.
"""
if serving_input_fn is None:
raise ValueError('serving_input_fn must be defined.')
with ops.Graph().as_default() as g:
contrib_variables.create_global_step(g)
# Call the serving_input_fn and collect the input alternatives.
input_ops = serving_input_fn()
input_alternatives, features = (
saved_model_export_utils.get_input_alternatives(input_ops))
# TODO(b/34388557) This is a stopgap, pending recording model provenance.
# Record which features are expected at serving time. It is assumed that
# these are the features that were used in training.
for feature_key in input_ops.features.keys():
ops.add_to_collection(
constants.COLLECTION_DEF_KEY_FOR_INPUT_FEATURE_KEYS, feature_key)
# Call the model_fn and collect the output alternatives.
model_fn_ops = self._call_model_fn(features, None,
model_fn_lib.ModeKeys.INFER)
output_alternatives, actual_default_output_alternative_key = (
saved_model_export_utils.get_output_alternatives(
model_fn_ops, default_output_alternative_key))
# Build the SignatureDefs from all pairs of input and output alternatives
signature_def_map = saved_model_export_utils.build_all_signature_defs(
input_alternatives, output_alternatives,
actual_default_output_alternative_key)
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = saver.latest_checkpoint(self._model_dir)
if not checkpoint_path:
raise NotFittedError("Couldn't find trained model at %s."
% self._model_dir)
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
if (model_fn_ops.scaffold is not None and
model_fn_ops.scaffold.saver is not None):
saver_for_restore = model_fn_ops.scaffold.saver
else:
saver_for_restore = saver.Saver(sharded=True)
with tf_session.Session('') as session:
saver_for_restore.restore(session, checkpoint_path)
init_op = control_flow_ops.group(
variables.local_variables_initializer(),
resources.initialize_resources(resources.shared_resources()),
lookup_ops.tables_initializer())
# Perform the export
builder = saved_model_builder.SavedModelBuilder(export_dir)
builder.add_meta_graph_and_variables(
session, [tag_constants.SERVING],
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=init_op)
builder.save(as_text)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(compat.as_bytes(export_dir),
compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
gfile.MakeDirs(dest_path)
gfile.Copy(source, dest_absolute)
return export_dir
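# Minimal end-to-end usage sketch for `Estimator` (illustrative only;
# `my_model_fn`, `my_train_input_fn`, `my_eval_input_fn` and
# `my_serving_input_fn` are hypothetical and must follow the signatures
# documented in the constructor above):
#
#   est = Estimator(model_fn=my_model_fn,
#                   model_dir='/tmp/my_model',
#                   params={'learning_rate': 0.1})
#   est.fit(input_fn=my_train_input_fn, steps=1000)
#   metrics = est.evaluate(input_fn=my_eval_input_fn, steps=100)
#   predictions = list(est.predict(input_fn=my_eval_input_fn))
#   export_dir = est.export_savedmodel('/tmp/my_model/export',
#                                      serving_input_fn=my_serving_input_fn)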
# For the duration of the x,y deprecation in Estimator, allow direct access.
# pylint: disable=protected-access
class SKCompat(sklearn.BaseEstimator):
"""Scikit learn wrapper for TensorFlow Learn Estimator."""
def __init__(self, estimator):
self._estimator = estimator
def fit(self, x, y, batch_size=128, steps=None, max_steps=None,
monitors=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None, feed_fn=None,
batch_size=batch_size, shuffle=True,
epochs=None)
all_monitors = []
if feed_fn:
all_monitors = [basic_session_run_hooks.FeedFnHook(feed_fn)]
if monitors:
all_monitors.extend(monitors)
self._estimator.fit(input_fn=input_fn,
steps=steps,
max_steps=max_steps,
monitors=all_monitors)
return self
def score(self, x, y, batch_size=128, steps=None, metrics=None, name=None):
input_fn, feed_fn = _get_input_fn(x, y, input_fn=None,
feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
if metrics is not None and not isinstance(metrics, dict):
raise ValueError('Metrics argument should be None or dict. '
'Got %s.' % metrics)
eval_results, global_step = self._estimator._evaluate_model(
input_fn=input_fn,
feed_fn=feed_fn,
steps=steps,
metrics=metrics,
name=name)
if eval_results is not None:
eval_results.update({'global_step': global_step})
return eval_results
def predict(self, x, batch_size=128, outputs=None):
input_fn, feed_fn = _get_input_fn(
x, None, input_fn=None, feed_fn=None, batch_size=batch_size,
shuffle=False, epochs=1)
results = list(
self._estimator._infer_model(
input_fn=input_fn,
feed_fn=feed_fn,
outputs=outputs,
as_iterable=True,
iterate_batches=True))
if not isinstance(results[0], dict):
return np.concatenate([output for output in results], axis=0)
return {
key: np.concatenate(
[output[key] for output in results], axis=0)
for key in results[0]
}
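# Minimal SKCompat usage sketch (illustrative only; `my_estimator` is a
# hypothetical Estimator and `x_train`, `y_train`, `x_test`, `y_test` are
# hypothetical numpy arrays):
#
#   sk_est = SKCompat(my_estimator)
#   sk_est.fit(x_train, y_train, batch_size=64, steps=500)
#   scores = sk_est.score(x_test, y_test, batch_size=64)
#   preds = sk_est.predict(x_test, batch_size=64)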
|
apache-2.0
|
jasontlam/snorkel
|
tutorials/workshop/lib/util.py
|
1
|
3232
|
import pandas as pd
from snorkel.models import StableLabel
from snorkel.db_helpers import reload_annotator_labels
FPATH = 'data/gold_labels.tsv'
def number_of_people(sentence):
active_sequence = False
count = 0
for tag in sentence.ner_tags:
if tag == 'PERSON' and not active_sequence:
active_sequence = True
count += 1
elif tag != 'PERSON' and active_sequence:
active_sequence = False
return count
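# Worked example (hypothetical NER tags): a sentence tagged
# ['O', 'PERSON', 'PERSON', 'O', 'PERSON'] yields a count of 2, because the
# two consecutive PERSON tags are collapsed into a single person mention.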
def load_external_labels(session, candidate_class, annotator_name='gold'):
gold_labels = pd.read_csv(FPATH, sep="\t")
for index, row in gold_labels.iterrows():
# We check if the label already exists, in case this cell was already executed
context_stable_ids = "~~".join([row['person1'], row['person2']])
query = session.query(StableLabel).filter(StableLabel.context_stable_ids == context_stable_ids)
query = query.filter(StableLabel.annotator_name == annotator_name)
if query.count() == 0:
session.add(StableLabel(
context_stable_ids=context_stable_ids,
annotator_name=annotator_name,
value=row['label']))
# Because it's a symmetric relation, load both directions...
context_stable_ids = "~~".join([row['person2'], row['person1']])
query = session.query(StableLabel).filter(StableLabel.context_stable_ids == context_stable_ids)
query = query.filter(StableLabel.annotator_name == annotator_name)
if query.count() == 0:
session.add(StableLabel(
context_stable_ids=context_stable_ids,
annotator_name=annotator_name,
value=row['label']))
# Commit session
session.commit()
# Reload annotator labels
reload_annotator_labels(session, candidate_class, annotator_name, split=1, filter_label_split=False)
reload_annotator_labels(session, candidate_class, annotator_name, split=2, filter_label_split=False)
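# Minimal usage sketch for load_external_labels (illustrative only; `session`
# is assumed to be the SnorkelSession created in the tutorial notebook and
# `Spouse` the candidate subclass being annotated):
#
#   load_external_labels(session, Spouse, annotator_name='gold')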
# create distant supervision subset for workshop
# from lib.viz import display_candidate
# known = []
# dev_cands = session.query(Candidate).filter(Candidate.split == 1).order_by(Candidate.id).all()
# for i in range(L_gold_dev.shape[0]):
# if L_gold_dev[i,0] == 1:
# p1,p2 = dev_cands[i][0].get_span(), dev_cands[i][1].get_span()
# if re.search("(Dr|Mr|Mrs|Sir)",p1 + " "+ p2):
# continue
# if len(p1.split()) > 1 and len(p2.split()) > 1:
# #display_candidate(dev_cands[i])
# known.append( (p1,p2) )
# print len(set(known))
# for c in sorted(set(known)):
# print ",".join(c)
# exercises
def check_exercise_1(subclass):
"""
Check if type is Person
:param subclass:
:return:
"""
v = subclass.__mapper_args__['polymorphic_identity'] == "person"
v &= len(subclass.__argnames__) == 1 and 'person' in subclass.__argnames__
print 'Correct!' if v else 'Sorry, try again!'
def check_exercise_2(c):
s1 = c[0].get_span()
s2 = c[1].get_span()
print 'Correct!' if "{} {}".format(s1, s2) == "Katrina Dawson Paul Smith" else 'Sorry, try again!'
|
apache-2.0
|
ucloud/uai-sdk
|
examples/caffe/train/faster-rcnn/code/tools/demo.py
|
10
|
5028
|
#!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
CLASSES = ('__background__',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
NETS = {'vgg16': ('VGG16',
'VGG16_faster_rcnn_final.caffemodel'),
'zf': ('ZF',
'ZF_faster_rcnn_final.caffemodel')}
def vis_detections(im, class_name, dets, thresh=0.5):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
ax.set_title(('{} detections with '
'p({} | box) >= {:.1f}').format(class_name, class_name,
thresh),
fontsize=14)
plt.axis('off')
plt.tight_layout()
plt.draw()
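# Illustrative sketch of the `dets` layout expected by vis_detections
# (hypothetical values); each row is [x1, y1, x2, y2, score] in pixels:
#
#   dets = np.array([[ 50.,  60., 200., 220., 0.93],
#                    [ 10.,  10.,  80.,  90., 0.41]], dtype=np.float32)
#   vis_detections(im, 'person', dets, thresh=0.5)  # only the first row drawn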
def demo(net, image_name):
"""Detect object classes in an image using pre-computed object proposals."""
# Load the demo image
im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
scores, boxes = im_detect(net, im)
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
# Visualize detections for each class
CONF_THRESH = 0.8
NMS_THRESH = 0.3
for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1 # because we skipped background
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
vis_detections(im, cls, dets, thresh=CONF_THRESH)
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Faster R-CNN demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16]',
choices=NETS.keys(), default='vgg16')
args = parser.parse_args()
return args
if __name__ == '__main__':
cfg.TEST.HAS_RPN = True # Use RPN for proposals
args = parse_args()
prototxt = os.path.join(cfg.MODELS_DIR, NETS[args.demo_net][0],
'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models',
NETS[args.demo_net][1])
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\nDid you run ./data/script/'
'fetch_faster_rcnn_models.sh?').format(caffemodel))
if args.cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
cfg.GPU_ID = args.gpu_id
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
print '\n\nLoaded network {:s}'.format(caffemodel)
# Warmup on a dummy image
im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
for i in xrange(2):
_, _= im_detect(net, im)
im_names = ['000456.jpg', '000542.jpg', '001150.jpg',
'001763.jpg', '004545.jpg']
for im_name in im_names:
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Demo for data/demo/{}'.format(im_name)
demo(net, im_name)
plt.show()
|
apache-2.0
|
celiafish/scikit-xray
|
doc/sphinxext/tests/test_docscrape.py
|
12
|
14257
|
# -*- encoding:utf-8 -*-
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
from docscrape_sphinx import SphinxDocString, SphinxClassDoc
from nose.tools import *
doc_txt = '''\
numpy.multivariate_normal(mean, cov, shape=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N,N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
Warnings
--------
Certain warnings apply.
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
See Also
--------
some, other, funcs
otherfunc : relationship
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print(x.shape)
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print(list( (x[0,0,:] - mean) < 0.6 ))
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss
'''
doc = NumpyDocString(doc_txt)
def test_signature():
assert doc['Signature'].startswith('numpy.multivariate_normal(')
assert doc['Signature'].endswith('shape=None)')
def test_summary():
assert doc['Summary'][0].startswith('Draw values')
assert doc['Summary'][-1].endswith('covariance.')
def test_extended_summary():
assert doc['Extended Summary'][0].startswith('The multivariate normal')
def test_parameters():
assert_equal(len(doc['Parameters']), 3)
assert_equal(
[n for n, _, _ in doc['Parameters']], ['mean', 'cov', 'shape'])
arg, arg_type, desc = doc['Parameters'][1]
assert_equal(arg_type, '(N,N) ndarray')
assert desc[0].startswith('Covariance matrix')
assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3'
def test_returns():
assert_equal(len(doc['Returns']), 1)
arg, arg_type, desc = doc['Returns'][0]
assert_equal(arg, 'out')
assert_equal(arg_type, 'ndarray')
assert desc[0].startswith('The drawn samples')
assert desc[-1].endswith('distribution.')
def test_notes():
assert doc['Notes'][0].startswith('Instead')
assert doc['Notes'][-1].endswith('definite.')
assert_equal(len(doc['Notes']), 17)
def test_references():
assert doc['References'][0].startswith('..')
assert doc['References'][-1].endswith('2001.')
def test_examples():
assert doc['Examples'][0].startswith('>>>')
assert doc['Examples'][-1].endswith('True]')
def test_index():
assert_equal(doc['index']['default'], 'random')
print(doc['index'])
assert_equal(len(doc['index']), 2)
assert_equal(len(doc['index']['refguide']), 2)
def non_blank_line_by_line_compare(a, b):
a = [l for l in a.split('\n') if l.strip()]
b = [l for l in b.split('\n') if l.strip()]
for n, line in enumerate(a):
if not line == b[n]:
raise AssertionError("Lines %s of a and b differ: "
"\n>>> %s\n<<< %s\n" %
(n, line, b[n]))
def test_str():
non_blank_line_by_line_compare(str(doc),
"""numpy.multivariate_normal(mean, cov, shape=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N,N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
Warnings
--------
Certain warnings apply.
See Also
--------
`some`_, `other`_, `funcs`_
`otherfunc`_
relationship
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print(x.shape)
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print(list( (x[0,0,:] - mean) < 0.6 ))
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss""")
def test_sphinx_str():
sphinx_doc = SphinxDocString(doc_txt)
non_blank_line_by_line_compare(str(sphinx_doc),
"""
.. index:: random
single: random;distributions, random;gauss
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
:Parameters:
**mean** : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
**cov** : (N,N) ndarray
Covariance matrix of the distribution.
**shape** : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
:Returns:
**out** : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
.. warning::
Certain warnings apply.
.. seealso::
:obj:`some`, :obj:`other`, :obj:`funcs`
:obj:`otherfunc`
relationship
.. rubric:: Notes
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
.. rubric:: References
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
.. only:: latex
[1]_, [2]_
.. rubric:: Examples
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print(x.shape)
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print(list( (x[0,0,:] - mean) < 0.6 ))
[True, True]
""")
doc2 = NumpyDocString("""
Returns array of indices of the maximum values along the given axis.
Parameters
----------
a : {array_like}
Array to look in.
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis""")
def test_parameters_without_extended_description():
assert_equal(len(doc2['Parameters']), 2)
doc3 = NumpyDocString("""
my_signature(*params, **kwds)
Return this and that.
""")
def test_escape_stars():
signature = str(doc3).split('\n')[0]
assert_equal(signature, 'my_signature(\*params, \*\*kwds)')
doc4 = NumpyDocString(
"""a.conj()
Return an array with all complex-valued elements conjugated.""")
def test_empty_extended_summary():
assert_equal(doc4['Extended Summary'], [])
doc5 = NumpyDocString(
"""
a.something()
Raises
------
LinAlgException
If array is singular.
""")
def test_raises():
assert_equal(len(doc5['Raises']), 1)
name, _, desc = doc5['Raises'][0]
assert_equal(name, 'LinAlgException')
assert_equal(desc, ['If array is singular.'])
def test_see_also():
doc6 = NumpyDocString(
"""
z(x,theta)
See Also
--------
func_a, func_b, func_c
func_d : some equivalent func
foo.func_e : some other func over
multiple lines
func_f, func_g, :meth:`func_h`, func_j,
func_k
:obj:`baz.obj_q`
:class:`class_j`: fubar
foobar
""")
assert len(doc6['See Also']) == 12
for func, desc, role in doc6['See Also']:
if func in ('func_a', 'func_b', 'func_c', 'func_f',
'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'):
assert(not desc)
else:
assert(desc)
if func == 'func_h':
assert role == 'meth'
elif func == 'baz.obj_q':
assert role == 'obj'
elif func == 'class_j':
assert role == 'class'
else:
assert role is None
if func == 'func_d':
assert desc == ['some equivalent func']
elif func == 'foo.func_e':
assert desc == ['some other func over', 'multiple lines']
elif func == 'class_j':
assert desc == ['fubar', 'foobar']
def test_see_also_print():
class Dummy(object):
"""
See Also
--------
func_a, func_b
func_c : some relationship
goes here
func_d
"""
pass
obj = Dummy()
s = str(FunctionDoc(obj, role='func'))
assert(':func:`func_a`, :func:`func_b`' in s)
assert(' some relationship' in s)
assert(':func:`func_d`' in s)
doc7 = NumpyDocString("""
Doc starts on second line.
""")
def test_empty_first_line():
assert doc7['Summary'][0].startswith('Doc starts')
def test_no_summary():
str(SphinxDocString("""
Parameters
----------"""))
def test_unicode():
doc = SphinxDocString("""
öäöäöäöäöåååå
öäöäöäööäååå
Parameters
----------
ååå : äää
ööö
Returns
-------
ååå : ööö
äää
""")
assert doc['Summary'][0] == u'öäöäöäöäöåååå'.encode('utf-8')
def test_plot_examples():
cfg = dict(use_plots=True)
doc = SphinxDocString("""
Examples
--------
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3],[4,5,6])
>>> plt.show()
""", config=cfg)
assert 'plot::' in str(doc), str(doc)
doc = SphinxDocString("""
Examples
--------
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3],[4,5,6])
plt.show()
""", config=cfg)
assert str(doc).count('plot::') == 1, str(doc)
def test_class_members():
class Dummy(object):
"""
Dummy class.
"""
def spam(self, a, b):
"""Spam\n\nSpam spam."""
pass
def ham(self, c, d):
"""Cheese\n\nNo cheese."""
pass
for cls in (ClassDoc, SphinxClassDoc):
doc = cls(Dummy, config=dict(show_class_members=False))
assert 'Methods' not in str(doc), (cls, str(doc))
assert 'spam' not in str(doc), (cls, str(doc))
assert 'ham' not in str(doc), (cls, str(doc))
doc = cls(Dummy, config=dict(show_class_members=True))
assert 'Methods' in str(doc), (cls, str(doc))
assert 'spam' in str(doc), (cls, str(doc))
assert 'ham' in str(doc), (cls, str(doc))
if cls is SphinxClassDoc:
assert '.. autosummary::' in str(doc), str(doc)
|
bsd-3-clause
|
miloharper/neural-network-animation
|
matplotlib/tests/test_figure.py
|
9
|
4546
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
from nose.tools import assert_equal, assert_true, assert_raises
from matplotlib.testing.decorators import image_comparison, cleanup
from matplotlib.axes import Axes
import matplotlib.pyplot as plt
@cleanup
def test_figure_label():
# pyplot figure creation, selection and closing with figure label and
# number
plt.close('all')
plt.figure('today')
plt.figure(3)
plt.figure('tomorrow')
plt.figure()
plt.figure(0)
plt.figure(1)
plt.figure(3)
assert_equal(plt.get_fignums(), [0, 1, 3, 4, 5])
assert_equal(plt.get_figlabels(), ['', 'today', '', 'tomorrow', ''])
plt.close(10)
plt.close()
plt.close(5)
plt.close('tomorrow')
assert_equal(plt.get_fignums(), [0, 1])
assert_equal(plt.get_figlabels(), ['', 'today'])
@image_comparison(baseline_images=['figure_today'])
def test_figure():
# named figure support
fig = plt.figure('today')
ax = fig.add_subplot(111)
ax.set_title(fig.get_label())
ax.plot(list(xrange(5)))
# plot red line in a different figure.
plt.figure('tomorrow')
plt.plot([0, 1], [1, 0], 'r')
# Return to the original; make sure the red line is not there.
plt.figure('today')
plt.close('tomorrow')
@cleanup
def test_gca():
fig = plt.figure()
ax1 = fig.add_axes([0, 0, 1, 1])
assert_true(fig.gca(projection='rectilinear') is ax1)
assert_true(fig.gca() is ax1)
ax2 = fig.add_subplot(121, projection='polar')
assert_true(fig.gca() is ax2)
assert_true(fig.gca(polar=True) is ax2)
ax3 = fig.add_subplot(122)
assert_true(fig.gca() is ax3)
# the final request for a polar axes will end up creating one
# with a spec of 111.
assert_true(fig.gca(polar=True) is not ax3)
assert_true(fig.gca(polar=True) is not ax2)
assert_equal(fig.gca().get_geometry(), (1, 1, 1))
fig.sca(ax1)
assert_true(fig.gca(projection='rectilinear') is ax1)
assert_true(fig.gca() is ax1)
@image_comparison(baseline_images=['figure_suptitle'])
def test_suptitle():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
fig.suptitle('hello', color='r')
fig.suptitle('title', color='g', rotation='30')
@image_comparison(baseline_images=['alpha_background'],
# only test png and svg. The PDF output appears correct,
# but Ghostscript does not preserve the background color.
extensions=['png', 'svg'],
savefig_kwarg={'facecolor': (0, 1, 0.4),
'edgecolor': 'none'})
def test_alpha():
# We want an image which has a background color and an
# alpha of 0.4.
fig = plt.figure(figsize=[2, 1])
fig.set_facecolor((0, 1, 0.4))
fig.patch.set_alpha(0.4)
import matplotlib.patches as mpatches
fig.patches.append(mpatches.CirclePolygon([20, 20],
radius=15,
alpha=0.6,
facecolor='red'))
@cleanup
def test_too_many_figures():
import warnings
with warnings.catch_warnings(record=True) as w:
for i in range(22):
fig = plt.figure()
assert len(w) == 1
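# The single warning asserted in test_too_many_figures above is presumably
# matplotlib's "too many open figures" RuntimeWarning, whose threshold is set
# by the ``figure.max_open_warning`` rcParam (20 by default); the test expects
# that opening 22 figures in the loop records it exactly once.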
def test_iterability_axes_argument():
# This is a regression test for matplotlib/matplotlib#3196. If one of the
# arguments returned by _as_mpl_axes defines __getitem__ but is not
# iterable, this would raise an exception. This is because we check
# whether the arguments are iterable, and if so we try and convert them
# to a tuple. However, the ``iterable`` function returns True if
# __getitem__ is present, but some classes can define __getitem__ without
# being iterable. The tuple conversion is now done in a try...except in
# case it fails.
class MyAxes(Axes):
def __init__(self, *args, **kwargs):
kwargs.pop('myclass', None)
return Axes.__init__(self, *args, **kwargs)
class MyClass(object):
def __getitem__(self, item):
if item != 'a':
raise ValueError("item should be a")
def _as_mpl_axes(self):
return MyAxes, {'myclass': self}
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1, projection=MyClass())
plt.close(fig)
if __name__ == "__main__":
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
|
mit
|
tetherless-world/satoru
|
whyis/interpreter.py
|
2
|
57286
|
import rdflib
from datetime import datetime
from nanopub import Nanopublication
import logging
import sys
import pandas as pd
import configparser
import hashlib
from .autonomic.update_change_service import UpdateChangeService
from whyis.namespace import whyis, prov, sio
class Interpreter(UpdateChangeService):
kb = ":"
cb_fn = None
timeline_fn = None
data_fn = None
prefix_fn = "prefixes.txt"
prefixes = {}
studyRef = None
unit_code_list = []
unit_uri_list = []
unit_label_list = []
explicit_entry_list = []
virtual_entry_list = []
explicit_entry_tuples = []
virtual_entry_tuples = []
cb_tuple = {}
timeline_tuple = {}
config = configparser.ConfigParser()
def __init__(self, config_fn=None): # prefixes should be
if config_fn is not None:
try:
self.config.read(config_fn)
except Exception as e:
logging.exception("Error: Unable to open configuration file: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Prefixes', 'prefixes'):
self.prefix_fn = self.config.get('Prefixes', 'prefixes')
# prefix_file = open(self.prefix_fn,"r")
# self.prefixes = prefix_file.readlines()
prefix_file = pd.read_csv(self.prefix_fn, dtype=object)
try:
for row in prefix_file.itertuples():
self.prefixes[row.prefix] = row.url
except Exception as e:
logging.exception("Error: Something went wrong when trying to read the Prefix File: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Prefixes', 'base_uri'):
self.kb = self.config.get('Prefixes', 'base_uri')
if self.config.has_option('Source Files', 'dictionary'):
dm_fn = self.config.get('Source Files', 'dictionary')
try:
dm_file = pd.read_csv(dm_fn, dtype=object)
try: # Populate virtual and explicit entry lists
for row in dm_file.itertuples():
if pd.isnull(row.Column):
logging.exception("Error: The SDD must have a column named 'Column'")
sys.exit(1)
if row.Column.startswith("??"):
self.virtual_entry_list.append(row)
else:
self.explicit_entry_list.append(row)
except Exception as e:
logging.exception(
"Error: Something went wrong when trying to read the Dictionary Mapping File: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
except Exception as e:
logging.exception("Error: The specified Dictionary Mapping file does not exist: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Source Files', 'codebook'):
self.cb_fn = self.config.get('Source Files', 'codebook')
if self.cb_fn is not None:
try:
cb_file = pd.read_csv(self.cb_fn, dtype=object)
try:
inner_tuple_list = []
for row in cb_file.itertuples():
if (pd.notnull(row.Column) and row.Column not in self.cb_tuple):
inner_tuple_list = []
inner_tuple = {}
inner_tuple["Code"] = row.Code
if pd.notnull(row.Label):
inner_tuple["Label"] = row.Label
if pd.notnull(row.Class):
inner_tuple["Class"] = row.Class
if "Resource" in row and pd.notnull(row.Resource):
inner_tuple["Resource"] = row.Resource
inner_tuple_list.append(inner_tuple)
self.cb_tuple[row.Column] = inner_tuple_list
except Exception as e:
logging.warning("Warning: Unable to process Codebook file: ")
if hasattr(e, 'message'):
logging.warning(e.message)
else:
logging.warning(e)
except Exception as e:
logging.exception("Error: The specified Codebook file does not exist: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Source Files', 'timeline'):
self.timeline_fn = self.config.get('Source Files', 'timeline')
if self.timeline_fn is not None:
try:
timeline_file = pd.read_csv(self.timeline_fn, dtype=object)
try:
inner_tuple_list = []
for row in timeline_file.itertuples():
if pd.notnull(row.Name) and row.Name not in self.timeline_tuple:
inner_tuple_list = []
inner_tuple = {}
inner_tuple["Type"] = row.Type
if pd.notnull(row.Label):
inner_tuple["Label"] = row.Label
if pd.notnull(row.Start):
inner_tuple["Start"] = row.Start
if pd.notnull(row.End):
inner_tuple["End"] = row.End
if pd.notnull(row.Unit):
inner_tuple["Unit"] = row.Unit
if pd.notnull(row.inRelationTo):
inner_tuple["inRelationTo"] = row.inRelationTo
inner_tuple_list.append(inner_tuple)
self.timeline_tuple[row.Name] = inner_tuple_list
except Exception as e:
logging.warning("Warning: Unable to process Timeline file: ")
if hasattr(e, 'message'):
logging.warning(e.message)
else:
logging.warning(e)
except Exception as e:
logging.exception("Error: The specified Timeline file does not exist: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
if self.config.has_option('Source Files', 'code_mappings'):
cmap_fn = self.config.get('Source Files', 'code_mappings')
code_mappings_reader = pd.read_csv(cmap_fn)
for code_row in code_mappings_reader.itertuples():
if pd.notnull(code_row.code):
self.unit_code_list.append(code_row.code)
if pd.notnull(code_row.uri):
self.unit_uri_list.append(code_row.uri)
if pd.notnull(code_row.label):
self.unit_label_list.append(code_row.label)
if self.config.has_option('Source Files', 'data_file'):
self.data_fn = self.config.get('Source Files', 'data_file')
def getInputClass(self):
return whyis.SemanticDataDictionary
def getOutputClass(self):
return whyis.SemanticDataDictionaryInterpretation
def get_query(self):
return '''SELECT ?s WHERE { ?s ?p ?o .} LIMIT 1\n'''
def process(self, i, o):
print("Processing SDD...")
self.app.db.store.nsBindings = {}
npub = Nanopublication(store=o.graph.store)
# prefixes={}
# prefixes.update(self.prefixes)
# prefixes.update(self.app.NS.prefixes)
self.writeVirtualEntryNano(npub)
self.writeExplicitEntryNano(npub)
self.interpretData(npub)
def parseString(self, input_string, delim):
my_list = input_string.split(delim)
my_list = [element.strip() for element in my_list]
return my_list
def rdflibConverter(self, input_word):
if "http" in input_word:
return rdflib.term.URIRef(input_word)
if ':' in input_word:
word_list = input_word.split(":")
term = self.prefixes[word_list[0]] + word_list[1]
return rdflib.term.URIRef(term)
return rdflib.Literal(input_word, datatype=rdflib.XSD.string)
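# Illustrative conversions performed by rdflibConverter (the 'sio' expansion
# below assumes self.prefixes contains such an entry):
#   'http://example.org/x' -> rdflib.term.URIRef('http://example.org/x')
#   'sio:hasValue'         -> rdflib.term.URIRef(self.prefixes['sio'] + 'hasValue')
#   'plain text'           -> rdflib.Literal('plain text', datatype=xsd:string)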
def codeMapper(self, input_word):
unitVal = input_word
for unit_label in self.unit_label_list:
if unit_label == input_word:
unit_index = self.unit_label_list.index(unit_label)
unitVal = self.unit_uri_list[unit_index]
for unit_code in self.unit_code_list:
if unit_code == input_word:
unit_index = self.unit_code_list.index(unit_code)
unitVal = self.unit_uri_list[unit_index]
return unitVal
def convertVirtualToKGEntry(self, *args):
if args[0][:2] == "??":
if self.studyRef is not None:
if args[0] == self.studyRef:
return self.prefixes[self.kb] + args[0][2:]
if len(args) == 2:
return self.prefixes[self.kb] + args[0][2:] + "-" + args[1]
return self.prefixes[self.kb] + args[0][2:]
if ':' not in args[0]:
# Check for entry in column list
for item in self.explicit_entry_list:
if args[0] == item.Column:
if len(args) == 2:
return self.prefixes[self.kb] + args[0].replace(" ", "_").replace(",", "").replace("(",
"").replace(
")", "").replace("/", "-").replace("\\", "-") + "-" + args[1]
return self.prefixes[self.kb] + args[0].replace(" ", "_").replace(",", "").replace("(", "").replace(
")", "").replace("/", "-").replace("\\", "-")
return '"' + args[0] + "\"^^xsd:string"
return args[0]
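# Sketch of the naming convention implemented above ('??patient' is a made-up
# example; <kb> stands for self.prefixes[self.kb]):
#   convertVirtualToKGEntry('??patient')       -> <kb>patient
#   convertVirtualToKGEntry('??patient', '42') -> <kb>patient-42
# A bare name matching an explicit entry column is sanitised (spaces -> '_',
# commas/parentheses stripped, slashes -> '-') and prefixed the same way; any
# other bare string becomes an xsd:string literal, and strings that already
# contain ':' are returned unchanged.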
def checkVirtual(self, input_word):
try:
if input_word[:2] == "??":
return True
return False
except Exception as e:
logging.exception("Something went wrong in Interpreter.checkVirtual(): ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
def isfloat(self, value):
try:
float(value)
return True
except ValueError:
return False
def writeVirtualEntryNano(self, nanopub):
for item in self.virtual_entry_list:
virtual_tuple = {}
term = rdflib.term.URIRef(self.prefixes[self.kb] + str(item.Column[2:]))
nanopub.assertion.add((term, rdflib.RDF.type, rdflib.OWL.Class))
nanopub.assertion.add(
(term, rdflib.RDFS.label, rdflib.Literal(str(item.Column[2:]), datatype=rdflib.XSD.string)))
# Set the rdf:type of the virtual row to either the Attribute or Entity value (or else owl:Individual)
if (pd.notnull(item.Entity)) and (pd.isnull(item.Attribute)):
if ',' in item.Entity:
entities = self.parseString(item.Entity, ',')
for entity in entities:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(entity))))
else:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(item.Entity))))
virtual_tuple["Column"] = item.Column
virtual_tuple["Entity"] = self.codeMapper(item.Entity)
if virtual_tuple["Entity"] == "hasco:Study":
self.studyRef = item.Column
virtual_tuple["Study"] = item.Column
elif (pd.isnull(item.Entity)) and (pd.notnull(item.Attribute)):
if ',' in item.Attribute:
attributes = self.parseString(item.Attribute, ',')
for attribute in attributes:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(attribute))))
else:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(item.Attribute))))
virtual_tuple["Column"] = item.Column
virtual_tuple["Attribute"] = self.codeMapper(item.Attribute)
else:
logging.warning(
"Warning: Virtual entry not assigned an Entity or Attribute value, or was assigned both.")
virtual_tuple["Column"] = item.Column
# If there is a value in the inRelationTo column ...
if pd.notnull(item.inRelationTo):
virtual_tuple["inRelationTo"] = item.inRelationTo
# If there is a value in the Relation column but not the Role column ...
if (pd.notnull(item.Relation)) and (pd.isnull(item.Role)):
nanopub.assertion.add((term, self.rdflibConverter(item.Relation),
self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo))))
virtual_tuple["Relation"] = item.Relation
# If there is a value in the Role column but not the Relation column ...
elif (pd.isnull(item.Relation)) and (pd.notnull(item.Role)):
role = rdflib.BNode()
nanopub.assertion.add(
(role, rdflib.RDF.type, self.rdflibConverter(self.convertVirtualToKGEntry(item.Role))))
nanopub.assertion.add(
(role, sio.inRelationTo, self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo))))
nanopub.assertion.add((term, sio.hasRole, role))
virtual_tuple["Role"] = item.Role
# If there is a value in the Role and Relation columns ...
elif (pd.notnull(item.Relation)) and (pd.notnull(item.Role)):
virtual_tuple["Relation"] = item.Relation
virtual_tuple["Role"] = item.Role
nanopub.assertion.add(
(term, sio.hasRole, self.rdflibConverter(self.convertVirtualToKGEntry(item.Role))))
nanopub.assertion.add((term, self.rdflibConverter(item.Relation),
self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo))))
nanopub.provenance.add((term, prov.generatedAtTime, rdflib.Literal(
"{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year, datetime.utcnow().month,
datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(
datetime.utcnow().hour, datetime.utcnow().minute, datetime.utcnow().second) + "Z",
datatype=rdflib.XSD.dateTime)))
if pd.notnull(item.wasDerivedFrom):
if ',' in item.wasDerivedFrom:
derivedFromTerms = self.parseString(item.wasDerivedFrom, ',')
for derivedFromTerm in derivedFromTerms:
nanopub.provenance.add((term, prov.wasDerivedFrom,
self.rdflibConverter(self.convertVirtualToKGEntry(derivedFromTerm))))
else:
nanopub.provenance.add((term, prov.wasDerivedFrom,
self.rdflibConverter(self.convertVirtualToKGEntry(item.wasDerivedFrom))))
virtual_tuple["wasDerivedFrom"] = item.wasDerivedFrom
if pd.notnull(item.wasGeneratedBy):
if ',' in item.wasGeneratedBy:
generatedByTerms = self.parseString(item.wasGeneratedBy, ',')
for generatedByTerm in generatedByTerms:
nanopub.provenance.add((term, prov.wasGeneratedBy,
self.rdflibConverter(self.convertVirtualToKGEntry(generatedByTerm))))
else:
nanopub.provenance.add((term, prov.wasGeneratedBy,
self.rdflibConverter(self.convertVirtualToKGEntry(item.wasGeneratedBy))))
virtual_tuple["wasGeneratedBy"] = item.wasGeneratedBy
self.virtual_entry_tuples.append(virtual_tuple)
if self.timeline_fn is not None:
for key in self.timeline_tuple:
tl_term = self.rdflibConverter(self.convertVirtualToKGEntry(key))
nanopub.assertion.add((tl_term, rdflib.RDF.type, rdflib.OWL.Class))
for timeEntry in self.timeline_tuple[key]:
if 'Type' in timeEntry:
nanopub.assertion.add(
(tl_term, rdflib.RDFS.subClassOf, self.rdflibConverter(timeEntry['Type'])))
if 'Label' in timeEntry:
nanopub.assertion.add((tl_term, rdflib.RDFS.label,
rdflib.Literal(str(timeEntry['Label']), datatype=rdflib.XSD.string)))
if 'Start' in timeEntry and 'End' in timeEntry and timeEntry['Start'] == timeEntry['End']:
nanopub.assertion.add((tl_term, sio.hasValue, self.rdflibConverter(str(timeEntry['Start']))))
if 'Start' in timeEntry:
start_time = rdflib.BNode()
nanopub.assertion.add((start_time, sio.hasValue, self.rdflibConverter(str(timeEntry['Start']))))
nanopub.assertion.add((tl_term, sio.hasStartTime, start_time))
if 'End' in timeEntry:
end_time = rdflib.BNode()
nanopub.assertion.add((end_time, sio.hasValue, self.rdflibConverter(str(timeEntry['End']))))
nanopub.assertion.add((tl_term, sio.hasEndTime, end_time))
if 'Unit' in timeEntry:
nanopub.assertion.add(
(tl_term, sio.hasUnit, self.rdflibConverter(self.codeMapper(timeEntry['Unit']))))
if 'inRelationTo' in timeEntry:
nanopub.assertion.add((tl_term, sio.inRelationTo, self.rdflibConverter(
self.convertVirtualToKGEntry(timeEntry['inRelationTo']))))
nanopub.provenance.add((tl_term, prov.generatedAtTime, rdflib.Literal(
"{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year, datetime.utcnow().month,
datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(
datetime.utcnow().hour, datetime.utcnow().minute, datetime.utcnow().second) + "Z",
datatype=rdflib.XSD.dateTime)))
def writeExplicitEntryNano(self, nanopub):
for item in self.explicit_entry_list:
explicit_entry_tuple = {}
term = rdflib.term.URIRef(self.prefixes[self.kb] + str(
item.Column.replace(" ", "_").replace(",", "").replace("(", "").replace(")", "").replace("/",
"-").replace(
"\\", "-")))
nanopub.assertion.add((term, rdflib.RDF.type, rdflib.OWL.Class))
if pd.notnull(item.Attribute):
if ',' in item.Attribute:
attributes = self.parseString(item.Attribute, ',')
for attribute in attributes:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(attribute))))
else:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(item.Attribute))))
explicit_entry_tuple["Column"] = item.Column
explicit_entry_tuple["Attribute"] = self.codeMapper(item.Attribute)
elif pd.notnull(item.Entity):
if ',' in item.Entity:
entities = self.parseString(item.Entity, ',')
for entity in entities:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(entity))))
else:
nanopub.assertion.add(
(term, rdflib.RDFS.subClassOf, self.rdflibConverter(self.codeMapper(item.Entity))))
explicit_entry_tuple["Column"] = item.Column
explicit_entry_tuple["Entity"] = self.codeMapper(item.Entity)
else:
nanopub.assertion.add((term, rdflib.RDFS.subClassOf, sio.Attribute))
explicit_entry_tuple["Column"] = item.Column
explicit_entry_tuple["Attribute"] = self.codeMapper("sio:Attribute")
logging.warning("Warning: Explicit entry not assigned an Attribute or Entity value.")
if pd.notnull(item.attributeOf):
nanopub.assertion.add(
(term, sio.isAttributeOf, self.rdflibConverter(self.convertVirtualToKGEntry(item.attributeOf))))
explicit_entry_tuple["isAttributeOf"] = self.convertVirtualToKGEntry(item.attributeOf)
else:
logging.warning("Warning: Explicit entry not assigned an isAttributeOf value.")
if pd.notnull(item.Unit):
nanopub.assertion.add(
(term, sio.hasUnit, self.rdflibConverter(self.convertVirtualToKGEntry(self.codeMapper(item.Unit)))))
explicit_entry_tuple["Unit"] = self.convertVirtualToKGEntry(self.codeMapper(item.Unit))
if pd.notnull(item.Time):
nanopub.assertion.add(
(term, sio.existsAt, self.rdflibConverter(self.convertVirtualToKGEntry(item.Time))))
explicit_entry_tuple["Time"] = item.Time
if pd.notnull(item.inRelationTo):
explicit_entry_tuple["inRelationTo"] = item.inRelationTo
# If there is a value in the Relation column but not the Role column ...
if (pd.notnull(item.Relation)) and (pd.isnull(item.Role)):
nanopub.assertion.add((term, self.rdflibConverter(item.Relation),
self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo))))
explicit_entry_tuple["Relation"] = item.Relation
# If there is a value in the Role column but not the Relation column ...
elif (pd.isnull(item.Relation)) and (pd.notnull(item.Role)):
role = rdflib.BNode()
nanopub.assertion.add(
(role, rdflib.RDF.type, self.rdflibConverter(self.convertVirtualToKGEntry(item.Role))))
nanopub.assertion.add(
(role, sio.inRelationTo, self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo))))
nanopub.assertion.add((term, sio.hasRole, role))
explicit_entry_tuple["Role"] = item.Role
# If there is a value in the Role and Relation columns ...
elif (pd.notnull(item.Relation)) and (pd.notnull(item.Role)):
nanopub.assertion.add(
(term, sio.hasRole, self.rdflibConverter(self.convertVirtualToKGEntry(item.Role))))
nanopub.assertion.add((term, self.rdflibConverter(item.Relation),
self.rdflibConverter(self.convertVirtualToKGEntry(item.inRelationTo))))
explicit_entry_tuple["Relation"] = item.Relation
explicit_entry_tuple["Role"] = item.Role
if ("Label" in item and pd.notnull(item.Label)):
nanopub.assertion.add((term, rdflib.RDFS.label, self.rdflibConverter(item.Label)))
explicit_entry_tuple["Label"] = item.Label
if ("Comment" in item and pd.notnull(item.Comment)):
nanopub.assertion.add((term, rdflib.RDFS.comment, self.rdflibConverter(item.Comment)))
explicit_entry_tuple["Comment"] = item.Comment
nanopub.provenance.add((term, prov.generatedAtTime, rdflib.Literal(
"{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year, datetime.utcnow().month,
datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(
datetime.utcnow().hour, datetime.utcnow().minute, datetime.utcnow().second) + "Z",
datatype=rdflib.XSD.dateTime)))
if pd.notnull(item.wasDerivedFrom):
if ',' in item.wasDerivedFrom:
derivedFromTerms = self.parseString(item.wasDerivedFrom, ',')
for derivedFromTerm in derivedFromTerms:
nanopub.provenance.add((term, prov.wasDerivedFrom,
self.rdflibConverter(self.convertVirtualToKGEntry(derivedFromTerm))))
else:
nanopub.provenance.add((term, prov.wasDerivedFrom,
self.rdflibConverter(self.convertVirtualToKGEntry(item.wasDerivedFrom))))
explicit_entry_tuple["wasDerivedFrom"] = item.wasDerivedFrom
if pd.notnull(item.wasGeneratedBy):
if ',' in item.wasGeneratedBy:
generatedByTerms = self.parseString(item.wasGeneratedBy, ',')
for generatedByTerm in generatedByTerms:
nanopub.provenance.add((term, prov.wasGeneratedBy,
self.rdflibConverter(self.convertVirtualToKGEntry(generatedByTerm))))
else:
nanopub.provenance.add((term, prov.wasGeneratedBy,
self.rdflibConverter(self.convertVirtualToKGEntry(item.wasGeneratedBy))))
explicit_entry_tuple["wasGeneratedBy"] = item.wasGeneratedBy
self.explicit_entry_tuples.append(explicit_entry_tuple)
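# Each dictionary appended to explicit_entry_tuples mirrors the assertions made
# above (Column plus any of Attribute, Entity, isAttributeOf, Unit, Time,
# inRelationTo, Relation, Role, Label, Comment, wasDerivedFrom, wasGeneratedBy);
# interpretData later replays these tuples once per data row to mint the
# row-level individuals.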
def writeVirtualEntry(self, nanopub, vref_list, v_column, index):
term = self.rdflibConverter(self.convertVirtualToKGEntry(v_column, index))
try:
if self.timeline_fn is not None:
if v_column in self.timeline_tuple:
nanopub.assertion.add(
(term, rdflib.RDF.type, self.rdflibConverter(self.convertVirtualToKGEntry(v_column))))
for timeEntry in self.timeline_tuple[v_column]:
if 'Type' in timeEntry:
nanopub.assertion.add((term, rdflib.RDF.type, self.rdflibConverter(timeEntry['Type'])))
if 'Label' in timeEntry:
nanopub.assertion.add((term, rdflib.RDFS.label,
rdflib.Literal(str(timeEntry['Label']), datatype=rdflib.XSD.string)))
if 'Start' in timeEntry and 'End' in timeEntry and timeEntry['Start'] == timeEntry['End']:
nanopub.assertion.add((term, sio.hasValue, self.rdflibConverter(str(timeEntry['Start']))))
if 'Start' in timeEntry:
start_time = rdflib.BNode()
nanopub.assertion.add(
(start_time, sio.hasValue, self.rdflibConverter(str(timeEntry['Start']))))
nanopub.assertion.add((term, sio.hasStartTime, start_time))
if 'End' in timeEntry:
end_time = rdflib.BNode()
nanopub.assertion.add((end_time, sio.hasValue, self.rdflibConverter(str(timeEntry['End']))))
nanopub.assertion.add((term, sio.hasEndTime, end_time))
if 'Unit' in timeEntry:
nanopub.assertion.add(
(term, sio.hasUnit, self.rdflibConverter(self.codeMapper(timeEntry['Unit']))))
if 'inRelationTo' in timeEntry:
nanopub.assertion.add((term, sio.inRelationTo, self.rdflibConverter(
self.convertVirtualToKGEntry(timeEntry['inRelationTo']))))
if self.checkVirtual(timeEntry['inRelationTo']) and timeEntry[
'inRelationTo'] not in vref_list:
vref_list.append(timeEntry['inRelationTo'])
for v_tuple in self.virtual_entry_tuples:
if v_tuple["Column"] == v_column:
if "Study" in v_tuple:
continue
else:
v_term = rdflib.term.URIRef(self.prefixes[self.kb] + str(v_tuple["Column"][2:]) + "-" + index)
nanopub.assertion.add((v_term, rdflib.RDF.type,
rdflib.term.URIRef(self.prefixes[self.kb] + str(v_tuple["Column"][2:]))))
if "Entity" in v_tuple:
if ',' in v_tuple["Entity"]:
entities = self.parseString(v_tuple["Entity"], ',')
for entity in entities:
nanopub.assertion.add(
(term, rdflib.RDF.type, self.rdflibConverter(self.codeMapper(entity))))
else:
nanopub.assertion.add(
(term, rdflib.RDF.type, self.rdflibConverter(self.codeMapper(v_tuple["Entity"]))))
if "Attribute" in v_tuple:
if ',' in v_tuple["Attribute"]:
attributes = self.parseString(v_tuple["Attribute"], ',')
for attribute in attributes:
nanopub.assertion.add(
(term, rdflib.RDF.type, self.rdflibConverter(self.codeMapper(attribute))))
else:
nanopub.assertion.add((term, rdflib.RDF.type,
self.rdflibConverter(self.codeMapper(v_tuple["Attribute"]))))
if "Subject" in v_tuple:
nanopub.assertion.add((term, sio.hasIdentifier, rdflib.term.URIRef(
self.prefixes[self.kb] + v_tuple["Subject"] + "-" + index)))
if "inRelationTo" in v_tuple:
if ("Role" in v_tuple) and ("Relation" not in v_tuple):
role = rdflib.BNode()
nanopub.assertion.add((role, rdflib.RDF.type, self.rdflibConverter(
self.convertVirtualToKGEntry(v_tuple["Role"], index))))
nanopub.assertion.add((role, sio.inRelationTo, self.rdflibConverter(
self.convertVirtualToKGEntry(v_tuple["inRelationTo"], index))))
nanopub.assertion.add((term, sio.hasRole, role))
elif ("Role" not in v_tuple) and ("Relation" in v_tuple):
nanopub.assertion.add((term, self.rdflibConverter(v_tuple["Relation"]),
self.rdflibConverter(
self.convertVirtualToKGEntry(v_tuple["inRelationTo"],
index))))
elif ("Role" not in v_tuple) and ("Relation" not in v_tuple):
nanopub.assertion.add((term, sio.inRelationTo, self.rdflibConverter(
self.convertVirtualToKGEntry(v_tuple["inRelationTo"], index))))
nanopub.provenance.add((term, prov.generatedAtTime, rdflib.Literal(
"{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year, datetime.utcnow().month,
datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(
datetime.utcnow().hour, datetime.utcnow().minute, datetime.utcnow().second) + "Z",
datatype=rdflib.XSD.dateTime)))
if "wasGeneratedBy" in v_tuple:
if ',' in v_tuple["wasGeneratedBy"]:
generatedByTerms = self.parseString(v_tuple["wasGeneratedBy"], ',')
for generatedByTerm in generatedByTerms:
nanopub.provenance.add((term, prov.wasGeneratedBy, self.rdflibConverter(
self.convertVirtualToKGEntry(generatedByTerm, index))))
if self.checkVirtual(generatedByTerm) and generatedByTerm not in vref_list:
vref_list.append(generatedByTerm)
else:
nanopub.provenance.add((term, prov.wasGeneratedBy, self.rdflibConverter(
self.convertVirtualToKGEntry(v_tuple["wasGeneratedBy"], index))))
if self.checkVirtual(v_tuple["wasGeneratedBy"]) and v_tuple[
"wasGeneratedBy"] not in vref_list:
vref_list.append(v_tuple["wasGeneratedBy"])
if "wasDerivedFrom" in v_tuple:
if ',' in v_tuple["wasDerivedFrom"]:
derivedFromTerms = self.parseString(v_tuple["wasDerivedFrom"], ',')
for derivedFromTerm in derivedFromTerms:
nanopub.provenance.add((term, prov.wasDerivedFrom, self.rdflibConverter(
self.convertVirtualToKGEntry(derivedFromTerm, index))))
if self.checkVirtual(derivedFromTerm) and derivedFromTerm not in vref_list:
vref_list.append(derivedFromTerm)
else:
nanopub.provenance.add((term, prov.wasDerivedFrom, self.rdflibConverter(
self.convertVirtualToKGEntry(v_tuple["wasDerivedFrom"], index))))
if self.checkVirtual(v_tuple["wasDerivedFrom"]) and v_tuple[
"wasDerivedFrom"] not in vref_list:
vref_list.append(v_tuple["wasDerivedFrom"])
return vref_list
except Exception as e:
logging.warning("Warning: Unable to create virtual entry:")
if hasattr(e, 'message'):
logging.warning(e.message)
else:
logging.warning(e)
def interpretData(self, nanopub):
if self.data_fn is not None:
try:
data_file = pd.read_csv(self.data_fn, dtype=object)
except Exception as e:
logging.exception("Error: The specified Data file does not exist: ")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
sys.exit(1)
try:
col_headers = list(data_file.columns.values)
try:
for a_tuple in self.explicit_entry_tuples:
if "Attribute" in a_tuple:
if ((a_tuple["Attribute"] == "hasco:originalID") or (a_tuple["Attribute"] == "sio:Identifier")):
if a_tuple["Column"] in col_headers:
for v_tuple in self.virtual_entry_tuples:
if "isAttributeOf" in a_tuple:
if a_tuple["isAttributeOf"] == v_tuple["Column"]:
v_tuple["Subject"] = a_tuple["Column"].replace(" ", "_").replace(",",
"").replace(
"(", "").replace(")", "").replace("/", "-").replace("\\", "-")
except Exception as e:
logging.exception("Error: Something went wrong when processing column headers:")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
for row in data_file.itertuples():
id_string = ''
for element in row:
id_string += str(element)
identifierString = hashlib.md5(id_string.encode('utf-8')).hexdigest()  # md5 requires bytes under Python 3
try:
vref_list = []
for a_tuple in self.explicit_entry_tuples:
if a_tuple["Column"] in col_headers:
try:
try:
term = rdflib.term.URIRef(self.prefixes[self.kb] + str(
a_tuple["Column"].replace(" ", "_").replace(",", "").replace("(", "").replace(
")", "").replace("/", "-").replace("\\", "-")) + "-" + identifierString)
nanopub.assertion.add((term, rdflib.RDF.type, rdflib.term.URIRef(
self.prefixes[self.kb] + str(
a_tuple["Column"].replace(" ", "_").replace(",", "").replace("(",
"").replace(
")", "").replace("/", "-").replace("\\", "-")))))
print(term)
if "Attribute" in a_tuple:
if ',' in a_tuple["Attribute"]:
attributes = self.parseString(a_tuple["Attribute"], ',')
for attribute in attributes:
nanopub.assertion.add((term, rdflib.RDF.type, self.rdflibConverter(
self.codeMapper(attribute))))
else:
nanopub.assertion.add((term, rdflib.RDF.type, self.rdflibConverter(
self.codeMapper(a_tuple["Attribute"]))))
if "Entity" in a_tuple:
if ',' in a_tuple["Entity"]:
entities = self.parseString(a_tuple["Entity"], ',')
for entity in entities:
nanopub.assertion.add((term, rdflib.RDF.type,
self.rdflibConverter(self.codeMapper(entity))))
else:
nanopub.assertion.add((term, rdflib.RDF.type, self.rdflibConverter(
self.codeMapper(a_tuple["Entity"]))))
if "isAttributeOf" in a_tuple:
nanopub.assertion.add((term, sio.isAttributeOf, self.rdflibConverter(
self.convertVirtualToKGEntry(a_tuple["isAttributeOf"], identifierString))))
if self.checkVirtual(a_tuple["isAttributeOf"]):
if a_tuple["isAttributeOf"] not in vref_list:
vref_list.append(a_tuple["isAttributeOf"])
if "Unit" in a_tuple:
nanopub.assertion.add(
(term, sio.hasUnit, self.rdflibConverter(self.codeMapper(a_tuple["Unit"]))))
if "Time" in a_tuple:
nanopub.assertion.add((term, sio.existsAt, self.rdflibConverter(
self.convertVirtualToKGEntry(a_tuple["Time"], identifierString))))
if self.checkVirtual(a_tuple["Time"]):
if a_tuple["Time"] not in vref_list:
vref_list.append(a_tuple["Time"])
if "Label" in a_tuple:
nanopub.assertion.add(
(term, rdflib.RDFS.label, self.rdflibConverter(a_tuple["Label"])))
if "Comment" in a_tuple:
nanopub.assertion.add(
(term, rdflib.RDFS.comment, self.rdflibConverter(a_tuple["Comment"])))
if "inRelationTo" in a_tuple:
if ("Role" in a_tuple) and ("Relation" not in a_tuple):
role = rdflib.BNode()
nanopub.assertion.add((role, rdflib.RDF.type, self.rdflibConverter(
self.convertVirtualToKGEntry(a_tuple["Role"], identifierString))))
nanopub.assertion.add((role, sio.inRelationTo, self.rdflibConverter(
self.convertVirtualToKGEntry(a_tuple["inRelationTo"],
identifierString))))
nanopub.assertion.add((term, sio.hasRole, role))
elif ("Role" not in a_tuple) and ("Relation" in a_tuple):
nanopub.assertion.add((term, self.rdflibConverter(a_tuple["Relation"]),
self.rdflibConverter(self.convertVirtualToKGEntry(
a_tuple["inRelationTo"], identifierString))))
elif ("Role" not in a_tuple) and ("Relation" not in a_tuple):
nanopub.assertion.add((term, sio.inRelationTo, self.rdflibConverter(
self.convertVirtualToKGEntry(a_tuple["inRelationTo"],
identifierString))))
except Exception as e:
logging.exception("Error: something went wrong for initial assertions:")
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
sys.exit(1)
try:
if row[col_headers.index(a_tuple["Column"]) + 1] != "":
if self.cb_fn is not None:
if a_tuple["Column"] in self.cb_tuple:
for tuple_row in self.cb_tuple[a_tuple["Column"]]:
if ("Code" in tuple_row) and (str(tuple_row['Code']) == str(
row[col_headers.index(a_tuple["Column"]) + 1])):
if ("Class" in tuple_row) and (tuple_row['Class'] != ""):
if ',' in tuple_row['Class']:
classTerms = self.parseString(tuple_row['Class'], ',')
for classTerm in classTerms:
nanopub.assertion.add((term, rdflib.RDF.type,
self.rdflibConverter(
self.codeMapper(
classTerm))))
else:
nanopub.assertion.add((term, rdflib.RDF.type,
self.rdflibConverter(
self.codeMapper(
tuple_row['Class']))))
if ("Resource" in tuple_row) and (tuple_row['Resource'] != ""):
if ',' in tuple_row['Resource']:
resourceTerms = self.parseString(tuple_row['Resource'],
',')
for resourceTerm in resourceTerms:
nanopub.assertion.add((term, rdflib.OWL.sameAs,
self.rdflibConverter(
self.convertVirtualToKGEntry(
self.codeMapper(
resourceTerm)))))
else:
nanopub.assertion.add((term, rdflib.OWL.sameAs,
self.rdflibConverter(
self.convertVirtualToKGEntry(
self.codeMapper(
tuple_row[
'Resource'])))))
if ("Label" in tuple_row) and (tuple_row['Label'] != ""):
nanopub.assertion.add((term, rdflib.RDFS.label,
self.rdflibConverter(
tuple_row["Label"])))
try:
if str(row[col_headers.index(a_tuple["Column"]) + 1]) == "nan":
pass
elif str(row[col_headers.index(a_tuple["Column"]) + 1]).isdigit():
nanopub.assertion.add((term, sio.hasValue, rdflib.Literal(
str(row[col_headers.index(a_tuple["Column"]) + 1]),
datatype=rdflib.XSD.integer)))
elif self.isfloat(str(row[col_headers.index(a_tuple["Column"]) + 1])):
nanopub.assertion.add((term, sio.hasValue, rdflib.Literal(
str(row[col_headers.index(a_tuple["Column"]) + 1]),
datatype=rdflib.XSD.float)))
else:
nanopub.assertion.add((term, sio.hasValue, rdflib.Literal(
str(row[col_headers.index(a_tuple["Column"]) + 1]),
datatype=rdflib.XSD.string)))
except Exception as e:
logging.warning("Warning: Unable to add assertion: %s",
row[col_headers.index(a_tuple["Column"]) + 1] + ":")
if hasattr(e, 'message'):
logging.warning(e.message)
else:
logging.warning(e)
except Exception as e:
logging.exception("Error: Something went wrong when asserting data value: %s",
row[col_headers.index(a_tuple["Column"]) + 1] + ":")
if hasattr(e, 'message'):
logging.exception(e.message)
else:
logging.exception(e)
try:
nanopub.provenance.add((term, prov.generatedAtTime, rdflib.Literal(
"{:4d}-{:02d}-{:02d}".format(datetime.utcnow().year, datetime.utcnow().month,
datetime.utcnow().day) + "T" + "{:02d}:{:02d}:{:02d}".format(
datetime.utcnow().hour, datetime.utcnow().minute,
datetime.utcnow().second) + "Z", datatype=rdflib.XSD.dateTime)))
if "wasDerivedFrom" in a_tuple:
if ',' in a_tuple["wasDerivedFrom"]:
derivedFromTerms = self.parseString(a_tuple["wasDerivedFrom"], ',')
for derivedFromTerm in derivedFromTerms:
nanopub.provenance.add((term, prov.wasDerivedFrom, self.rdflibConverter(
self.convertVirtualToKGEntry(derivedFromTerm, identifierString))))
if self.checkVirtual(derivedFromTerm):
if derivedFromTerm not in vref_list:
vref_list.append(derivedFromTerm)
else:
nanopub.provenance.add((term, prov.wasDerivedFrom, self.rdflibConverter(
self.convertVirtualToKGEntry(a_tuple["wasDerivedFrom"],
identifierString))))
if self.checkVirtual(a_tuple["wasDerivedFrom"]):
if a_tuple["wasDerivedFrom"] not in vref_list:
vref_list.append(a_tuple["wasDerivedFrom"])
if "wasGeneratedBy" in a_tuple:
if ',' in a_tuple["wasGeneratedBy"]:
generatedByTerms = self.parseString(a_tuple["wasGeneratedBy"], ',')
for generatedByTerm in generatedByTerms:
nanopub.provenance.add((term, prov.wasGeneratedBy, self.rdflibConverter(
self.convertVirtualToKGEntry(generatedByTerm, identifierString))))
if self.checkVirtual(generatedByTerm):
if generatedByTerm not in vref_list:
vref_list.append(generatedByTerm)
else:
nanopub.provenance.add((term, prov.wasGeneratedBy, self.rdflibConverter(
self.convertVirtualToKGEntry(a_tuple["wasGeneratedBy"],
identifierString))))
if self.checkVirtual(a_tuple["wasGeneratedBy"]):
if a_tuple["wasGeneratedBy"] not in vref_list:
vref_list.append(a_tuple["wasGeneratedBy"])
except Exception as e:
logging.exception("Error: Something went wrong when adding provenance:")
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
except Exception as e:
logging.warning("Warning: Unable to process tuple %s", a_tuple.__str__() + ":")
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
try:
for vref in vref_list:
vref_list = self.writeVirtualEntry(nanopub, vref_list, vref, identifierString)
except Exception as e:
logging.warning("Warning: Something went writing vref entries:")
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
except Exception as e:
logging.exception("Error: Something went wrong when processing explicit tuples:")
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
sys.exit(1)
except Exception as e:
logging.warning("Warning: Unable to process Data file:")
if hasattr(e, 'message'):
print(e.message)
else:
print(e)
|
apache-2.0
|
asnorkin/sentiment_analysis
|
site/lib/python2.7/site-packages/scipy/optimize/_lsq/least_squares.py
|
27
|
37725
|
"""Generic interface for least-square minimization."""
from __future__ import division, print_function, absolute_import
from warnings import warn
import numpy as np
from numpy.linalg import norm
from scipy.sparse import issparse, csr_matrix
from scipy.sparse.linalg import LinearOperator
from scipy.optimize import _minpack, OptimizeResult
from scipy.optimize._numdiff import approx_derivative, group_columns
from scipy._lib.six import string_types
from .trf import trf
from .dogbox import dogbox
from .common import EPS, in_bounds, make_strictly_feasible
TERMINATION_MESSAGES = {
-1: "Improper input parameters status returned from `leastsq`",
0: "The maximum number of function evaluations is exceeded.",
1: "`gtol` termination condition is satisfied.",
2: "`ftol` termination condition is satisfied.",
3: "`xtol` termination condition is satisfied.",
4: "Both `ftol` and `xtol` termination conditions are satisfied."
}
FROM_MINPACK_TO_COMMON = {
0: -1, # Improper input parameters from MINPACK.
1: 2,
2: 3,
3: 4,
4: 1,
5: 0
# There are 6, 7, 8 for too small tolerance parameters,
# but we guard against it by checking ftol, xtol, gtol beforehand.
}
def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step):
n = x0.size
if diff_step is None:
epsfcn = EPS
else:
epsfcn = diff_step**2
# Compute MINPACK's `diag`, which is inverse of our `x_scale` and
# ``x_scale='jac'`` corresponds to ``diag=None``.
if isinstance(x_scale, string_types) and x_scale == 'jac':
diag = None
else:
diag = 1 / x_scale
full_output = True
col_deriv = False
factor = 100.0
if jac is None:
if max_nfev is None:
# n squared to account for Jacobian evaluations.
max_nfev = 100 * n * (n + 1)
x, info, status = _minpack._lmdif(
fun, x0, (), full_output, ftol, xtol, gtol,
max_nfev, epsfcn, factor, diag)
else:
if max_nfev is None:
max_nfev = 100 * n
x, info, status = _minpack._lmder(
fun, jac, x0, (), full_output, col_deriv,
ftol, xtol, gtol, max_nfev, factor, diag)
f = info['fvec']
if callable(jac):
J = jac(x)
else:
J = np.atleast_2d(approx_derivative(fun, x))
cost = 0.5 * np.dot(f, f)
g = J.T.dot(f)
g_norm = norm(g, ord=np.inf)
nfev = info['nfev']
njev = info.get('njev', None)
status = FROM_MINPACK_TO_COMMON[status]
active_mask = np.zeros_like(x0, dtype=int)
return OptimizeResult(
x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm,
active_mask=active_mask, nfev=nfev, njev=njev, status=status)
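# Summary of the result assembly above: cost = 0.5 * f.dot(f), grad = J.T.dot(f),
# and optimality is the infinity norm of that gradient; the MINPACK status code
# is translated through FROM_MINPACK_TO_COMMON, and active_mask is all zeros
# because the 'lm' path does not support bounds.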
def prepare_bounds(bounds, n):
lb, ub = [np.asarray(b, dtype=float) for b in bounds]
if lb.ndim == 0:
lb = np.resize(lb, n)
if ub.ndim == 0:
ub = np.resize(ub, n)
return lb, ub
def check_tolerance(ftol, xtol, gtol):
message = "{} is too low, setting to machine epsilon {}."
if ftol < EPS:
warn(message.format("`ftol`", EPS))
ftol = EPS
if xtol < EPS:
warn(message.format("`xtol`", EPS))
xtol = EPS
if gtol < EPS:
warn(message.format("`gtol`", EPS))
gtol = EPS
return ftol, xtol, gtol
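# Illustrative behaviour (EPS is machine epsilon, roughly 2.2e-16 for float64):
#   >>> check_tolerance(1e-20, 1e-8, 1e-8)   # doctest: +SKIP
#   warns "`ftol` is too low ..." and returns (EPS, 1e-08, 1e-08)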
def check_x_scale(x_scale, x0):
if isinstance(x_scale, string_types) and x_scale == 'jac':
return x_scale
try:
x_scale = np.asarray(x_scale, dtype=float)
valid = np.all(np.isfinite(x_scale)) and np.all(x_scale > 0)
except (ValueError, TypeError):
valid = False
if not valid:
raise ValueError("`x_scale` must be 'jac' or array_like with "
"positive numbers.")
if x_scale.ndim == 0:
x_scale = np.resize(x_scale, x0.shape)
if x_scale.shape != x0.shape:
raise ValueError("Inconsistent shapes between `x_scale` and `x0`.")
return x_scale
def check_jac_sparsity(jac_sparsity, m, n):
if jac_sparsity is None:
return None
if not issparse(jac_sparsity):
jac_sparsity = np.atleast_2d(jac_sparsity)
if jac_sparsity.shape != (m, n):
raise ValueError("`jac_sparsity` has wrong shape.")
return jac_sparsity, group_columns(jac_sparsity)
# Loss functions.
def huber(z, rho, cost_only):
mask = z <= 1
rho[0, mask] = z[mask]
rho[0, ~mask] = 2 * z[~mask]**0.5 - 1
if cost_only:
return
rho[1, mask] = 1
rho[1, ~mask] = z[~mask]**-0.5
rho[2, mask] = 0
rho[2, ~mask] = -0.5 * z[~mask]**-1.5
def soft_l1(z, rho, cost_only):
t = 1 + z
rho[0] = 2 * (t**0.5 - 1)
if cost_only:
return
rho[1] = t**-0.5
rho[2] = -0.5 * t**-1.5
def cauchy(z, rho, cost_only):
rho[0] = np.log1p(z)
if cost_only:
return
t = 1 + z
rho[1] = 1 / t
rho[2] = -1 / t**2
def arctan(z, rho, cost_only):
rho[0] = np.arctan(z)
if cost_only:
return
t = 1 + z**2
rho[1] = 1 / t
rho[2] = -2 * z / t**2
IMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1,
cauchy=cauchy, arctan=arctan)
def construct_loss_function(m, loss, f_scale):
if loss == 'linear':
return None
if not callable(loss):
loss = IMPLEMENTED_LOSSES[loss]
rho = np.empty((3, m))
def loss_function(f, cost_only=False):
z = (f / f_scale) ** 2
loss(z, rho, cost_only=cost_only)
if cost_only:
return 0.5 * f_scale ** 2 * np.sum(rho[0])
rho[0] *= f_scale ** 2
rho[2] /= f_scale ** 2
return rho
else:
def loss_function(f, cost_only=False):
z = (f / f_scale) ** 2
rho = loss(z)
if cost_only:
return 0.5 * f_scale ** 2 * np.sum(rho[0])
rho[0] *= f_scale ** 2
rho[2] /= f_scale ** 2
return rho
return loss_function
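# Minimal usage sketch for construct_loss_function (the residual values are
# illustrative; numpy is imported above as np):
#   >>> f = np.array([0.0, 1.0, 3.0])                      # m = 3 residuals
#   >>> loss_fn = construct_loss_function(3, 'soft_l1', f_scale=1.0)
#   >>> loss_fn(f).shape      # rows: rho(z), rho'(z), rho''(z) for z = f**2
#   (3, 3)
#   >>> loss_fn(f, cost_only=True)   # scalar: 0.5 * f_scale**2 * sum(rho[0])
#   2.57...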
def least_squares(
fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf',
ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear',
f_scale=1.0, diff_step=None, tr_solver=None, tr_options={},
jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={}):
"""Solve a nonlinear least-squares problem with bounds on the variables.
Given the residuals f(x) (an m-dimensional real function of n real
variables) and the loss function rho(s) (a scalar function), `least_squares`
finds a local minimum of the cost function F(x)::
minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1)
subject to lb <= x <= ub
The purpose of the loss function rho(s) is to reduce the influence of
outliers on the solution.
Parameters
----------
fun : callable
Function which computes the vector of residuals, with the signature
``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with
respect to its first argument. The argument ``x`` passed to this
function is an ndarray of shape (n,) (never a scalar, even for n=1).
It must return a 1-d array_like of shape (m,) or a scalar. If the
argument ``x`` is complex or the function ``fun`` returns complex
residuals, it must be wrapped in a real function of real arguments,
as shown at the end of the Examples section.
x0 : array_like with shape (n,) or float
Initial guess on independent variables. If float, it will be treated
as a 1-d array with one element.
jac : {'2-point', '3-point', 'cs', callable}, optional
Method of computing the Jacobian matrix (an m-by-n matrix, where
element (i, j) is the partial derivative of f[i] with respect to
x[j]). The keywords select a finite difference scheme for numerical
estimation. The scheme '3-point' is more accurate, but requires
twice as many operations compared to '2-point' (default). The
scheme 'cs' uses complex steps, and while potentially the most
accurate, it is applicable only when `fun` correctly handles
complex inputs and can be analytically continued to the complex
plane. Method 'lm' always uses the '2-point' scheme. If callable,
it is used as ``jac(x, *args, **kwargs)`` and should return a
good approximation (or the exact value) for the Jacobian as an
array_like (np.atleast_2d is applied), a sparse matrix or a
`scipy.sparse.linalg.LinearOperator`.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on independent variables. Defaults to no bounds.
Each array must match the size of `x0` or be a scalar, in the latter
case a bound will be the same for all variables. Use ``np.inf`` with
an appropriate sign to disable bounds on all or some variables.
method : {'trf', 'dogbox', 'lm'}, optional
Algorithm to perform minimization.
* 'trf' : Trust Region Reflective algorithm, particularly suitable
for large sparse problems with bounds. Generally robust method.
* 'dogbox' : dogleg algorithm with rectangular trust regions,
typical use case is small problems with bounds. Not recommended
for problems with rank-deficient Jacobian.
* 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK.
Doesn't handle bounds and sparse Jacobians. Usually the most
efficient method for small unconstrained problems.
Default is 'trf'. See Notes for more information.
ftol : float, optional
Tolerance for termination by the change of the cost function. Default
is 1e-8. The optimization process is stopped when ``dF < ftol * F``,
and there was an adequate agreement between a local quadratic model and
the true model in the last step.
xtol : float, optional
Tolerance for termination by the change of the independent variables.
Default is 1e-8. The exact condition depends on the `method` used:
* For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))``
* For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is
a trust-region radius and ``xs`` is the value of ``x``
scaled according to `x_scale` parameter (see below).
gtol : float, optional
Tolerance for termination by the norm of the gradient. Default is 1e-8.
The exact condition depends on the `method` used:
* For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where
``g_scaled`` is the value of the gradient scaled to account for
the presence of the bounds [STIR]_.
* For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where
``g_free`` is the gradient with respect to the variables which
are not in the optimal state on the boundary.
* For 'lm' : the maximum absolute value of the cosine of angles
between columns of the Jacobian and the residual vector is less
than `gtol`, or the residual vector is zero.
x_scale : array_like or 'jac', optional
Characteristic scale of each variable. Setting `x_scale` is equivalent
to reformulating the problem in scaled variables ``xs = x / x_scale``.
An alternative view is that the size of a trust region along j-th
dimension is proportional to ``x_scale[j]``. Improved convergence may
be achieved by setting `x_scale` such that a step of a given size
along any of the scaled variables has a similar effect on the cost
function. If set to 'jac', the scale is iteratively updated using the
inverse norms of the columns of the Jacobian matrix (as described in
[JJMore]_).
loss : str or callable, optional
Determines the loss function. The following keyword values are allowed:
* 'linear' (default) : ``rho(z) = z``. Gives a standard
least-squares problem.
* 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth
approximation of l1 (absolute value) loss. Usually a good
choice for robust least squares.
* 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works
similarly to 'soft_l1'.
* 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens the influence
of outliers, but may cause difficulties in the optimization process.
* 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on
a single residual, has properties similar to 'cauchy'.
If callable, it must take a 1-d ndarray ``z=f**2`` and return an
array_like with shape (3, m) where row 0 contains function values,
row 1 contains first derivatives and row 2 contains second
derivatives. Method 'lm' supports only 'linear' loss.
f_scale : float, optional
Value of soft margin between inlier and outlier residuals, default
is 1.0. The loss function is evaluated as follows
``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`,
and ``rho`` is determined by `loss` parameter. This parameter has
no effect with ``loss='linear'``, but for other `loss` values it is
of crucial importance.
max_nfev : None or int, optional
Maximum number of function evaluations before the termination.
If None (default), the value is chosen automatically:
* For 'trf' and 'dogbox' : 100 * n.
* For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1)
otherwise (because 'lm' counts function calls in Jacobian
estimation).
diff_step : None or array_like, optional
Determines the relative step size for the finite difference
approximation of the Jacobian. The actual step is computed as
``x * diff_step``. If None (default), then `diff_step` is taken to be
a conventional "optimal" power of machine epsilon for the finite
difference scheme used [NR]_.
tr_solver : {None, 'exact', 'lsmr'}, optional
Method for solving trust-region subproblems, relevant only for 'trf'
and 'dogbox' methods.
* 'exact' is suitable for not very large problems with dense
Jacobian matrices. The computational complexity per iteration is
comparable to a singular value decomposition of the Jacobian
matrix.
* 'lsmr' is suitable for problems with sparse and large Jacobian
matrices. It uses the iterative procedure
`scipy.sparse.linalg.lsmr` for finding a solution of a linear
least-squares problem and only requires matrix-vector product
evaluations.
If None (default) the solver is chosen based on the type of Jacobian
returned on the first iteration.
tr_options : dict, optional
Keyword options passed to trust-region solver.
* ``tr_solver='exact'``: `tr_options` are ignored.
* ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`.
Additionally ``method='trf'`` supports 'regularize' option
(bool, default is True) which adds a regularization term to the
normal equation, which improves convergence if the Jacobian is
rank-deficient [Byrd]_ (eq. 3.4).
jac_sparsity : {None, array_like, sparse matrix}, optional
Defines the sparsity structure of the Jacobian matrix for finite
difference estimation, its shape must be (m, n). If the Jacobian has
only few non-zero elements in *each* row, providing the sparsity
structure will greatly speed up the computations [Curtis]_. A zero
entry means that a corresponding element in the Jacobian is identically
zero. If provided, forces the use of 'lsmr' trust-region solver.
If None (default) then dense differencing will be used. Has no effect
for 'lm' method.
verbose : {0, 1, 2}, optional
Level of algorithm's verbosity:
* 0 (default) : work silently.
* 1 : display a termination report.
* 2 : display progress during iterations (not supported by 'lm'
method).
args, kwargs : tuple and dict, optional
Additional arguments passed to `fun` and `jac`. Both empty by default.
The calling signature is ``fun(x, *args, **kwargs)`` and the same for
`jac`.
Returns
-------
`OptimizeResult` with the following fields defined:
x : ndarray, shape (n,)
Solution found.
cost : float
Value of the cost function at the solution.
fun : ndarray, shape (m,)
Vector of residuals at the solution.
jac : ndarray, sparse matrix or LinearOperator, shape (m, n)
Modified Jacobian matrix at the solution, in the sense that J^T J
is a Gauss-Newton approximation of the Hessian of the cost function.
The type is the same as the one used by the algorithm.
grad : ndarray, shape (n,)
Gradient of the cost function at the solution.
optimality : float
First-order optimality measure. In unconstrained problems, it is always
the uniform norm of the gradient. In constrained problems, it is the
quantity which was compared with `gtol` during iterations.
active_mask : ndarray of int, shape (n,)
Each component shows whether a corresponding constraint is active
(that is, whether a variable is at the bound):
* 0 : a constraint is not active.
* -1 : a lower bound is active.
* 1 : an upper bound is active.
Might be somewhat arbitrary for 'trf' method as it generates a sequence
of strictly feasible iterates and `active_mask` is determined within a
tolerance threshold.
nfev : int
Number of function evaluations done. Methods 'trf' and 'dogbox' do not
count function calls for numerical Jacobian approximation, as opposed
to 'lm' method.
njev : int or None
Number of Jacobian evaluations done. If numerical Jacobian
approximation is used in 'lm' method, it is set to None.
status : int
The reason for algorithm termination:
* -1 : improper input parameters status returned from MINPACK.
* 0 : the maximum number of function evaluations is exceeded.
* 1 : `gtol` termination condition is satisfied.
* 2 : `ftol` termination condition is satisfied.
* 3 : `xtol` termination condition is satisfied.
* 4 : Both `ftol` and `xtol` termination conditions are satisfied.
message : str
Verbal description of the termination reason.
success : bool
True if one of the convergence criteria is satisfied (`status` > 0).
See Also
--------
leastsq : A legacy wrapper for the MINPACK implementation of the
    Levenberg-Marquardt algorithm.
curve_fit : Least-squares minimization applied to a curve fitting problem.
Notes
-----
Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares
algorithms implemented in MINPACK (lmder, lmdif). It runs the
Levenberg-Marquardt algorithm formulated as a trust-region type algorithm.
The implementation is based on the paper [JJMore]_; it is very robust and
efficient with a lot of smart tricks. It should be your first choice
for unconstrained problems. Note that it doesn't support bounds. Also
it doesn't work when m < n.
Method 'trf' (Trust Region Reflective) is motivated by the process of
solving a system of equations, which constitute the first-order optimality
condition for a bound-constrained minimization problem as formulated in
[STIR]_. The algorithm iteratively solves trust-region subproblems
augmented by a special diagonal quadratic term and with trust-region shape
determined by the distance from the bounds and the direction of the
gradient. These enhancements help to avoid taking steps directly into the
bounds and to efficiently explore the whole space of variables. To further improve
convergence, the algorithm considers search directions reflected from the
bounds. To obey theoretical requirements, the algorithm keeps iterates
strictly feasible. With dense Jacobians trust-region subproblems are
solved by an exact method very similar to the one described in [JJMore]_
(and implemented in MINPACK). The difference from the MINPACK
implementation is that a singular value decomposition of a Jacobian
matrix is done once per iteration, instead of a QR decomposition and series
of Givens rotation eliminations. For large sparse Jacobians a 2-d subspace
approach to solving trust-region subproblems is used [STIR]_, [Byrd]_.
The subspace is spanned by a scaled gradient and an approximate
Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no
constraints are imposed the algorithm is very similar to MINPACK and has
generally comparable performance. The algorithm works quite robustly on both
unbounded and bounded problems, which is why it is chosen as the default algorithm.
Method 'dogbox' operates in a trust-region framework, but considers
rectangular trust regions as opposed to conventional ellipsoids [Voglis]_.
The intersection of a current trust region and initial bounds is again
rectangular, so on each iteration a quadratic minimization problem subject
to bound constraints is solved approximately by Powell's dogleg method
[NumOpt]_. The required Gauss-Newton step can be computed exactly for
dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large
sparse Jacobians. The algorithm is likely to exhibit slow convergence when
the rank of Jacobian is less than the number of variables. The algorithm
often outperforms 'trf' in bounded problems with a small number of
variables.
Robust loss functions are implemented as described in [BA]_. The idea
is to modify a residual vector and a Jacobian matrix on each iteration
such that the computed gradient and Gauss-Newton Hessian approximation match
the true gradient and Hessian approximation of the cost function. Then
the algorithm proceeds in a normal way, i.e. robust loss functions are
implemented as a simple wrapper over standard least-squares algorithms.
.. versionadded:: 0.17.0
References
----------
.. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior,
and Conjugate Gradient Method for Large-Scale Bound-Constrained
Minimization Problems," SIAM Journal on Scientific Computing,
Vol. 21, Number 1, pp 1-23, 1999.
.. [NR] William H. Press et. al., "Numerical Recipes. The Art of Scientific
Computing. 3rd edition", Sec. 5.7.
.. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, "Approximate
solution of the trust region problem by minimization over
two-dimensional subspaces", Math. Programming, 40, pp. 247-263,
1988.
.. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
sparse Jacobian matrices", Journal of the Institute of
Mathematics and its Applications, 13, pp. 117-120, 1974.
.. [JJMore] J. J. More, "The Levenberg-Marquardt Algorithm: Implementation
and Theory," Numerical Analysis, ed. G. A. Watson, Lecture
Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977.
.. [Voglis] C. Voglis and I. E. Lagaris, "A Rectangular Trust Region
Dogleg Approach for Unconstrained and Bound Constrained
Nonlinear Optimization", WSEAS International Conference on
Applied Mathematics, Corfu, Greece, 2004.
.. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization,
2nd edition", Chapter 4.
.. [BA] B. Triggs et. al., "Bundle Adjustment - A Modern Synthesis",
Proceedings of the International Workshop on Vision Algorithms:
Theory and Practice, pp. 298-372, 1999.
Examples
--------
In this example we find a minimum of the Rosenbrock function without bounds
on independent variables.
>>> def fun_rosenbrock(x):
... return np.array([10 * (x[1] - x[0]**2), (1 - x[0])])
Notice that we only provide the vector of the residuals. The algorithm
constructs the cost function as a sum of squares of the residuals, which
gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``.
>>> from scipy.optimize import least_squares
>>> x0_rosenbrock = np.array([2, 2])
>>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock)
>>> res_1.x
array([ 1., 1.])
>>> res_1.cost
9.8669242910846867e-30
>>> res_1.optimality
8.8928864934219529e-14
We now constrain the variables, in such a way that the previous solution
becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, and
``x[0]`` is left unconstrained. To this end, we specify the `bounds` parameter
to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``.
We also provide the analytic Jacobian:
>>> def jac_rosenbrock(x):
... return np.array([
... [-20 * x[0], 10],
... [-1, 0]])
Putting this all together, we see that the new solution lies on the bound:
>>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock,
... bounds=([-np.inf, 1.5], np.inf))
>>> res_2.x
array([ 1.22437075, 1.5 ])
>>> res_2.cost
0.025213093946805685
>>> res_2.optimality
1.5885401433157753e-07
Now we solve a system of equations (i.e., the cost function should be zero
at a minimum) for a Broyden tridiagonal vector-valued function of 100000
variables:
>>> def fun_broyden(x):
... f = (3 - x) * x + 1
... f[1:] -= x[:-1]
... f[:-1] -= 2 * x[1:]
... return f
The corresponding Jacobian matrix is sparse. We tell the algorithm to
estimate it by finite differences and provide the sparsity structure of
Jacobian to significantly speed up this process.
>>> from scipy.sparse import lil_matrix
>>> def sparsity_broyden(n):
... sparsity = lil_matrix((n, n), dtype=int)
... i = np.arange(n)
... sparsity[i, i] = 1
... i = np.arange(1, n)
... sparsity[i, i - 1] = 1
... i = np.arange(n - 1)
... sparsity[i, i + 1] = 1
... return sparsity
...
>>> n = 100000
>>> x0_broyden = -np.ones(n)
...
>>> res_3 = least_squares(fun_broyden, x0_broyden,
... jac_sparsity=sparsity_broyden(n))
>>> res_3.cost
4.5687069299604613e-23
>>> res_3.optimality
1.1650454296851518e-11
Let's also solve a curve fitting problem using robust loss function to
take care of outliers in the data. Define the model function as
``y = a + b * exp(c * t)``, where t is a predictor variable, y is an
observation and a, b, c are parameters to estimate.
First, define the function which generates the data with noise and
outliers, define the model parameters, and generate data:
>>> def gen_data(t, a, b, c, noise=0, n_outliers=0, random_state=0):
... y = a + b * np.exp(t * c)
...
... rnd = np.random.RandomState(random_state)
... error = noise * rnd.randn(t.size)
... outliers = rnd.randint(0, t.size, n_outliers)
... error[outliers] *= 10
...
... return y + error
...
>>> a = 0.5
>>> b = 2.0
>>> c = -1
>>> t_min = 0
>>> t_max = 10
>>> n_points = 15
...
>>> t_train = np.linspace(t_min, t_max, n_points)
>>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3)
Define function for computing residuals and initial estimate of
parameters.
>>> def fun(x, t, y):
... return x[0] + x[1] * np.exp(x[2] * t) - y
...
>>> x0 = np.array([1.0, 1.0, 0.0])
Compute a standard least-squares solution:
>>> res_lsq = least_squares(fun, x0, args=(t_train, y_train))
Now compute two solutions with two different robust loss functions. The
parameter `f_scale` is set to 0.1, meaning that inlier residuals should
not significantly exceed 0.1 (the noise level used).
>>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1,
... args=(t_train, y_train))
>>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1,
... args=(t_train, y_train))
And finally plot all the curves. We see that by selecting an appropriate
`loss` we can get estimates close to optimal even in the presence of
strong outliers. But keep in mind that generally it is recommended to try
'soft_l1' or 'huber' losses first (if at all necessary) as the other two
options may cause difficulties in the optimization process.
>>> t_test = np.linspace(t_min, t_max, n_points * 10)
>>> y_true = gen_data(t_test, a, b, c)
>>> y_lsq = gen_data(t_test, *res_lsq.x)
>>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x)
>>> y_log = gen_data(t_test, *res_log.x)
...
>>> import matplotlib.pyplot as plt
>>> plt.plot(t_train, y_train, 'o')
>>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true')
>>> plt.plot(t_test, y_lsq, label='linear loss')
>>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss')
>>> plt.plot(t_test, y_log, label='cauchy loss')
>>> plt.xlabel("t")
>>> plt.ylabel("y")
>>> plt.legend()
>>> plt.show()
In the next example, we show how complex-valued residual functions of
complex variables can be optimized with ``least_squares()``. Consider the
following function:
>>> def f(z):
... return z - (0.5 + 0.5j)
We wrap it into a function of real variables that returns real residuals
by simply handling the real and imaginary parts as independent variables:
>>> def f_wrap(x):
... fx = f(x[0] + 1j*x[1])
... return np.array([fx.real, fx.imag])
Thus, instead of the original m-dimensional complex function of n complex
variables we optimize a 2m-dimensional real function of 2n real variables:
>>> from scipy.optimize import least_squares
>>> res_wrapped = least_squares(f_wrap, (0.1, 0.1), bounds=([0, 0], [1, 1]))
>>> z = res_wrapped.x[0] + res_wrapped.x[1]*1j
>>> z
(0.49999999999925893+0.49999999999925893j)
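A callable can also be passed as `loss`. As a minimal sketch (assuming, as
the shape check in the implementation suggests, that the callable receives
the scaled squared residuals ``z`` and returns an array of shape (3, m)
holding the loss value and its first two derivatives), the built-in
'soft_l1' loss could be reproduced like this (names are illustrative only):
>>> def soft_l1_loss(z):  # illustrative re-implementation of 'soft_l1'
...     rho = np.empty((3, z.size))
...     t = 1 + z
...     rho[0] = 2 * (t**0.5 - 1)    # rho(z)
...     rho[1] = t**-0.5             # rho'(z)
...     rho[2] = -0.5 * t**-1.5      # rho''(z)
...     return rho
...
>>> res_custom = least_squares(fun, x0, loss=soft_l1_loss, f_scale=0.1,
...                            args=(t_train, y_train))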
"""
if method not in ['trf', 'dogbox', 'lm']:
raise ValueError("`method` must be 'trf', 'dogbox' or 'lm'.")
if jac not in ['2-point', '3-point', 'cs'] and not callable(jac):
raise ValueError("`jac` must be '2-point', '3-point', 'cs' or "
"callable.")
if tr_solver not in [None, 'exact', 'lsmr']:
raise ValueError("`tr_solver` must be None, 'exact' or 'lsmr'.")
if loss not in IMPLEMENTED_LOSSES and not callable(loss):
raise ValueError("`loss` must be one of {0} or a callable."
.format(IMPLEMENTED_LOSSES.keys()))
if method == 'lm' and loss != 'linear':
raise ValueError("method='lm' supports only 'linear' loss function.")
if verbose not in [0, 1, 2]:
raise ValueError("`verbose` must be in [0, 1, 2].")
if len(bounds) != 2:
raise ValueError("`bounds` must contain 2 elements.")
if max_nfev is not None and max_nfev <= 0:
raise ValueError("`max_nfev` must be None or positive integer.")
if np.iscomplexobj(x0):
raise ValueError("`x0` must be real.")
x0 = np.atleast_1d(x0).astype(float)
if x0.ndim > 1:
raise ValueError("`x0` must have at most 1 dimension.")
lb, ub = prepare_bounds(bounds, x0.shape[0])
if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)):
raise ValueError("Method 'lm' doesn't support bounds.")
if lb.shape != x0.shape or ub.shape != x0.shape:
raise ValueError("Inconsistent shapes between bounds and `x0`.")
if np.any(lb >= ub):
raise ValueError("Each lower bound must be strictly less than each "
"upper bound.")
if not in_bounds(x0, lb, ub):
raise ValueError("`x0` is infeasible.")
x_scale = check_x_scale(x_scale, x0)
ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol)
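    # Wrap the residual function so extra args/kwargs are bound and the output
    # is always a 1-d array.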
def fun_wrapped(x):
return np.atleast_1d(fun(x, *args, **kwargs))
if method == 'trf':
x0 = make_strictly_feasible(x0, lb, ub)
f0 = fun_wrapped(x0)
if f0.ndim != 1:
raise ValueError("`fun` must return at most 1-d array_like.")
if not np.all(np.isfinite(f0)):
raise ValueError("Residuals are not finite in the initial point.")
n = x0.size
m = f0.size
if method == 'lm' and m < n:
raise ValueError("Method 'lm' doesn't work when the number of "
"residuals is less than the number of variables.")
loss_function = construct_loss_function(m, loss, f_scale)
if callable(loss):
rho = loss_function(f0)
if rho.shape != (3, m):
raise ValueError("The return value of `loss` callable has wrong "
"shape.")
initial_cost = 0.5 * np.sum(rho[0])
elif loss_function is not None:
initial_cost = loss_function(f0, cost_only=True)
else:
initial_cost = 0.5 * np.dot(f0, f0)
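    # Set up the Jacobian: wrap a user-supplied `jac` (dense, sparse matrix or
    # LinearOperator), or fall back to finite-difference estimation.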
if callable(jac):
J0 = jac(x0, *args, **kwargs)
if issparse(J0):
J0 = csr_matrix(J0)
def jac_wrapped(x, _=None):
return csr_matrix(jac(x, *args, **kwargs))
elif isinstance(J0, LinearOperator):
def jac_wrapped(x, _=None):
return jac(x, *args, **kwargs)
else:
J0 = np.atleast_2d(J0)
def jac_wrapped(x, _=None):
return np.atleast_2d(jac(x, *args, **kwargs))
else: # Estimate Jacobian by finite differences.
if method == 'lm':
if jac_sparsity is not None:
raise ValueError("method='lm' does not support "
"`jac_sparsity`.")
if jac != '2-point':
warn("jac='{0}' works equivalently to '2-point' "
"for method='lm'.".format(jac))
J0 = jac_wrapped = None
else:
if jac_sparsity is not None and tr_solver == 'exact':
raise ValueError("tr_solver='exact' is incompatible "
"with `jac_sparsity`.")
jac_sparsity = check_jac_sparsity(jac_sparsity, m, n)
def jac_wrapped(x, f):
J = approx_derivative(fun, x, rel_step=diff_step, method=jac,
f0=f, bounds=bounds, args=args,
kwargs=kwargs, sparsity=jac_sparsity)
if J.ndim != 2: # J is guaranteed not sparse.
J = np.atleast_2d(J)
return J
J0 = jac_wrapped(x0, f0)
if J0 is not None:
if J0.shape != (m, n):
raise ValueError(
"The return value of `jac` has wrong shape: expected {0}, "
"actual {1}.".format((m, n), J0.shape))
if not isinstance(J0, np.ndarray):
if method == 'lm':
raise ValueError("method='lm' works only with dense "
"Jacobian matrices.")
if tr_solver == 'exact':
raise ValueError(
"tr_solver='exact' works only with dense "
"Jacobian matrices.")
jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac'
if isinstance(J0, LinearOperator) and jac_scale:
raise ValueError("x_scale='jac' can't be used when `jac` "
"returns LinearOperator.")
if tr_solver is None:
if isinstance(J0, np.ndarray):
tr_solver = 'exact'
else:
tr_solver = 'lsmr'
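    # Dispatch to the selected backend: MINPACK for 'lm', or the trf/dogbox
    # implementations.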
if method == 'lm':
result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol,
max_nfev, x_scale, diff_step)
elif method == 'trf':
result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol,
gtol, max_nfev, x_scale, loss_function, tr_solver,
tr_options.copy(), verbose)
elif method == 'dogbox':
if tr_solver == 'lsmr' and 'regularize' in tr_options:
warn("The keyword 'regularize' in `tr_options` is not relevant "
"for 'dogbox' method.")
tr_options = tr_options.copy()
del tr_options['regularize']
result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol,
xtol, gtol, max_nfev, x_scale, loss_function,
tr_solver, tr_options, verbose)
result.message = TERMINATION_MESSAGES[result.status]
result.success = result.status > 0
if verbose >= 1:
print(result.message)
print("Function evaluations {0}, initial cost {1:.4e}, final cost "
"{2:.4e}, first-order optimality {3:.2e}."
.format(result.nfev, initial_cost, result.cost,
result.optimality))
return result
|
mit
|
matthew-tucker/mne-python
|
mne/tests/test_source_space.py
|
9
|
23354
|
from __future__ import print_function
import os
import os.path as op
from nose.tools import assert_true, assert_raises
from nose.plugins.skip import SkipTest
import numpy as np
from numpy.testing import assert_array_equal, assert_allclose, assert_equal
import warnings
from mne.datasets import testing
from mne import (read_source_spaces, vertex_to_mni, write_source_spaces,
setup_source_space, setup_volume_source_space,
add_source_space_distances, read_bem_surfaces)
from mne.utils import (_TempDir, requires_fs_or_nibabel, requires_nibabel,
requires_freesurfer, run_subprocess,
requires_mne, requires_scipy_version,
run_tests_if_main, slow_test)
from mne.surface import _accumulate_normals, _triangle_neighbors
from mne.source_space import _get_mgz_header
from mne.externals.six.moves import zip
from mne.source_space import (get_volume_labels_from_aseg, SourceSpaces,
_compare_source_spaces)
from mne.io.constants import FIFF
warnings.simplefilter('always')
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
fname_mri = op.join(data_path, 'subjects', 'sample', 'mri', 'T1.mgz')
fname = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-6-src.fif')
fname_vol = op.join(subjects_dir, 'sample', 'bem',
'sample-volume-7mm-src.fif')
fname_bem = op.join(data_path, 'subjects', 'sample', 'bem',
'sample-1280-bem.fif')
base_dir = op.join(op.dirname(__file__), '..', 'io', 'tests', 'data')
fname_small = op.join(base_dir, 'small-src.fif.gz')
@testing.requires_testing_data
@requires_nibabel(vox2ras_tkr=True)
def test_mgz_header():
"""Test MGZ header reading"""
import nibabel as nib
header = _get_mgz_header(fname_mri)
mri_hdr = nib.load(fname_mri).get_header()
assert_allclose(mri_hdr.get_data_shape(), header['dims'])
assert_allclose(mri_hdr.get_vox2ras_tkr(), header['vox2ras_tkr'])
assert_allclose(mri_hdr.get_ras2vox(), header['ras2vox'])
@requires_scipy_version('0.11')
def test_add_patch_info():
"""Test adding patch info to source space"""
# let's setup a small source space
src = read_source_spaces(fname_small)
src_new = read_source_spaces(fname_small)
for s in src_new:
s['nearest'] = None
s['nearest_dist'] = None
s['pinfo'] = None
# test that no patch info is added for small dist_limit
try:
add_source_space_distances(src_new, dist_limit=0.00001)
except RuntimeError: # what we throw when scipy version is wrong
pass
else:
assert_true(all(s['nearest'] is None for s in src_new))
assert_true(all(s['nearest_dist'] is None for s in src_new))
assert_true(all(s['pinfo'] is None for s in src_new))
# now let's use one that works
add_source_space_distances(src_new)
for s1, s2 in zip(src, src_new):
assert_array_equal(s1['nearest'], s2['nearest'])
assert_allclose(s1['nearest_dist'], s2['nearest_dist'], atol=1e-7)
assert_equal(len(s1['pinfo']), len(s2['pinfo']))
for p1, p2 in zip(s1['pinfo'], s2['pinfo']):
assert_array_equal(p1, p2)
@testing.requires_testing_data
@requires_scipy_version('0.11')
def test_add_source_space_distances_limited():
"""Test adding distances to source space with a dist_limit"""
tempdir = _TempDir()
src = read_source_spaces(fname)
src_new = read_source_spaces(fname)
del src_new[0]['dist']
del src_new[1]['dist']
n_do = 200 # limit this for speed
src_new[0]['vertno'] = src_new[0]['vertno'][:n_do].copy()
src_new[1]['vertno'] = src_new[1]['vertno'][:n_do].copy()
out_name = op.join(tempdir, 'temp-src.fif')
try:
add_source_space_distances(src_new, dist_limit=0.007)
except RuntimeError: # what we throw when scipy version is wrong
raise SkipTest('dist_limit requires scipy > 0.13')
write_source_spaces(out_name, src_new)
src_new = read_source_spaces(out_name)
for so, sn in zip(src, src_new):
assert_array_equal(so['dist_limit'], np.array([-0.007], np.float32))
assert_array_equal(sn['dist_limit'], np.array([0.007], np.float32))
do = so['dist']
dn = sn['dist']
# clean out distances > 0.007 in C code
do.data[do.data > 0.007] = 0
do.eliminate_zeros()
# make sure we have some comparable distances
assert_true(np.sum(do.data < 0.007) > 400)
# do comparison over the region computed
d = (do - dn)[:sn['vertno'][n_do - 1]][:, :sn['vertno'][n_do - 1]]
assert_allclose(np.zeros_like(d.data), d.data, rtol=0, atol=1e-6)
@slow_test
@testing.requires_testing_data
@requires_scipy_version('0.11')
def test_add_source_space_distances():
"""Test adding distances to source space"""
tempdir = _TempDir()
src = read_source_spaces(fname)
src_new = read_source_spaces(fname)
del src_new[0]['dist']
del src_new[1]['dist']
n_do = 20 # limit this for speed
src_new[0]['vertno'] = src_new[0]['vertno'][:n_do].copy()
src_new[1]['vertno'] = src_new[1]['vertno'][:n_do].copy()
out_name = op.join(tempdir, 'temp-src.fif')
add_source_space_distances(src_new)
write_source_spaces(out_name, src_new)
src_new = read_source_spaces(out_name)
# iterate over both hemispheres
for so, sn in zip(src, src_new):
v = so['vertno'][:n_do]
assert_array_equal(so['dist_limit'], np.array([-0.007], np.float32))
assert_array_equal(sn['dist_limit'], np.array([np.inf], np.float32))
do = so['dist']
dn = sn['dist']
# clean out distances > 0.007 in C code (some residual), and Python
ds = list()
for d in [do, dn]:
d.data[d.data > 0.007] = 0
d = d[v][:, v]
d.eliminate_zeros()
ds.append(d)
# make sure we actually calculated some comparable distances
assert_true(np.sum(ds[0].data < 0.007) > 10)
# do comparison
d = ds[0] - ds[1]
assert_allclose(np.zeros_like(d.data), d.data, rtol=0, atol=1e-9)
@testing.requires_testing_data
@requires_mne
def test_discrete_source_space():
"""Test setting up (and reading/writing) discrete source spaces
"""
tempdir = _TempDir()
src = read_source_spaces(fname)
v = src[0]['vertno']
# let's make a discrete version with the C code, and with ours
temp_name = op.join(tempdir, 'temp-src.fif')
try:
# save
temp_pos = op.join(tempdir, 'temp-pos.txt')
np.savetxt(temp_pos, np.c_[src[0]['rr'][v], src[0]['nn'][v]])
# let's try the spherical one (no bem or surf supplied)
run_subprocess(['mne_volume_source_space', '--meters',
'--pos', temp_pos, '--src', temp_name])
src_c = read_source_spaces(temp_name)
pos_dict = dict(rr=src[0]['rr'][v], nn=src[0]['nn'][v])
src_new = setup_volume_source_space('sample', None,
pos=pos_dict,
subjects_dir=subjects_dir)
_compare_source_spaces(src_c, src_new, mode='approx')
assert_allclose(src[0]['rr'][v], src_new[0]['rr'],
rtol=1e-3, atol=1e-6)
assert_allclose(src[0]['nn'][v], src_new[0]['nn'],
rtol=1e-3, atol=1e-6)
# now do writing
write_source_spaces(temp_name, src_c)
src_c2 = read_source_spaces(temp_name)
_compare_source_spaces(src_c, src_c2)
# now do MRI
assert_raises(ValueError, setup_volume_source_space, 'sample',
pos=pos_dict, mri=fname_mri)
finally:
if op.isfile(temp_name):
os.remove(temp_name)
@slow_test
@testing.requires_testing_data
def test_volume_source_space():
"""Test setting up volume source spaces
"""
tempdir = _TempDir()
src = read_source_spaces(fname_vol)
temp_name = op.join(tempdir, 'temp-src.fif')
surf = read_bem_surfaces(fname_bem, s_id=FIFF.FIFFV_BEM_SURF_ID_BRAIN)
surf['rr'] *= 1e3 # convert to mm
# The one in the testing dataset (uses bem as bounds)
for bem, surf in zip((fname_bem, None), (None, surf)):
src_new = setup_volume_source_space('sample', temp_name, pos=7.0,
bem=bem, surface=surf,
mri=fname_mri,
subjects_dir=subjects_dir)
_compare_source_spaces(src, src_new, mode='approx')
del src_new
src_new = read_source_spaces(temp_name)
_compare_source_spaces(src, src_new, mode='approx')
assert_raises(IOError, setup_volume_source_space, 'sample', temp_name,
pos=7.0, bem=None, surface='foo', # bad surf
mri=fname_mri, subjects_dir=subjects_dir)
@testing.requires_testing_data
@requires_mne
def test_other_volume_source_spaces():
"""Test setting up other volume source spaces"""
# these are split off because they require the MNE tools, and
# Travis doesn't seem to like them
# let's try the spherical one (no bem or surf supplied)
tempdir = _TempDir()
temp_name = op.join(tempdir, 'temp-src.fif')
run_subprocess(['mne_volume_source_space',
'--grid', '7.0',
'--src', temp_name,
'--mri', fname_mri])
src = read_source_spaces(temp_name)
src_new = setup_volume_source_space('sample', temp_name, pos=7.0,
mri=fname_mri,
subjects_dir=subjects_dir)
_compare_source_spaces(src, src_new, mode='approx')
del src
del src_new
assert_raises(ValueError, setup_volume_source_space, 'sample', temp_name,
pos=7.0, sphere=[1., 1.], mri=fname_mri, # bad sphere
subjects_dir=subjects_dir)
# now without MRI argument, it should give an error when we try
# to read it
run_subprocess(['mne_volume_source_space',
'--grid', '7.0',
'--src', temp_name])
assert_raises(ValueError, read_source_spaces, temp_name)
@testing.requires_testing_data
def test_triangle_neighbors():
"""Test efficient vertex neighboring triangles for surfaces"""
this = read_source_spaces(fname)[0]
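    # Build a reference neighbor-triangle lookup in pure Python to compare
    # against the optimized _triangle_neighbors implementation.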
this['neighbor_tri'] = [list() for _ in range(this['np'])]
for p in range(this['ntri']):
verts = this['tris'][p]
this['neighbor_tri'][verts[0]].append(p)
this['neighbor_tri'][verts[1]].append(p)
this['neighbor_tri'][verts[2]].append(p)
this['neighbor_tri'] = [np.array(nb, int) for nb in this['neighbor_tri']]
neighbor_tri = _triangle_neighbors(this['tris'], this['np'])
    assert_true(all(np.array_equal(nt1, nt2)
                    for nt1, nt2 in zip(neighbor_tri, this['neighbor_tri'])))
def test_accumulate_normals():
"""Test efficient normal accumulation for surfaces"""
# set up comparison
rng = np.random.RandomState(0)
n_pts = int(1.6e5) # approx number in sample source space
n_tris = int(3.2e5)
# use all positive to make a worst-case for cumulative summation
# (real "nn" vectors will have both positive and negative values)
tris = (rng.rand(n_tris, 1) * (n_pts - 2)).astype(int)
tris = np.c_[tris, tris + 1, tris + 2]
tri_nn = rng.rand(n_tris, 3)
this = dict(tris=tris, np=n_pts, ntri=n_tris, tri_nn=tri_nn)
# cut-and-paste from original code in surface.py:
# Find neighboring triangles and accumulate vertex normals
this['nn'] = np.zeros((this['np'], 3))
for p in range(this['ntri']):
# vertex normals
verts = this['tris'][p]
this['nn'][verts, :] += this['tri_nn'][p, :]
nn = _accumulate_normals(this['tris'], this['tri_nn'], this['np'])
# the moment of truth (or reckoning)
assert_allclose(nn, this['nn'], rtol=1e-7, atol=1e-7)
@slow_test
@testing.requires_testing_data
def test_setup_source_space():
"""Test setting up ico, oct, and all source spaces
"""
tempdir = _TempDir()
fname_ico = op.join(data_path, 'subjects', 'fsaverage', 'bem',
'fsaverage-ico-5-src.fif')
# first lets test some input params
assert_raises(ValueError, setup_source_space, 'sample', spacing='oct',
add_dist=False)
assert_raises(ValueError, setup_source_space, 'sample', spacing='octo',
add_dist=False)
assert_raises(ValueError, setup_source_space, 'sample', spacing='oct6e',
add_dist=False)
assert_raises(ValueError, setup_source_space, 'sample', spacing='7emm',
add_dist=False)
assert_raises(ValueError, setup_source_space, 'sample', spacing='alls',
add_dist=False)
assert_raises(IOError, setup_source_space, 'sample', spacing='oct6',
subjects_dir=subjects_dir, add_dist=False)
# ico 5 (fsaverage) - write to temp file
src = read_source_spaces(fname_ico)
temp_name = op.join(tempdir, 'temp-src.fif')
with warnings.catch_warnings(record=True): # sklearn equiv neighbors
warnings.simplefilter('always')
src_new = setup_source_space('fsaverage', temp_name, spacing='ico5',
subjects_dir=subjects_dir, add_dist=False,
overwrite=True)
_compare_source_spaces(src, src_new, mode='approx')
assert_array_equal(src[0]['vertno'], np.arange(10242))
assert_array_equal(src[1]['vertno'], np.arange(10242))
# oct-6 (sample) - auto filename + IO
src = read_source_spaces(fname)
temp_name = op.join(tempdir, 'temp-src.fif')
with warnings.catch_warnings(record=True): # sklearn equiv neighbors
warnings.simplefilter('always')
src_new = setup_source_space('sample', temp_name, spacing='oct6',
subjects_dir=subjects_dir,
overwrite=True, add_dist=False)
_compare_source_spaces(src, src_new, mode='approx')
src_new = read_source_spaces(temp_name)
_compare_source_spaces(src, src_new, mode='approx')
# all source points - no file writing
src_new = setup_source_space('sample', None, spacing='all',
subjects_dir=subjects_dir, add_dist=False)
assert_true(src_new[0]['nuse'] == len(src_new[0]['rr']))
assert_true(src_new[1]['nuse'] == len(src_new[1]['rr']))
# dense source space to hit surf['inuse'] lines of _create_surf_spacing
assert_raises(RuntimeError, setup_source_space, 'sample', None,
spacing='ico6', subjects_dir=subjects_dir, add_dist=False)
@testing.requires_testing_data
def test_read_source_spaces():
"""Test reading of source space meshes
"""
src = read_source_spaces(fname, patch_stats=True)
# 3D source space
lh_points = src[0]['rr']
lh_faces = src[0]['tris']
lh_use_faces = src[0]['use_tris']
rh_points = src[1]['rr']
rh_faces = src[1]['tris']
rh_use_faces = src[1]['use_tris']
assert_true(lh_faces.min() == 0)
assert_true(lh_faces.max() == lh_points.shape[0] - 1)
assert_true(lh_use_faces.min() >= 0)
assert_true(lh_use_faces.max() <= lh_points.shape[0] - 1)
assert_true(rh_faces.min() == 0)
assert_true(rh_faces.max() == rh_points.shape[0] - 1)
assert_true(rh_use_faces.min() >= 0)
assert_true(rh_use_faces.max() <= rh_points.shape[0] - 1)
@slow_test
@testing.requires_testing_data
def test_write_source_space():
"""Test reading and writing of source spaces
"""
tempdir = _TempDir()
src0 = read_source_spaces(fname, patch_stats=False)
write_source_spaces(op.join(tempdir, 'tmp-src.fif'), src0)
src1 = read_source_spaces(op.join(tempdir, 'tmp-src.fif'),
patch_stats=False)
_compare_source_spaces(src0, src1)
# test warnings on bad filenames
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
src_badname = op.join(tempdir, 'test-bad-name.fif.gz')
write_source_spaces(src_badname, src0)
read_source_spaces(src_badname)
assert_equal(len(w), 2)
@testing.requires_testing_data
@requires_fs_or_nibabel
def test_vertex_to_mni():
"""Test conversion of vertices to MNI coordinates
"""
# obtained using "tksurfer (sample) (l/r)h white"
vertices = [100960, 7620, 150549, 96761]
coords = np.array([[-60.86, -11.18, -3.19], [-36.46, -93.18, -2.36],
[-38.00, 50.08, -10.61], [47.14, 8.01, 46.93]])
hemis = [0, 0, 0, 1]
coords_2 = vertex_to_mni(vertices, hemis, 'sample', subjects_dir)
# less than 1mm error
assert_allclose(coords, coords_2, atol=1.0)
@testing.requires_testing_data
@requires_freesurfer
@requires_nibabel()
def test_vertex_to_mni_fs_nibabel():
"""Test equivalence of vert_to_mni for nibabel and freesurfer
"""
n_check = 1000
subject = 'sample'
vertices = np.random.randint(0, 100000, n_check)
hemis = np.random.randint(0, 1, n_check)
coords = vertex_to_mni(vertices, hemis, subject, subjects_dir,
'nibabel')
coords_2 = vertex_to_mni(vertices, hemis, subject, subjects_dir,
'freesurfer')
# less than 0.1 mm error
assert_allclose(coords, coords_2, atol=0.1)
@testing.requires_testing_data
@requires_freesurfer
@requires_nibabel()
def test_get_volume_label_names():
"""Test reading volume label names
"""
aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
label_names = get_volume_labels_from_aseg(aseg_fname)
assert_equal(label_names.count('Brain-Stem'), 1)
@testing.requires_testing_data
@requires_freesurfer
@requires_nibabel()
def test_source_space_from_label():
"""Test generating a source space from volume label
"""
tempdir = _TempDir()
aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
label_names = get_volume_labels_from_aseg(aseg_fname)
volume_label = label_names[int(np.random.rand() * len(label_names))]
# Test pos as dict
pos = dict()
assert_raises(ValueError, setup_volume_source_space, 'sample', pos=pos,
volume_label=volume_label, mri=aseg_fname)
# Test no mri provided
assert_raises(RuntimeError, setup_volume_source_space, 'sample', mri=None,
volume_label=volume_label)
# Test invalid volume label
assert_raises(ValueError, setup_volume_source_space, 'sample',
volume_label='Hello World!', mri=aseg_fname)
src = setup_volume_source_space('sample', subjects_dir=subjects_dir,
volume_label=volume_label, mri=aseg_fname,
add_interpolator=False)
assert_equal(volume_label, src[0]['seg_name'])
# test reading and writing
out_name = op.join(tempdir, 'temp-src.fif')
write_source_spaces(out_name, src)
src_from_file = read_source_spaces(out_name)
_compare_source_spaces(src, src_from_file, mode='approx')
@testing.requires_testing_data
@requires_freesurfer
@requires_nibabel()
def test_combine_source_spaces():
"""Test combining source spaces
"""
tempdir = _TempDir()
aseg_fname = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
label_names = get_volume_labels_from_aseg(aseg_fname)
volume_labels = [label_names[int(np.random.rand() * len(label_names))]
for ii in range(2)]
# get a surface source space (no need to test creation here)
srf = read_source_spaces(fname, patch_stats=False)
# setup 2 volume source spaces
vol = setup_volume_source_space('sample', subjects_dir=subjects_dir,
volume_label=volume_labels[0],
mri=aseg_fname, add_interpolator=False)
# setup a discrete source space
rr = np.random.randint(0, 20, (100, 3)) * 1e-3
nn = np.zeros(rr.shape)
nn[:, -1] = 1
pos = {'rr': rr, 'nn': nn}
disc = setup_volume_source_space('sample', subjects_dir=subjects_dir,
pos=pos, verbose='error')
# combine source spaces
src = srf + vol + disc
# test addition of source spaces
assert_equal(type(src), SourceSpaces)
assert_equal(len(src), 4)
# test reading and writing
src_out_name = op.join(tempdir, 'temp-src.fif')
src.save(src_out_name)
src_from_file = read_source_spaces(src_out_name)
_compare_source_spaces(src, src_from_file, mode='approx')
# test that all source spaces are in MRI coordinates
coord_frames = np.array([s['coord_frame'] for s in src])
assert_true((coord_frames == FIFF.FIFFV_COORD_MRI).all())
# test errors for export_volume
image_fname = op.join(tempdir, 'temp-image.mgz')
# source spaces with no volume
assert_raises(ValueError, srf.export_volume, image_fname, verbose='error')
# unrecognized source type
disc2 = disc.copy()
disc2[0]['type'] = 'kitty'
src_unrecognized = src + disc2
assert_raises(ValueError, src_unrecognized.export_volume, image_fname,
verbose='error')
# unrecognized file type
bad_image_fname = op.join(tempdir, 'temp-image.png')
assert_raises(ValueError, src.export_volume, bad_image_fname,
verbose='error')
# mixed coordinate frames
disc3 = disc.copy()
disc3[0]['coord_frame'] = 10
src_mixed_coord = src + disc3
assert_raises(ValueError, src_mixed_coord.export_volume, image_fname,
verbose='error')
run_tests_if_main()
# The following code was used to generate small-src.fif.gz.
# Unfortunately the C code bombs when trying to add source space distances,
# possibly due to incomplete "faking" of a smaller surface on our part here.
"""
# -*- coding: utf-8 -*-
import os
import numpy as np
import mne
data_path = mne.datasets.sample.data_path()
src = mne.setup_source_space('sample', fname=None, spacing='oct5')
hemis = ['lh', 'rh']
fnames = [data_path + '/subjects/sample/surf/%s.decimated' % h for h in hemis]
vs = list()
for s, fname in zip(src, fnames):
coords = s['rr'][s['vertno']]
vs.append(s['vertno'])
idx = -1 * np.ones(len(s['rr']))
idx[s['vertno']] = np.arange(s['nuse'])
faces = s['use_tris']
faces = idx[faces]
mne.write_surface(fname, coords, faces)
# we need to move sphere surfaces
spheres = [data_path + '/subjects/sample/surf/%s.sphere' % h for h in hemis]
for s in spheres:
os.rename(s, s + '.bak')
try:
for s, v in zip(spheres, vs):
coords, faces = mne.read_surface(s + '.bak')
coords = coords[v]
mne.write_surface(s, coords, faces)
src = mne.setup_source_space('sample', fname=None, spacing='oct4',
surface='decimated')
finally:
for s in spheres:
os.rename(s + '.bak', s)
fname = 'small-src.fif'
fname_gz = fname + '.gz'
mne.write_source_spaces(fname, src)
mne.utils.run_subprocess(['mne_add_patch_info', '--src', fname,
'--srcp', fname])
mne.write_source_spaces(fname_gz, mne.read_source_spaces(fname))
"""
|
bsd-3-clause
|
Myasuka/scikit-learn
|
examples/svm/plot_svm_regression.py
|
249
|
1451
|
"""
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
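# Note: a large C means weak regularization; gamma controls the RBF kernel width.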
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
# plt.hold('on') is unnecessary and has been removed from recent matplotlib;
# successive plot calls accumulate on the same axes by default.
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
|
bsd-3-clause
|
detrout/debian-statsmodels
|
statsmodels/graphics/tests/test_boxplots.py
|
28
|
1257
|
import numpy as np
from numpy.testing import dec
from statsmodels.graphics.boxplots import violinplot, beanplot
from statsmodels.datasets import anes96
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except:
have_matplotlib = False
@dec.skipif(not have_matplotlib)
def test_violinplot_beanplot():
# Test violinplot and beanplot with the same dataset.
data = anes96.load_pandas()
party_ID = np.arange(7)
labels = ["Strong Democrat", "Weak Democrat", "Independent-Democrat",
"Independent-Independent", "Independent-Republican",
"Weak Republican", "Strong Republican"]
age = [data.exog['age'][data.endog == id] for id in party_ID]
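    # one age distribution per party-identification category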
fig = plt.figure()
ax = fig.add_subplot(111)
violinplot(age, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30})
plt.close(fig)
fig = plt.figure()
ax = fig.add_subplot(111)
beanplot(age, ax=ax, labels=labels,
plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
'label_fontsize':'small',
'label_rotation':30})
plt.close(fig)
|
bsd-3-clause
|
GrumpyNounours/PySeidon
|
pyseidon/utilities/windrose.py
|
2
|
20431
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__version__ = '1.4'
__author__ = 'Lionel Roubeyrie'
__mail__ = 'lionel.roubeyrie@gmail.com'
__license__ = 'CeCILL-B'
import matplotlib
import matplotlib.cm as cm
import numpy as np
from matplotlib.patches import Rectangle, Polygon
from matplotlib.ticker import ScalarFormatter, AutoLocator
from matplotlib.text import Text, FontProperties
from matplotlib.projections.polar import PolarAxes
from numpy.lib.twodim_base import histogram2d
import matplotlib.pyplot as plt
from pylab import poly_between
RESOLUTION = 100
ZBASE = -1000 #The starting zorder for all drawing, negative to have the grid on
class WindroseAxes(PolarAxes):
"""
Create a windrose axes
"""
def __init__(self, *args, **kwargs):
"""
See Axes base class for args and kwargs documentation
"""
#Uncomment to have the possibility to change the resolution directly
#when the instance is created
#self.RESOLUTION = kwargs.pop('resolution', 100)
PolarAxes.__init__(self, *args, **kwargs)
self.set_aspect('equal', adjustable='box', anchor='C')
self.radii_angle = 67.5
self.cla()
def cla(self):
"""
Clear the current axes
"""
PolarAxes.cla(self)
self.theta_angles = np.arange(0, 360, 45)
self.theta_labels = ['E', 'N-E', 'N', 'N-W', 'W', 'S-W', 'S', 'S-E']
self.set_thetagrids(angles=self.theta_angles, labels=self.theta_labels)
self._info = {'dir' : list(),
'bins' : list(),
'table' : list()}
self.patches_list = list()
def _colors(self, cmap, n):
'''
Returns a list of n colors based on the colormap cmap
'''
return [cmap(i) for i in np.linspace(0.0, 1.0, n)]
def set_radii_angle(self, **kwargs):
"""
Set the radii labels angle
"""
null = kwargs.pop('labels', None)
angle = kwargs.pop('angle', None)
if angle is None:
angle = self.radii_angle
self.radii_angle = angle
radii = np.linspace(0.1, self.get_rmax(), 6)
radii_labels = [ "%.1f" %r for r in radii ]
radii_labels[0] = "" #Removing label 0
null = self.set_rgrids(radii=radii, labels=radii_labels,
angle=self.radii_angle, **kwargs)
def _update(self):
self.set_rmax(rmax=np.max(np.sum(self._info['table'], axis=0)))
self.set_radii_angle(angle=self.radii_angle)
def legend(self, loc='lower left', **kwargs):
"""
        Sets the legend location and its properties.
The location codes are
'best' : 0,
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
If none of these are suitable, loc can be a 2-tuple giving x,y
in axes coords, ie,
loc = (0, 1) is left top
loc = (0.5, 0.5) is center, center
and so on. The following kwargs are supported:
isaxes=True # whether this is an axes legend
prop = FontProperties(size='smaller') # the font property
pad = 0.2 # the fractional whitespace inside the legend border
shadow # if True, draw a shadow behind legend
labelsep = 0.005 # the vertical space between the legend entries
handlelen = 0.05 # the length of the legend lines
handletextsep = 0.02 # the space between the legend line and legend text
axespad = 0.02 # the border between the axes and legend edge
"""
def get_handles():
handles = list()
for p in self.patches_list:
if isinstance(p, matplotlib.patches.Polygon) or \
isinstance(p, matplotlib.patches.Rectangle):
color = p.get_facecolor()
elif isinstance(p, matplotlib.lines.Line2D):
color = p.get_color()
else:
raise AttributeError("Can't handle patches")
handles.append(Rectangle((0, 0), 0.2, 0.2,
facecolor=color, edgecolor='black'))
return handles
def get_labels():
labels = np.copy(self._info['bins'])
labels = ["[%.1f : %0.1f[" %(labels[i], labels[i+1]) \
for i in range(len(labels)-1)]
return labels
null = kwargs.pop('labels', None)
null = kwargs.pop('handles', None)
handles = get_handles()
labels = get_labels()
self.legend_ = matplotlib.legend.Legend(self, handles, labels,
loc, **kwargs)
return self.legend_
def _init_plot(self, dir, var, **kwargs):
"""
Internal method used by all plotting commands
"""
#self.cla()
null = kwargs.pop('zorder', None)
#Init of the bins array if not set
bins = kwargs.pop('bins', None)
if bins is None:
bins = np.linspace(np.min(var), np.max(var), 6)
if isinstance(bins, int):
bins = np.linspace(np.min(var), np.max(var), bins)
bins = np.asarray(bins)
nbins = len(bins)
#Number of sectors
nsector = kwargs.pop('nsector', None)
if nsector is None:
nsector = 16
#Sets the colors table based on the colormap or the "colors" argument
colors = kwargs.pop('colors', None)
cmap = kwargs.pop('cmap', None)
if colors is not None:
if isinstance(colors, str):
colors = [colors]*nbins
if isinstance(colors, (tuple, list)):
if len(colors) != nbins:
raise ValueError("colors and bins must have same length")
else:
if cmap is None:
cmap = cm.jet
colors = self._colors(cmap, nbins)
#Building the angles list
angles = np.arange(0, -2*np.pi, -2*np.pi/nsector) + np.pi/2
normed = kwargs.pop('normed', False)
blowto = kwargs.pop('blowto', False)
        #Set the global information dictionary
self._info['dir'], self._info['bins'], self._info['table'] = histogram(dir, var, bins, nsector, normed, blowto)
return bins, nbins, nsector, colors, angles, kwargs
def contour(self, dir, var, **kwargs):
"""
        Plot a windrose in linear mode. For each var bin, a line will be
        drawn on the axes, a segment between each sector (center to center).
        Each line can be formatted (color, width, ...) like with the standard plot
pylab command.
Mandatory:
* dir : 1D array - directions the wind blows from, North centred
* var : 1D array - values of the variable to compute. Typically the wind
speeds
Optional:
* nsector: integer - number of sectors used to compute the windrose
table. If not set, nsectors=16, then each sector will be 360/16=22.5°,
        and the resulting computed table will be aligned with the cardinal
points.
* bins : 1D array or integer- number of bins, or a sequence of
bins variable. If not set, bins=6, then
bins=linspace(min(var), max(var), 6)
* blowto : bool. If True, the windrose will be pi rotated,
        to show where the wind blows to (useful for a pollutant rose).
* colors : string or tuple - one string color ('k' or 'black'), in this
case all bins will be plotted in this color; a tuple of matplotlib
color args (string, float, rgb, etc), different levels will be plotted
in different colors in the order specified.
* cmap : a cm Colormap instance from matplotlib.cm.
- if cmap == None and colors == None, a default Colormap is used.
others kwargs : see help(pylab.plot)
"""
bins, nbins, nsector, colors, angles, kwargs = self._init_plot(dir, var,
**kwargs)
#closing lines
angles = np.hstack((angles, angles[-1]-2*np.pi/nsector))
vals = np.hstack((self._info['table'],
np.reshape(self._info['table'][:,0],
(self._info['table'].shape[0], 1))))
offset = 0
for i in range(nbins):
val = vals[i,:] + offset
offset += vals[i, :]
zorder = ZBASE + nbins - i
patch = self.plot(angles, val, color=colors[i], zorder=zorder,
**kwargs)
self.patches_list.extend(patch)
self._update()
def contourf(self, dir, var, **kwargs):
"""
        Plot a windrose in filled mode. For each var bin, a line will be
        drawn on the axes, a segment between each sector (center to center).
        Each line can be formatted (color, width, ...) like with the standard plot
pylab command.
Mandatory:
* dir : 1D array - directions the wind blows from, North centred
* var : 1D array - values of the variable to compute. Typically the wind
speeds
Optional:
* nsector: integer - number of sectors used to compute the windrose
table. If not set, nsectors=16, then each sector will be 360/16=22.5°,
        and the resulting computed table will be aligned with the cardinal
points.
* bins : 1D array or integer- number of bins, or a sequence of
bins variable. If not set, bins=6, then
bins=linspace(min(var), max(var), 6)
* blowto : bool. If True, the windrose will be pi rotated,
        to show where the wind blows to (useful for a pollutant rose).
* colors : string or tuple - one string color ('k' or 'black'), in this
case all bins will be plotted in this color; a tuple of matplotlib
color args (string, float, rgb, etc), different levels will be plotted
in different colors in the order specified.
* cmap : a cm Colormap instance from matplotlib.cm.
- if cmap == None and colors == None, a default Colormap is used.
others kwargs : see help(pylab.plot)
"""
bins, nbins, nsector, colors, angles, kwargs = self._init_plot(dir, var,
**kwargs)
null = kwargs.pop('facecolor', None)
null = kwargs.pop('edgecolor', None)
#closing lines
angles = np.hstack((angles, angles[-1]-2*np.pi/nsector))
vals = np.hstack((self._info['table'],
np.reshape(self._info['table'][:,0],
(self._info['table'].shape[0], 1))))
offset = 0
for i in range(nbins):
val = vals[i,:] + offset
offset += vals[i, :]
zorder = ZBASE + nbins - i
xs, ys = poly_between(angles, 0, val)
patch = self.fill(xs, ys, facecolor=colors[i],
edgecolor=colors[i], zorder=zorder, **kwargs)
self.patches_list.extend(patch)
def bar(self, dir, var, **kwargs):
"""
        Plot a windrose in bar mode. For each var bin and for each sector,
        a colored bar will be drawn on the axes.
Mandatory:
* dir : 1D array - directions the wind blows from, North centred
* var : 1D array - values of the variable to compute. Typically the wind
speeds
Optional:
* nsector: integer - number of sectors used to compute the windrose
table. If not set, nsectors=16, then each sector will be 360/16=22.5°,
        and the resulting computed table will be aligned with the cardinal
points.
* bins : 1D array or integer- number of bins, or a sequence of
bins variable. If not set, bins=6 between min(var) and max(var).
* blowto : bool. If True, the windrose will be pi rotated,
        to show where the wind blows to (useful for a pollutant rose).
* colors : string or tuple - one string color ('k' or 'black'), in this
case all bins will be plotted in this color; a tuple of matplotlib
color args (string, float, rgb, etc), different levels will be plotted
in different colors in the order specified.
* cmap : a cm Colormap instance from matplotlib.cm.
- if cmap == None and colors == None, a default Colormap is used.
        edgecolor : string - The color with which each bar edge will be plotted.
Default : no edgecolor
* opening : float - between 0.0 and 1.0, to control the space between
each sector (1.0 for no space)
"""
bins, nbins, nsector, colors, angles, kwargs = self._init_plot(dir, var,
**kwargs)
null = kwargs.pop('facecolor', None)
edgecolor = kwargs.pop('edgecolor', None)
if edgecolor is not None:
if not isinstance(edgecolor, str):
raise ValueError('edgecolor must be a string color')
opening = kwargs.pop('opening', None)
if opening is None:
opening = 0.8
dtheta = 2*np.pi/nsector
opening = dtheta*opening
for j in range(nsector):
offset = 0
for i in range(nbins):
if i > 0:
offset += self._info['table'][i-1, j]
val = self._info['table'][i, j]
zorder = ZBASE + nbins - i
patch = Rectangle((angles[j]-opening/2, offset), opening, val,
facecolor=colors[i], edgecolor=edgecolor, zorder=zorder,
**kwargs)
self.add_patch(patch)
if j == 0:
self.patches_list.append(patch)
self._update()
def box(self, dir, var, **kwargs):
"""
        Plot a windrose in proportional bar mode. For each var bin and for each
        sector, a colored bar will be drawn on the axes.
Mandatory:
* dir : 1D array - directions the wind blows from, North centred
* var : 1D array - values of the variable to compute. Typically the wind
speeds
Optional:
* nsector: integer - number of sectors used to compute the windrose
table. If not set, nsectors=16, then each sector will be 360/16=22.5°,
        and the resulting computed table will be aligned with the cardinal
points.
* bins : 1D array or integer- number of bins, or a sequence of
bins variable. If not set, bins=6 between min(var) and max(var).
* blowto : bool. If True, the windrose will be pi rotated,
        to show where the wind blows to (useful for a pollutant rose).
* colors : string or tuple - one string color ('k' or 'black'), in this
case all bins will be plotted in this color; a tuple of matplotlib
color args (string, float, rgb, etc), different levels will be plotted
in different colors in the order specified.
* cmap : a cm Colormap instance from matplotlib.cm.
- if cmap == None and colors == None, a default Colormap is used.
        edgecolor : string - The color with which each bar edge will be plotted.
Default : no edgecolor
"""
bins, nbins, nsector, colors, angles, kwargs = self._init_plot(dir, var,
**kwargs)
null = kwargs.pop('facecolor', None)
edgecolor = kwargs.pop('edgecolor', None)
if edgecolor is not None:
if not isinstance(edgecolor, str):
raise ValueError('edgecolor must be a string color')
opening = np.linspace(0.0, np.pi/16, nbins)
for j in range(nsector):
offset = 0
for i in range(nbins):
if i > 0:
offset += self._info['table'][i-1, j]
val = self._info['table'][i, j]
zorder = ZBASE + nbins - i
patch = Rectangle((angles[j]-opening[i]/2, offset), opening[i],
val, facecolor=colors[i], edgecolor=edgecolor,
zorder=zorder, **kwargs)
self.add_patch(patch)
if j == 0:
self.patches_list.append(patch)
self._update()
def histogram(dir, var, bins, nsector, normed=False, blowto=False):
"""
Returns an array where, for each sector of wind
    (centred on the north), we have the number of times the wind comes with a
    particular var (speed, pollutant concentration, ...).
* dir : 1D array - directions the wind blows from, North centred
* var : 1D array - values of the variable to compute. Typically the wind
speeds
    * bins : list - list of var categories against which the table is computed
* nsector : integer - number of sectors
    * normed : boolean - whether the resulting table is normalised to percent.
    * blowto : boolean - Normally a windrose is computed with directions
    as the wind blows from. If True, the table will be reversed (useful for
    a pollutant rose)
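    Illustrative usage sketch (names below are hypothetical; directions are
    assumed to be given in degrees):
    >>> import numpy as np
    >>> rng = np.random.RandomState(0)
    >>> wind_dir = rng.rand(100) * 360.
    >>> wind_spd = rng.rand(100) * 10.
    >>> dir_edges, var_bins, table = histogram(wind_dir, wind_spd,
    ...                                        bins=np.linspace(0., 10., 6),
    ...                                        nsector=16)
    >>> table.shape
    (6, 16)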
"""
if len(var) != len(dir):
        raise ValueError("var and dir must have same length")
angle = 360./nsector
dir_bins = np.arange(-angle/2 ,360.+angle, angle, dtype=np.float)
dir_edges = dir_bins.tolist()
dir_edges.pop(-1)
dir_edges[0] = dir_edges.pop(-1)
dir_bins[0] = 0.
var_bins = bins.tolist()
var_bins.append(np.inf)
if blowto:
dir = dir + 180.
dir[dir>=360.] = dir[dir>=360.] - 360
table = histogram2d(x=var, y=dir, bins=[var_bins, dir_bins],
normed=False)[0]
# add the last value to the first to have the table of North winds
table[:,0] = table[:,0] + table[:,-1]
# and remove the last col
table = table[:, :-1]
if normed:
table = table*100/table.sum()
return dir_edges, var_bins, table
def wrcontour(dir, var, **kwargs):
fig = plt.figure()
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect)
fig.add_axes(ax)
ax.contour(dir, var, **kwargs)
l = ax.legend(axespad=-0.10)
plt.setp(l.get_texts(), fontsize=8)
plt.draw()
plt.show()
return ax
def wrcontourf(dir, var, **kwargs):
fig = plt.figure()
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect)
fig.add_axes(ax)
ax.contourf(dir, var, **kwargs)
l = ax.legend(axespad=-0.10)
plt.setp(l.get_texts(), fontsize=8)
plt.draw()
plt.show()
return ax
def wrbox(dir, var, **kwargs):
fig = plt.figure()
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect)
fig.add_axes(ax)
ax.box(dir, var, **kwargs)
l = ax.legend(axespad=-0.10)
plt.setp(l.get_texts(), fontsize=8)
plt.draw()
plt.show()
return ax
def wrbar(dir, var, **kwargs):
fig = plt.figure()
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect)
fig.add_axes(ax)
ax.bar(dir, var, **kwargs)
l = ax.legend(axespad=-0.10)
plt.setp(l.get_texts(), fontsize=8)
plt.draw()
plt.show()
return ax
def clean(dir, var):
"""
    Remove masked values from the two arrays: if a direction value is masked,
    the corresponding var value is also removed in the cleaning process (and vice versa).
"""
dirmask = dir.mask==False
varmask = var.mask==False
ind = dirmask*varmask
return dir[ind], var[ind]
if __name__=='__main__':
from pylab import figure, show, setp, random, grid, draw
vv=random(500)*6
dv=random(500)*360
fig = figure(figsize=(8, 8), dpi=80, facecolor='w', edgecolor='w')
rect = [0.1, 0.1, 0.8, 0.8]
ax = WindroseAxes(fig, rect, axisbg='w')
fig.add_axes(ax)
# ax.contourf(dv, vv, bins=np.arange(0,8,1), cmap=cm.hot)
# ax.contour(dv, vv, bins=np.arange(0,8,1), colors='k')
# ax.bar(dv, vv, normed=True, opening=0.8, edgecolor='white')
ax.box(dv, vv, normed=True)
l = ax.legend(axespad=-0.10)
setp(l.get_texts(), fontsize=8)
draw()
#print ax._info
show()
|
agpl-3.0
|
0todd0000/spm1d
|
spm1d/examples/nonparam/1d/ex_anova3onerm.py
|
1
|
1117
|
import numpy as np
from matplotlib import pyplot
import spm1d
#(0) Load dataset:
dataset = spm1d.data.uv1d.anova3onerm.SPM1D_ANOVA3ONERM_2x2x2()
# dataset = spm1d.data.uv1d.anova3onerm.SPM1D_ANOVA3ONERM_2x3x4()
y,A,B,C,SUBJ = dataset.get_data()
#(1) Conduct non-parametric test:
np.random.seed(0)
alpha = 0.05
FFn = spm1d.stats.nonparam.anova3onerm(y, A, B, C, SUBJ)
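# 200 permutation iterations keep the example fast; more iterations give a
# more stable non-parametric threshold.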
FFni = FFn.inference(alpha, iterations=200)
print( FFni )
#(2) Compare with parametric result:
FF = spm1d.stats.anova3onerm(y, A, B, C, SUBJ, equal_var=True)
FFi = FF.inference(alpha)
print( FFi )
#(3) Plot
pyplot.close('all')
pyplot.figure(figsize=(15,10))
for i,(Fi,Fni) in enumerate( zip(FFi,FFni) ):
ax = pyplot.subplot(3,3,i+1)
Fni.plot(ax=ax)
Fni.plot_threshold_label(ax=ax, fontsize=8)
Fni.plot_p_values(ax=ax, size=10)
ax.axhline( Fi.zstar, color='orange', linestyle='--', label='Parametric threshold')
if (Fi.zstar > Fi.z.max()) and (Fi.zstar>Fni.zstar):
ax.set_ylim(0, Fi.zstar+1)
if i==0:
ax.legend(fontsize=10, loc='best')
ax.set_title( Fni.effect )
pyplot.show()
|
gpl-3.0
|
Weihonghao/ECM
|
Vpy34/lib/python3.5/site-packages/numpy/lib/tests/test_type_check.py
|
9
|
11493
|
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.compat import long
from numpy.testing import (
TestCase, assert_, assert_equal, assert_array_equal, run_module_suite
)
from numpy.lib.type_check import (
common_type, mintypecode, isreal, iscomplex, isposinf, isneginf,
nan_to_num, isrealobj, iscomplexobj, asfarray, real_if_close
)
def assert_all(x):
assert_(np.all(x), x)
class TestCommonType(TestCase):
def test_basic(self):
ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32)
af16 = np.array([[1, 2], [3, 4]], dtype=np.float16)
af32 = np.array([[1, 2], [3, 4]], dtype=np.float32)
af64 = np.array([[1, 2], [3, 4]], dtype=np.float64)
acs = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.csingle)
acd = np.array([[1+5j, 2+6j], [3+7j, 4+8j]], dtype=np.cdouble)
assert_(common_type(ai32) == np.float64)
assert_(common_type(af16) == np.float16)
assert_(common_type(af32) == np.float32)
assert_(common_type(af64) == np.float64)
assert_(common_type(acs) == np.csingle)
assert_(common_type(acd) == np.cdouble)
class TestMintypecode(TestCase):
def test_default_1(self):
for itype in '1bcsuwil':
assert_equal(mintypecode(itype), 'd')
assert_equal(mintypecode('f'), 'f')
assert_equal(mintypecode('d'), 'd')
assert_equal(mintypecode('F'), 'F')
assert_equal(mintypecode('D'), 'D')
def test_default_2(self):
for itype in '1bcsuwil':
assert_equal(mintypecode(itype+'f'), 'f')
assert_equal(mintypecode(itype+'d'), 'd')
assert_equal(mintypecode(itype+'F'), 'F')
assert_equal(mintypecode(itype+'D'), 'D')
assert_equal(mintypecode('ff'), 'f')
assert_equal(mintypecode('fd'), 'd')
assert_equal(mintypecode('fF'), 'F')
assert_equal(mintypecode('fD'), 'D')
assert_equal(mintypecode('df'), 'd')
assert_equal(mintypecode('dd'), 'd')
#assert_equal(mintypecode('dF',savespace=1),'F')
assert_equal(mintypecode('dF'), 'D')
assert_equal(mintypecode('dD'), 'D')
assert_equal(mintypecode('Ff'), 'F')
#assert_equal(mintypecode('Fd',savespace=1),'F')
assert_equal(mintypecode('Fd'), 'D')
assert_equal(mintypecode('FF'), 'F')
assert_equal(mintypecode('FD'), 'D')
assert_equal(mintypecode('Df'), 'D')
assert_equal(mintypecode('Dd'), 'D')
assert_equal(mintypecode('DF'), 'D')
assert_equal(mintypecode('DD'), 'D')
def test_default_3(self):
assert_equal(mintypecode('fdF'), 'D')
#assert_equal(mintypecode('fdF',savespace=1),'F')
assert_equal(mintypecode('fdD'), 'D')
assert_equal(mintypecode('fFD'), 'D')
assert_equal(mintypecode('dFD'), 'D')
assert_equal(mintypecode('ifd'), 'd')
assert_equal(mintypecode('ifF'), 'F')
assert_equal(mintypecode('ifD'), 'D')
assert_equal(mintypecode('idF'), 'D')
#assert_equal(mintypecode('idF',savespace=1),'F')
assert_equal(mintypecode('idD'), 'D')
class TestIsscalar(TestCase):
def test_basic(self):
assert_(np.isscalar(3))
assert_(not np.isscalar([3]))
assert_(not np.isscalar((3,)))
assert_(np.isscalar(3j))
assert_(np.isscalar(long(10)))
assert_(np.isscalar(4.0))
class TestReal(TestCase):
def test_real(self):
y = np.random.rand(10,)
assert_array_equal(y, np.real(y))
def test_cmplx(self):
y = np.random.rand(10,)+1j*np.random.rand(10,)
assert_array_equal(y.real, np.real(y))
class TestImag(TestCase):
def test_real(self):
y = np.random.rand(10,)
assert_array_equal(0, np.imag(y))
def test_cmplx(self):
y = np.random.rand(10,)+1j*np.random.rand(10,)
assert_array_equal(y.imag, np.imag(y))
class TestIscomplex(TestCase):
def test_fail(self):
z = np.array([-1, 0, 1])
res = iscomplex(z)
assert_(not np.any(res, axis=0))
def test_pass(self):
z = np.array([-1j, 1, 0])
res = iscomplex(z)
assert_array_equal(res, [1, 0, 0])
class TestIsreal(TestCase):
def test_pass(self):
z = np.array([-1, 0, 1j])
res = isreal(z)
assert_array_equal(res, [1, 1, 0])
def test_fail(self):
z = np.array([-1j, 1, 0])
res = isreal(z)
assert_array_equal(res, [0, 1, 1])
class TestIscomplexobj(TestCase):
def test_basic(self):
z = np.array([-1, 0, 1])
assert_(not iscomplexobj(z))
z = np.array([-1j, 0, -1])
assert_(iscomplexobj(z))
def test_scalar(self):
assert_(not iscomplexobj(1.0))
assert_(iscomplexobj(1+0j))
def test_list(self):
assert_(iscomplexobj([3, 1+0j, True]))
assert_(not iscomplexobj([3, 1, True]))
def test_duck(self):
class DummyComplexArray:
@property
def dtype(self):
return np.dtype(complex)
dummy = DummyComplexArray()
assert_(iscomplexobj(dummy))
def test_pandas_duck(self):
# This tests a custom np.dtype duck-typed class, such as used by pandas
# (pandas.core.dtypes)
class PdComplex(np.complex128):
pass
class PdDtype(object):
name = 'category'
names = None
type = PdComplex
kind = 'c'
str = '<c16'
base = np.dtype('complex128')
class DummyPd:
@property
def dtype(self):
return PdDtype
dummy = DummyPd()
assert_(iscomplexobj(dummy))
def test_custom_dtype_duck(self):
class MyArray(list):
@property
def dtype(self):
return complex
a = MyArray([1+0j, 2+0j, 3+0j])
assert_(iscomplexobj(a))
class TestIsrealobj(TestCase):
def test_basic(self):
z = np.array([-1, 0, 1])
assert_(isrealobj(z))
z = np.array([-1j, 0, -1])
assert_(not isrealobj(z))
class TestIsnan(TestCase):
def test_goodvalues(self):
z = np.array((-1., 0., 1.))
res = np.isnan(z) == 0
assert_all(np.all(res, axis=0))
def test_posinf(self):
with np.errstate(divide='ignore'):
assert_all(np.isnan(np.array((1.,))/0.) == 0)
def test_neginf(self):
with np.errstate(divide='ignore'):
assert_all(np.isnan(np.array((-1.,))/0.) == 0)
def test_ind(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isnan(np.array((0.,))/0.) == 1)
def test_integer(self):
assert_all(np.isnan(1) == 0)
def test_complex(self):
assert_all(np.isnan(1+1j) == 0)
def test_complex1(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isnan(np.array(0+0j)/0.) == 1)
class TestIsfinite(TestCase):
# Fixme, wrong place, isfinite now ufunc
def test_goodvalues(self):
z = np.array((-1., 0., 1.))
res = np.isfinite(z) == 1
assert_all(np.all(res, axis=0))
def test_posinf(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isfinite(np.array((1.,))/0.) == 0)
def test_neginf(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isfinite(np.array((-1.,))/0.) == 0)
def test_ind(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isfinite(np.array((0.,))/0.) == 0)
def test_integer(self):
assert_all(np.isfinite(1) == 1)
def test_complex(self):
assert_all(np.isfinite(1+1j) == 1)
def test_complex1(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isfinite(np.array(1+1j)/0.) == 0)
class TestIsinf(TestCase):
# Fixme, wrong place, isinf now ufunc
def test_goodvalues(self):
z = np.array((-1., 0., 1.))
res = np.isinf(z) == 0
assert_all(np.all(res, axis=0))
def test_posinf(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isinf(np.array((1.,))/0.) == 1)
def test_posinf_scalar(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isinf(np.array(1.,)/0.) == 1)
def test_neginf(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isinf(np.array((-1.,))/0.) == 1)
def test_neginf_scalar(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isinf(np.array(-1.)/0.) == 1)
def test_ind(self):
with np.errstate(divide='ignore', invalid='ignore'):
assert_all(np.isinf(np.array((0.,))/0.) == 0)
class TestIsposinf(TestCase):
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
vals = isposinf(np.array((-1., 0, 1))/0.)
assert_(vals[0] == 0)
assert_(vals[1] == 0)
assert_(vals[2] == 1)
class TestIsneginf(TestCase):
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
vals = isneginf(np.array((-1., 0, 1))/0.)
assert_(vals[0] == 1)
assert_(vals[1] == 0)
assert_(vals[2] == 0)
class TestNanToNum(TestCase):
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
vals = nan_to_num(np.array((-1., 0, 1))/0.)
assert_all(vals[0] < -1e10)
assert_all(np.isfinite(vals[0]))
assert_(vals[1] == 0)
assert_all(vals[2] > 1e10)
assert_all(np.isfinite(vals[2]))
def test_integer(self):
vals = nan_to_num(1)
assert_all(vals == 1)
vals = nan_to_num([1])
assert_array_equal(vals, np.array([1], int))
def test_complex_good(self):
vals = nan_to_num(1+1j)
assert_all(vals == 1+1j)
def test_complex_bad(self):
with np.errstate(divide='ignore', invalid='ignore'):
v = 1 + 1j
v += np.array(0+1.j)/0.
vals = nan_to_num(v)
# !! This is actually (unexpectedly) zero
assert_all(np.isfinite(vals))
def test_complex_bad2(self):
with np.errstate(divide='ignore', invalid='ignore'):
v = 1 + 1j
v += np.array(-1+1.j)/0.
vals = nan_to_num(v)
assert_all(np.isfinite(vals))
# Fixme
#assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals))
# !! This is actually (unexpectedly) positive
# !! inf. Comment out for now, and see if it
# !! changes
#assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals))
class TestRealIfClose(TestCase):
def test_basic(self):
a = np.random.rand(10)
b = real_if_close(a+1e-15j)
assert_all(isrealobj(b))
assert_array_equal(a, b)
b = real_if_close(a+1e-7j)
assert_all(iscomplexobj(b))
b = real_if_close(a+1e-7j, tol=1e-6)
assert_all(isrealobj(b))
class TestArrayConversion(TestCase):
def test_asfarray(self):
a = asfarray(np.array([1, 2, 3]))
assert_equal(a.__class__, np.ndarray)
assert_(np.issubdtype(a.dtype, np.floating))
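# Hedged usage sketch (added; not part of the original test suite). The helper
# name ``_type_check_demo`` is illustrative; it applies a few of the functions
# exercised above to ordinary data.
def _type_check_demo():
    a = np.array([1.0, np.inf, -np.inf, np.nan])
    finite = nan_to_num(a)  # nan -> 0, +/-inf -> large finite numbers
    almost_real = real_if_close(np.array([1 + 1e-15j, 2 + 0j]))  # drops tiny imaginary parts
    return finite, almost_real, iscomplexobj(almost_real)  # last value is False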
if __name__ == "__main__":
run_module_suite()
|
agpl-3.0
|
apbard/scipy
|
scipy/stats/_distn_infrastructure.py
|
6
|
119658
|
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy._lib.six import string_types, exec_, PY3
from scipy._lib._util import getargspec_no_self as _getargspec
import sys
import keyword
import re
import types
import warnings
from scipy.misc import doccer
from ._distr_params import distcont, distdiscrete
from scipy._lib._util import check_random_state, _lazywhere, _lazyselect
from scipy._lib._util import _valarray as valarray
from scipy.special import (comb, chndtr, entr, rel_entr, kl_div, xlogy, ive)
# for root finding for discrete distribution ppf, and max likelihood estimation
from scipy import optimize
# for functions of continuous distributions (e.g. moments, entropy, cdf)
from scipy import integrate
# to approximate the pdf of a continuous distribution given its cdf
from scipy.misc import derivative
from numpy import (arange, putmask, ravel, take, ones, shape, ndarray,
product, reshape, zeros, floor, logical_and, log, sqrt, exp)
from numpy import (place, argsort, argmax, vectorize,
asarray, nan, inf, isinf, NINF, empty)
import numpy as np
from ._constants import _XMAX
if PY3:
def instancemethod(func, obj, cls):
return types.MethodType(func, obj)
else:
instancemethod = types.MethodType
# These are the docstring parts used for substitution in specific
# distribution docstrings
docheaders = {'methods': """\nMethods\n-------\n""",
'notes': """\nNotes\n-----\n""",
'examples': """\nExamples\n--------\n"""}
_doc_rvs = """\
``rvs(%(shapes)s, loc=0, scale=1, size=1, random_state=None)``
Random variates.
"""
_doc_pdf = """\
``pdf(x, %(shapes)s, loc=0, scale=1)``
Probability density function.
"""
_doc_logpdf = """\
``logpdf(x, %(shapes)s, loc=0, scale=1)``
Log of the probability density function.
"""
_doc_pmf = """\
``pmf(k, %(shapes)s, loc=0, scale=1)``
Probability mass function.
"""
_doc_logpmf = """\
``logpmf(k, %(shapes)s, loc=0, scale=1)``
Log of the probability mass function.
"""
_doc_cdf = """\
``cdf(x, %(shapes)s, loc=0, scale=1)``
Cumulative distribution function.
"""
_doc_logcdf = """\
``logcdf(x, %(shapes)s, loc=0, scale=1)``
Log of the cumulative distribution function.
"""
_doc_sf = """\
``sf(x, %(shapes)s, loc=0, scale=1)``
Survival function (also defined as ``1 - cdf``, but `sf` is sometimes more accurate).
"""
_doc_logsf = """\
``logsf(x, %(shapes)s, loc=0, scale=1)``
Log of the survival function.
"""
_doc_ppf = """\
``ppf(q, %(shapes)s, loc=0, scale=1)``
Percent point function (inverse of ``cdf`` --- percentiles).
"""
_doc_isf = """\
``isf(q, %(shapes)s, loc=0, scale=1)``
Inverse survival function (inverse of ``sf``).
"""
_doc_moment = """\
``moment(n, %(shapes)s, loc=0, scale=1)``
Non-central moment of order n
"""
_doc_stats = """\
``stats(%(shapes)s, loc=0, scale=1, moments='mv')``
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = """\
``entropy(%(shapes)s, loc=0, scale=1)``
(Differential) entropy of the RV.
"""
_doc_fit = """\
``fit(data, %(shapes)s, loc=0, scale=1)``
Parameter estimates for generic data.
"""
_doc_expect = """\
``expect(func, args=(%(shapes_)s), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = """\
``expect(func, args=(%(shapes_)s), loc=0, lb=None, ub=None, conditional=False)``
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = """\
``median(%(shapes)s, loc=0, scale=1)``
Median of the distribution.
"""
_doc_mean = """\
``mean(%(shapes)s, loc=0, scale=1)``
Mean of the distribution.
"""
_doc_var = """\
``var(%(shapes)s, loc=0, scale=1)``
Variance of the distribution.
"""
_doc_std = """\
``std(%(shapes)s, loc=0, scale=1)``
Standard deviation of the distribution.
"""
_doc_interval = """\
``interval(alpha, %(shapes)s, loc=0, scale=1)``
Endpoints of the range that contains alpha percent of the distribution
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
_doc_default_longsummary = """\
As an instance of the `rv_continuous` class, `%(name)s` object inherits from it
a collection of generic methods (see below for the full list),
and completes them with details specific for this particular distribution.
"""
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability density function (``pdf``):
>>> x = np.linspace(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s), 100)
>>> ax.plot(x, %(name)s.pdf(x, %(shapes)s),
... 'r-', lw=5, alpha=0.6, label='%(name)s pdf')
Alternatively, the distribution object can be called (as a function)
to fix the shape, location and scale parameters. This returns a "frozen"
RV object holding the given parameters fixed.
Freeze the distribution and display the frozen ``pdf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')
Check accuracy of ``cdf`` and ``ppf``:
>>> vals = %(name)s.ppf([0.001, 0.5, 0.999], %(shapes)s)
>>> np.allclose([0.001, 0.5, 0.999], %(name)s.cdf(vals, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
And compare the histogram:
>>> ax.hist(r, normed=True, histtype='stepfilled', alpha=0.2)
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
"""
_doc_default_locscale = """\
The probability density above is defined in the "standardized" form. To shift
and/or scale the distribution use the ``loc`` and ``scale`` parameters.
Specifically, ``%(name)s.pdf(x, %(shapes)s, loc, scale)`` is identically
equivalent to ``%(name)s.pdf(y, %(shapes)s) / scale`` with
``y = (x - loc) / scale``.
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
'\n',
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods])
docdict = {
'rvs': _doc_rvs,
'pdf': _doc_pdf,
'logpdf': _doc_logpdf,
'cdf': _doc_cdf,
'logcdf': _doc_logcdf,
'sf': _doc_sf,
'logsf': _doc_logsf,
'ppf': _doc_ppf,
'isf': _doc_isf,
'stats': _doc_stats,
'entropy': _doc_entropy,
'fit': _doc_fit,
'moment': _doc_moment,
'expect': _doc_expect,
'interval': _doc_interval,
'mean': _doc_mean,
'std': _doc_std,
'var': _doc_var,
'median': _doc_median,
'allmethods': _doc_allmethods,
'longsummary': _doc_default_longsummary,
'frozennote': _doc_default_frozen_note,
'example': _doc_default_example,
'default': _doc_default,
'before_notes': _doc_default_before_notes,
'after_notes': _doc_default_locscale
}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
_doc_disc_methods_err_varname = ['cdf', 'logcdf', 'sf', 'logsf']
for obj in _doc_disc_methods_err_varname:
docdict_discrete[obj] = docdict_discrete[obj].replace('(x, ', '(k, ')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(
'rv_continuous', 'rv_discrete')
_doc_default_frozen_note = """
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = """\
Examples
--------
>>> from scipy.stats import %(name)s
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
Calculate a few first moments:
%(set_vals_stmt)s
>>> mean, var, skew, kurt = %(name)s.stats(%(shapes)s, moments='mvsk')
Display the probability mass function (``pmf``):
>>> x = np.arange(%(name)s.ppf(0.01, %(shapes)s),
... %(name)s.ppf(0.99, %(shapes)s))
>>> ax.plot(x, %(name)s.pmf(x, %(shapes)s), 'bo', ms=8, label='%(name)s pmf')
>>> ax.vlines(x, 0, %(name)s.pmf(x, %(shapes)s), colors='b', lw=5, alpha=0.5)
Alternatively, the distribution object can be called (as a function)
to fix the shape and location. This returns a "frozen" RV object holding
the given parameters fixed.
Freeze the distribution and display the frozen ``pmf``:
>>> rv = %(name)s(%(shapes)s)
>>> ax.vlines(x, 0, rv.pmf(x), colors='k', linestyles='-', lw=1,
... label='frozen pmf')
>>> ax.legend(loc='best', frameon=False)
>>> plt.show()
Check accuracy of ``cdf`` and ``ppf``:
>>> prob = %(name)s.cdf(x, %(shapes)s)
>>> np.allclose(x, %(name)s.ppf(prob, %(shapes)s))
True
Generate random numbers:
>>> r = %(name)s.rvs(%(shapes)s, size=1000)
"""
_doc_default_discrete_locscale = """\
The probability mass function above is defined in the "standardized" form.
To shift the distribution use the ``loc`` parameter.
Specifically, ``%(name)s.pmf(k, %(shapes)s, loc)`` is identically
equivalent to ``%(name)s.pmf(k - loc, %(shapes)s)``.
"""
docdict_discrete['example'] = _doc_default_discrete_example
docdict_discrete['after_notes'] = _doc_default_discrete_locscale
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
try:
del s
except NameError:
# in Python 3, loop variables are not visible after the loop
pass
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n == 0):
return 1.0
elif (n == 1):
if mu is None:
val = moment_func(1, *args)
else:
val = mu
elif (n == 2):
if mu2 is None or mu is None:
val = moment_func(2, *args)
else:
val = mu2 + mu*mu
elif (n == 3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3, *args)
else:
mu3 = g1 * np.power(mu2, 1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu*mu*mu # 3rd non-central moment
elif (n == 4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4, *args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*np.power(mu2, 1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu*mu*mu*mu
else:
val = moment_func(n, *args)
return val
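# Added note: the branches above use the standard relations between central
# and non-central moments,
#   E[X]    = mu
#   E[X**2] = mu2 + mu**2
#   E[X**3] = mu3 + 3*mu*mu2 + mu**3,                 mu3 = g1*mu2**1.5
#   E[X**4] = mu4 + 4*mu*mu3 + 6*mu**2*mu2 + mu**4,   mu4 = (g2 + 3)*mu2**2
# where mu2, mu3, mu4 are central moments, g1 is the skewness and g2 the
# excess kurtosis.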
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / np.power(m2, 1.5)
def _kurtosis(data):
"""
kurtosis is fourth central moment / variance**2 - 3
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
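# Added note: _skew and _kurtosis are the (biased) sample estimators of
# skewness and *excess* kurtosis, so a large normal sample yields values
# near 0 for both.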
# Frozen RV class
class rv_frozen(object):
def __init__(self, dist, *args, **kwds):
self.args = args
self.kwds = kwds
# create a new instance
self.dist = dist.__class__(**dist._updated_ctor_param())
# a, b may be set in _argcheck, depending on *args, **kwds. Ouch.
shapes, _, _ = self.dist._parse_args(*args, **kwds)
self.dist._argcheck(*shapes)
self.a, self.b = self.dist.a, self.dist.b
@property
def random_state(self):
return self.dist._random_state
@random_state.setter
def random_state(self, seed):
self.dist._random_state = check_random_state(seed)
def pdf(self, x): # raises AttributeError in frozen discrete distribution
return self.dist.pdf(x, *self.args, **self.kwds)
def logpdf(self, x):
return self.dist.logpdf(x, *self.args, **self.kwds)
def cdf(self, x):
return self.dist.cdf(x, *self.args, **self.kwds)
def logcdf(self, x):
return self.dist.logcdf(x, *self.args, **self.kwds)
def ppf(self, q):
return self.dist.ppf(q, *self.args, **self.kwds)
def isf(self, q):
return self.dist.isf(q, *self.args, **self.kwds)
def rvs(self, size=None, random_state=None):
kwds = self.kwds.copy()
kwds.update({'size': size, 'random_state': random_state})
return self.dist.rvs(*self.args, **kwds)
def sf(self, x):
return self.dist.sf(x, *self.args, **self.kwds)
def logsf(self, x):
return self.dist.logsf(x, *self.args, **self.kwds)
def stats(self, moments='mv'):
kwds = self.kwds.copy()
kwds.update({'moments': moments})
return self.dist.stats(*self.args, **kwds)
def median(self):
return self.dist.median(*self.args, **self.kwds)
def mean(self):
return self.dist.mean(*self.args, **self.kwds)
def var(self):
return self.dist.var(*self.args, **self.kwds)
def std(self):
return self.dist.std(*self.args, **self.kwds)
def moment(self, n):
return self.dist.moment(n, *self.args, **self.kwds)
def entropy(self):
return self.dist.entropy(*self.args, **self.kwds)
def pmf(self, k):
return self.dist.pmf(k, *self.args, **self.kwds)
def logpmf(self, k):
return self.dist.logpmf(k, *self.args, **self.kwds)
def interval(self, alpha):
return self.dist.interval(alpha, *self.args, **self.kwds)
def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds):
# expect method only accepts shape parameters as positional args
# hence convert self.args, self.kwds, also loc/scale
# See the .expect method docstrings for the meaning of
# other parameters.
a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
if isinstance(self.dist, rv_discrete):
return self.dist.expect(func, a, loc, lb, ub, conditional, **kwds)
else:
return self.dist.expect(func, a, loc, scale, lb, ub,
conditional, **kwds)
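# Hedged usage sketch (added; not part of the original module). The helper
# name ``_rv_frozen_example`` is illustrative; freezing fixes shape/loc/scale
# once so the generic methods can be called without repeating them.
def _rv_frozen_example():
    # Imported locally: the public scipy.stats namespace depends on this
    # module, so a top-level import would be circular.
    from scipy.stats import gamma
    rv = gamma(a=2.0, loc=0.0, scale=3.0)  # ``rv`` is an rv_frozen instance
    return rv.mean(), rv.var()             # (a*scale, a*scale**2) == (6.0, 18.0)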
# This should be rewritten
def argsreduce(cond, *args):
"""Return the sequence of ravel(args[i]) where ravel(condition) is
True in 1D.
Examples
--------
>>> import numpy as np
>>> rand = np.random.random_sample
>>> A = rand((4, 5))
>>> B = 2
>>> C = rand((1, 5))
>>> cond = np.ones(A.shape)
>>> [A1, B1, C1] = argsreduce(cond, A, B, C)
>>> B1.shape
(20,)
>>> cond[2,:] = 0
>>> [A2, B2, C2] = argsreduce(cond, A, B, C)
>>> B2.shape
(15,)
"""
newargs = np.atleast_1d(*args)
if not isinstance(newargs, list):
newargs = [newargs, ]
expand_arr = (cond == cond)
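# Added note: ``cond == cond`` is an all-True array with the shape of ``cond``;
# multiplying each argument by it broadcasts scalars and lower-dimensional
# arguments up to that shape, so every extracted array has the same length.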
return [np.extract(cond, arr1 * expand_arr) for arr1 in newargs]
parse_arg_template = """
def _parse_args(self, %(shape_arg_str)s %(locscale_in)s):
return (%(shape_arg_str)s), %(locscale_out)s
def _parse_args_rvs(self, %(shape_arg_str)s %(locscale_in)s, size=None):
return self._argcheck_rvs(%(shape_arg_str)s %(locscale_out)s, size=size)
def _parse_args_stats(self, %(shape_arg_str)s %(locscale_in)s, moments='mv'):
return (%(shape_arg_str)s), %(locscale_out)s, moments
"""
# Both the continuous and discrete distributions depend on ncx2.
# The function name ncx2 is an abbreviation for noncentral chi squared.
def _ncx2_log_pdf(x, df, nc):
# We use (xs**2 + ns**2)/2 = (xs - ns)**2/2 + xs*ns, and include the factor
# of exp(-xs*ns) into the ive function to improve numerical stability
# at large values of xs. See also `rice.pdf`.
df2 = df/2.0 - 1.0
xs, ns = np.sqrt(x), np.sqrt(nc)
res = xlogy(df2/2.0, x/nc) - 0.5*(xs - ns)**2
res += np.log(ive(df2, xs*ns) / 2.0)
return res
def _ncx2_pdf(x, df, nc):
return np.exp(_ncx2_log_pdf(x, df, nc))
def _ncx2_cdf(x, df, nc):
return chndtr(x, df, nc)
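# Hedged sanity check (added; not part of the original module). The helper
# name ``_ncx2_pdf_check`` is illustrative; it verifies that the stabilized
# log-pdf above agrees with the public noncentral chi-squared distribution
# for moderate arguments.
def _ncx2_pdf_check(x=5.0, df=4.0, nc=2.5):
    # Imported locally to avoid a circular import at module load time.
    from scipy.stats import ncx2
    return np.allclose(_ncx2_pdf(x, df, nc), ncx2.pdf(x, df, nc))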
class rv_generic(object):
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def __init__(self, seed=None):
super(rv_generic, self).__init__()
# figure out if _stats signature has 'moments' keyword
sign = _getargspec(self._stats)
self._stats_has_moments = ((sign[2] is not None) or
('moments' in sign[0]))
self._random_state = check_random_state(seed)
@property
def random_state(self):
""" Get or set the RandomState object for generating random variates.
This can be either None or an existing RandomState object.
If None (or np.random), use the RandomState singleton used by np.random.
If already a RandomState instance, use it.
If an int, use a new RandomState instance seeded with seed.
"""
return self._random_state
@random_state.setter
def random_state(self, seed):
self._random_state = check_random_state(seed)
def __getstate__(self):
return self._updated_ctor_param(), self._random_state
def __setstate__(self, state):
ctor_param, r = state
self.__init__(**ctor_param)
self._random_state = r
return self
def _construct_argparser(
self, meths_to_inspect, locscale_in, locscale_out):
"""Construct the parser for the shape arguments.
Generates the argument-parsing functions dynamically and attaches
them to the instance.
Is supposed to be called in __init__ of a class for each distribution.
If self.shapes is a non-empty string, interprets it as a
comma-separated list of shape parameters.
Otherwise inspects the call signatures of `meths_to_inspect`
and constructs the argument-parsing functions from these.
In this case also sets `shapes` and `numargs`.
"""
if self.shapes:
# sanitize the user-supplied shapes
if not isinstance(self.shapes, string_types):
raise TypeError('shapes must be a string.')
shapes = self.shapes.replace(',', ' ').split()
for field in shapes:
if keyword.iskeyword(field):
raise SyntaxError('keywords cannot be used as shapes.')
if not re.match('^[_a-zA-Z][_a-zA-Z0-9]*$', field):
raise SyntaxError(
'shapes must be valid python identifiers')
else:
# find out the call signatures (_pdf, _cdf etc), deduce shape
# arguments. Generic methods only have 'self, x', any further args
# are shapes.
shapes_list = []
for meth in meths_to_inspect:
shapes_args = _getargspec(meth) # NB: does not contain self
args = shapes_args.args[1:] # peel off 'x', too
if args:
shapes_list.append(args)
# *args or **kwargs are not allowed w/automatic shapes
if shapes_args.varargs is not None:
raise TypeError(
'*args are not allowed w/out explicit shapes')
if shapes_args.keywords is not None:
raise TypeError(
'**kwds are not allowed w/out explicit shapes')
if shapes_args.defaults is not None:
raise TypeError('defaults are not allowed for shapes')
if shapes_list:
shapes = shapes_list[0]
# make sure the signatures are consistent
for item in shapes_list:
if item != shapes:
raise TypeError('Shape arguments are inconsistent.')
else:
shapes = []
# have the arguments, construct the method from template
shapes_str = ', '.join(shapes) + ', ' if shapes else '' # NB: not None
dct = dict(shape_arg_str=shapes_str,
locscale_in=locscale_in,
locscale_out=locscale_out,
)
ns = {}
exec_(parse_arg_template % dct, ns)
# NB: attach to the instance, not class
for name in ['_parse_args', '_parse_args_stats', '_parse_args_rvs']:
setattr(self, name,
instancemethod(ns[name], self, self.__class__)
)
self.shapes = ', '.join(shapes) if shapes else None
if not hasattr(self, 'numargs'):
# allows more general subclassing with *args
self.numargs = len(shapes)
def _construct_doc(self, docdict, shapes_vals=None):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if shapes_vals is None:
shapes_vals = ()
vals = ', '.join('%.3g' % val for val in shapes_vals)
tempdict['vals'] = vals
tempdict['shapes_'] = self.shapes or ''
if self.shapes and self.numargs == 1:
tempdict['shapes_'] += ','
if self.shapes:
tempdict['set_vals_stmt'] = '>>> %s = %s' % (self.shapes, vals)
else:
tempdict['set_vals_stmt'] = ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['default', 'before_notes']:
tempdict[item] = tempdict[item].replace(
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
# necessary because we use %(shapes)s in two forms (w w/o ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
# correct for empty shapes
self.__doc__ = self.__doc__.replace('(, ', '(').replace(', )', ')')
def _construct_default_doc(self, longname=None, extradoc=None,
docdict=None, discrete='continuous'):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s %s random variable.' % (longname, discrete),
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc(docdict)
def freeze(self, *args, **kwds):
"""Freeze the distribution for the given arguments.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution. Should include all
the non-optional arguments, may include ``loc`` and ``scale``.
Returns
-------
rv_frozen : rv_frozen instance
The frozen distribution.
"""
return rv_frozen(self, *args, **kwds)
def __call__(self, *args, **kwds):
return self.freeze(*args, **kwds)
__call__.__doc__ = freeze.__doc__
# The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self, *args, **kwds):
return None, None, None, None
# Non-central moments (moments about the origin)
def _munp(self, n, *args):
# Silence floating point warnings from integration.
olderr = np.seterr(all='ignore')
vals = self.generic_moment(n, *args)
np.seterr(**olderr)
return vals
def _argcheck_rvs(self, *args, **kwargs):
# Handle broadcasting and size validation of the rvs method.
# Subclasses should not have to override this method.
# The rule is that if `size` is not None, then `size` gives the
# shape of the result (integer values of `size` are treated as
# tuples with length 1; i.e. `size=3` is the same as `size=(3,)`.)
#
# `args` is expected to contain the shape parameters (if any), the
# location and the scale in a flat tuple (e.g. if there are two
# shape parameters `a` and `b`, `args` will be `(a, b, loc, scale)`).
# The only keyword argument expected is 'size'.
size = kwargs.get('size', None)
all_bcast = np.broadcast_arrays(*args)
def squeeze_left(a):
while a.ndim > 0 and a.shape[0] == 1:
a = a[0]
return a
# Eliminate trivial leading dimensions. In the convention
# used by numpy's random variate generators, trivial leading
# dimensions are effectively ignored. In other words, when `size`
# is given, trivial leading dimensions of the broadcast parameters
# in excess of the number of dimensions in size are ignored, e.g.
# >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]], size=3)
# array([ 1.00104267, 3.00422496, 4.99799278])
# If `size` is not given, the exact broadcast shape is preserved:
# >>> np.random.normal([[1, 3, 5]], [[[[0.01]]]])
# array([[[[ 1.00862899, 3.00061431, 4.99867122]]]])
#
all_bcast = [squeeze_left(a) for a in all_bcast]
bcast_shape = all_bcast[0].shape
bcast_ndim = all_bcast[0].ndim
if size is None:
size_ = bcast_shape
else:
size_ = tuple(np.atleast_1d(size))
# Check compatibility of size_ with the broadcast shape of all
# the parameters. This check is intended to be consistent with
# how the numpy random variate generators (e.g. np.random.normal,
# np.random.beta) handle their arguments. The rule is that, if size
# is given, it determines the shape of the output. Broadcasting
# can't change the output size.
# This is the standard broadcasting convention of extending the
# shape with fewer dimensions with enough dimensions of length 1
# so that the two shapes have the same number of dimensions.
ndiff = bcast_ndim - len(size_)
if ndiff < 0:
bcast_shape = (1,)*(-ndiff) + bcast_shape
elif ndiff > 0:
size_ = (1,)*ndiff + size_
# This compatibility test is not standard. In "regular" broadcasting,
# two shapes are compatible if for each dimension, the lengths are the
# same or one of the lengths is 1. Here, the length of a dimension in
# size_ must not be less than the corresponding length in bcast_shape.
ok = all([bcdim == 1 or bcdim == szdim
for (bcdim, szdim) in zip(bcast_shape, size_)])
if not ok:
raise ValueError("size does not match the broadcast shape of "
"the parameters.")
param_bcast = all_bcast[:-2]
loc_bcast = all_bcast[-2]
scale_bcast = all_bcast[-1]
return param_bcast, loc_bcast, scale_bcast, size_
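# Added illustration (hypothetical values): with one shape parameter of shape
# (3,), scalar loc/scale and size=(2, 3), the parameters broadcast to (3,),
# which is left-padded to (1, 3); each padded length (1 or 3) matches the
# corresponding entry of size_ == (2, 3), so the check passes and the draw
# has shape (2, 3).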
## These are the methods you must define (standard form functions)
## NB: generic _pdf, _logpdf, _cdf are different for
## rv_continuous and rv_discrete hence are defined in there
def _argcheck(self, *args):
"""Default check for correct values on args and keywords.
Returns condition array of 1's where arguments are correct and
0's where they are not.
"""
cond = 1
for arg in args:
cond = logical_and(cond, (asarray(arg) > 0))
return cond
def _support_mask(self, x):
return (self.a <= x) & (x <= self.b)
def _open_support_mask(self, x):
return (self.a < x) & (x < self.b)
def _rvs(self, *args):
# This method must handle self._size being a tuple, and it must
# properly broadcast *args and self._size. self._size might be
# an empty tuple, which means a scalar random variate is to be
# generated.
## Use basic inverse cdf algorithm for RV generation as default.
U = self._random_state.random_sample(self._size)
Y = self._ppf(U, *args)
return Y
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x, *args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._ppfvec(q, *args)
def _isf(self, q, *args):
return self._ppf(1.0-q, *args) # use correct _ppf for subclasses
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self, *args, **kwds):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
size : int or tuple of ints, optional
Defining number of random variates (default is 1).
random_state : None or int or ``np.random.RandomState`` instance, optional
If int or RandomState, use it for drawing the random variates.
If None, rely on ``self.random_state``.
Default is None.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
discrete = kwds.pop('discrete', None)
rndm = kwds.pop('random_state', None)
args, loc, scale, size = self._parse_args_rvs(*args, **kwds)
cond = logical_and(self._argcheck(*args), (scale >= 0))
if not np.all(cond):
raise ValueError("Domain error in arguments.")
if np.all(scale == 0):
return loc*ones(size, 'd')
# extra gymnastics needed for a custom random_state
if rndm is not None:
random_state_saved = self._random_state
self._random_state = check_random_state(rndm)
# `size` should just be an argument to _rvs(), but for, um,
# historical reasons, it is made an attribute that is read
# by _rvs().
self._size = size
vals = self._rvs(*args)
vals = vals * scale + loc
# do not forget to restore the _random_state
if rndm is not None:
self._random_state = random_state_saved
# Cast to int if discrete
if discrete:
if size == ():
vals = int(vals)
else:
vals = vals.astype(int)
return vals
def stats(self, *args, **kwds):
"""
Some statistics of the given RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional (continuous RVs only)
scale parameter (default=1)
moments : str, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default is 'mv')
Returns
-------
stats : sequence
of requested moments.
"""
args, loc, scale, moments = self._parse_args_stats(*args, **kwds)
# scale = 1 by construction for discrete RVs
loc, scale = map(asarray, (loc, scale))
args = tuple(map(asarray, args))
cond = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = []
default = valarray(shape(cond), self.badvalue)
# Use only entries that are valid in calculation
if np.any(cond):
goodargs = argsreduce(cond, *(args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
if self._stats_has_moments:
mu, mu2, g1, g2 = self._stats(*goodargs,
**{'moments': moments})
else:
mu, mu2, g1, g2 = self._stats(*goodargs)
if g1 is None:
mu3 = None
else:
if mu2 is None:
mu2 = self._munp(2, *goodargs)
if g2 is None:
# (mu2**1.5) breaks down for nan and inf
mu3 = g1 * np.power(mu2, 1.5)
if 'm' in moments:
if mu is None:
mu = self._munp(1, *goodargs)
out0 = default.copy()
place(out0, cond, mu * scale + loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
mu2 = mu2p - mu * mu
if np.isinf(mu):
# if mean is inf then var is also inf
mu2 = np.inf
out0 = default.copy()
place(out0, cond, mu2 * scale * scale)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
mu3 = mu3p - 3 * mu * mu2 - mu**3
g1 = mu3 / np.power(mu2, 1.5)
out0 = default.copy()
place(out0, cond, g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4, *goodargs)
if mu is None:
mu = self._munp(1, *goodargs)
if mu2 is None:
mu2p = self._munp(2, *goodargs)
mu2 = mu2p - mu * mu
if mu3 is None:
mu3p = self._munp(3, *goodargs)
mu3 = mu3p - 3 * mu * mu2 - mu**3
mu4 = mu4p - 4 * mu * mu3 - 6 * mu * mu * mu2 - mu**4
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0, cond, g2)
output.append(out0)
else: # no valid args
output = []
for _ in moments:
out0 = default.copy()
output.append(out0)
if len(output) == 1:
return output[0]
else:
return tuple(output)
def entropy(self, *args, **kwds):
"""
Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional (continuous distributions only).
Scale parameter (default=1).
Notes
-----
Entropy is defined base `e`:
>>> drv = rv_discrete(values=((0, 1), (0.5, 0.5)))
>>> np.allclose(drv.entropy(), np.log(2.0))
True
"""
args, loc, scale = self._parse_args(*args, **kwds)
# NB: for discrete distributions scale=1 by construction in _parse_args
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
output = zeros(shape(cond0), 'd')
place(output, (1-cond0), self.badvalue)
goodargs = argsreduce(cond0, *args)
place(output, cond0, self.vecentropy(*goodargs) + log(scale))
return output
def moment(self, n, *args, **kwds):
"""
n-th order non-central moment of distribution.
Parameters
----------
n : int, n >= 1
Order of moment.
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
"""
args, loc, scale = self._parse_args(*args, **kwds)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0):
raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
if self._stats_has_moments:
mdict = {'moments': {1: 'm', 2: 'v', 3: 'vs', 4: 'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*args, **mdict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n, k)*(S/L)^k E[Y^k], k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n, k, exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
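# Added illustration: for loc != 0 and n = 2 the loop above evaluates to
# loc**2 + 2*loc*scale*E[Y] + scale**2*E[Y**2], i.e. E[(loc + scale*Y)**2].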
def median(self, *args, **kwds):
"""
Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter, Default is 0.
scale : array_like, optional
Scale parameter, Default is 1.
Returns
-------
median : float
The median of the distribution.
See Also
--------
stats.distributions.rv_discrete.ppf
Inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""
Mean of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""
Variance of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""
Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, alpha, *args, **kwds):
"""
Confidence interval with equal areas around the median.
Parameters
----------
alpha : array_like of float
Probability that an rv will be drawn from the returned range.
Each value should be in the range [0, 1].
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
location parameter, Default is 0.
scale : array_like, optional
scale parameter, Default is 1.
Returns
-------
a, b : ndarray of float
end-points of range that contain ``100 * alpha %`` of the rv's
possible values.
"""
alpha = asarray(alpha)
if np.any((alpha > 1) | (alpha < 0)):
raise ValueError("alpha must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
## continuous random variables: implement maybe later
##
## hf --- Hazard Function (PDF / SF)
## chf --- Cumulative hazard function (-log(SF))
## psf --- Probability sparsity function (reciprocal of the pdf) in
## units of percent-point-function (as a function of q).
## Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""
A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default)
for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : float, optional
The value in result arrays that indicates a value for which some
argument restriction is violated, default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods. If not provided, shape parameters will be inferred from
the signature of the private methods, ``_pdf`` and ``_cdf`` of the
instance.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : None or int or ``numpy.random.RandomState`` instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None (or np.random), the global np.random state is used.
If integer, it is used to seed the local RandomState instance.
Default is None.
Methods
-------
rvs
pdf
logpdf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
fit
fit_loc_scale
nnlf
Notes
-----
Public methods of an instance of a distribution class (e.g., ``pdf``,
``cdf``) check their arguments and pass valid arguments to private,
computational methods (``_pdf``, ``_cdf``). For ``pdf(x)``, ``x`` is valid
if it is within the support of a distribution, ``self.a <= x <= self.b``.
Whether a shape parameter is valid is decided by an ``_argcheck`` method
(which defaults to checking that its arguments are strictly positive.)
**Subclassing**
New random variables can be defined by subclassing the `rv_continuous` class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
to location 0 and scale 1).
If positive argument checking is not correct for your RV
then you will also need to re-define the ``_argcheck`` method.
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can over-ride::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
Rarely would you override ``_isf``, ``_sf`` or ``_logsf``, but you could.
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
There are additional (internal and private) generic methods that can
be useful for cross-checking and for debugging, but might not work in all
cases when directly called.
A note on ``shapes``: subclasses need not specify them explicitly. In this
case, `shapes` will be automatically deduced from the signatures of the
overridden methods (`pdf`, `cdf` etc).
If, for some reason, you prefer to avoid relying on introspection, you can
specify ``shapes`` explicitly as an argument to the instance constructor.
**Frozen Distributions**
Normally, you must provide shape parameters (and, optionally, location and
scale parameters) to each call of a method of a distribution.
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
frozen RV object with the same methods but holding the given shape,
location, and scale fixed
**Statistics**
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments``, which is a
string composed of "m", "v", "s", and/or "k".
Only the components appearing in string should be computed and
returned in the order "m", "v", "s", or "k" with missing values
returned as None.
Alternatively, you can override ``_munp``, which takes ``n`` and shape
parameters and returns the n-th non-central moment of the distribution.
Examples
--------
To create a new Gaussian distribution, we would do the following:
>>> from scipy.stats import rv_continuous
>>> class gaussian_gen(rv_continuous):
... "Gaussian distribution"
... def _pdf(self, x):
... return np.exp(-x**2 / 2.) / np.sqrt(2.0 * np.pi)
>>> gaussian = gaussian_gen(name='gaussian')
``scipy.stats`` distributions are *instances*, so here we subclass
`rv_continuous` and create an instance. With this, we now have
a fully functional distribution with all relevant methods automagically
generated by the framework.
Note that above we defined a standard normal distribution, with zero mean
and unit variance. Shifting and scaling of the distribution can be done
by using ``loc`` and ``scale`` parameters: ``gaussian.pdf(x, loc, scale)``
essentially computes ``y = (x - loc) / scale`` and
``gaussian._pdf(y) / scale``.
"""
def __init__(self, momtype=1, a=None, b=None, xtol=1e-14,
badvalue=None, name=None, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_continuous, self).__init__(seed)
# save the ctor parameters, cf generic freeze
self._ctor_param = dict(
momtype=momtype, a=a, b=b, xtol=xtol,
badvalue=badvalue, name=name, longname=longname,
shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
self.xtol = xtol
self.moment_type = momtype
self.shapes = shapes
self._construct_argparser(meths_to_inspect=[self._pdf, self._cdf],
locscale_in='loc=0, scale=1',
locscale_out='loc, scale')
# nin correction
self._ppfvec = vectorize(self._ppf_single, otypes='d')
self._ppfvec.nin = self.numargs + 1
self.vecentropy = vectorize(self._entropy, otypes='d')
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self._cdfvec.nin = self.numargs + 1
self.extradoc = extradoc
if momtype == 0:
self.generic_moment = vectorize(self._mom0_sc, otypes='d')
else:
self.generic_moment = vectorize(self._mom1_sc, otypes='d')
# Because of the *args argument of _mom0_sc, vectorize cannot count the
# number of arguments correctly.
self.generic_moment.nin = self.numargs + 1
if longname is None:
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict,
discrete='continuous')
else:
dct = dict(distcont)
self._construct_doc(docdict, dct.get(self.name))
def _updated_ctor_param(self):
""" Return the current version of _ctor_param, possibly updated by user.
Used by freezing and pickling.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['xtol'] = self.xtol
dct['badvalue'] = self.badvalue
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _ppf_to_solve(self, x, q, *args):
return self.cdf(*(x, )+args)-q
def _ppf_single(self, q, *args):
left = right = None
if self.a > -np.inf:
left = self.a
if self.b < np.inf:
right = self.b
factor = 10.
if not left: # i.e. self.a = -inf
left = -1.*factor
while self._ppf_to_solve(left, q, *args) > 0.:
right = left
left *= factor
# left is now such that cdf(left) < q
if not right: # i.e. self.b = inf
right = factor
while self._ppf_to_solve(right, q, *args) < 0.:
left = right
right *= factor
# right is now such that cdf(right) > q
return optimize.brentq(self._ppf_to_solve,
left, right, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x, m, *args):
return x**m * self.pdf(x, *args)
def _mom0_sc(self, m, *args):
return integrate.quad(self._mom_integ0, self.a, self.b,
args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q, m, *args):
return (self.ppf(q, *args))**m
def _mom1_sc(self, m, *args):
return integrate.quad(self._mom_integ1, 0, 1, args=(m,)+args)[0]
def _pdf(self, x, *args):
return derivative(self._cdf, x, dx=1e-5, args=args, order=5)
## Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
def _cdf_single(self, x, *args):
return integrate.quad(self._pdf, self.a, x, args=args)[0]
def _cdf(self, x, *args):
return self._cdfvec(x, *args)
## generic _argcheck, _logcdf, _sf, _logsf, _ppf, _isf, _rvs are defined
## in rv_generic
def pdf(self, x, *args, **kwds):
"""
Probability density function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
pdf : ndarray
Probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._support_mask(x) & (scale > 0)
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._pdf(*goodargs) / scale)
if output.ndim == 0:
return output[()]
return output
def logpdf(self, x, *args, **kwds):
"""
Log of the probability density function at x of the given RV.
This uses a more numerically accurate calculation if available.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logpdf : array_like
Log of the probability density function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._support_mask(x) & (scale > 0)
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
putmask(output, (1-cond0)+np.isnan(x), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._logpdf(*goodargs) - log(scale))
if output.ndim == 0:
return output[()]
return output
def cdf(self, x, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `x`
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x) & (scale > 0)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
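# A minimal, hedged usage sketch of the loc/scale standardization above
# (illustrative values; the normal distribution is an arbitrary choice):
#
# >>> from scipy.stats import norm
# >>> norm.cdf(0.0)                       # standard normal -> 0.5
# >>> norm.cdf(1.0, loc=1.0, scale=2.0)   # evaluated at (x - loc) / scale -> 0.5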
def logcdf(self, x, *args, **kwds):
"""
Log of the cumulative distribution function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x) & (scale > 0)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
place(output, (1-cond0)*(cond1 == cond1)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, x, *args, **kwds):
"""
Survival function (1 - `cdf`) at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
sf : array_like
Survival function evaluated at x
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x) & (scale > 0)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = zeros(shape(cond), dtyp)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self, x, *args, **kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as (1 - `cdf`),
evaluated at `x`.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `x`.
"""
args, loc, scale = self._parse_args(*args, **kwds)
x, loc, scale = map(asarray, (x, loc, scale))
args = tuple(map(asarray, args))
dtyp = np.find_common_type([x.dtype, np.float64], [])
x = np.asarray((x - loc)/scale, dtype=dtyp)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = self._open_support_mask(x) & (scale > 0)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = empty(shape(cond), dtyp)
output.fill(NINF)
place(output, (1-cond0)+np.isnan(x), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""
Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
lower tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : array_like
quantile corresponding to the lower tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 0)
cond3 = cond0 & (q == 1)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = self.a * scale + loc
upper_bound = self.b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if np.any(cond): # call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._ppf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
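# A hedged round-trip sketch for ppf/cdf (illustrative values only):
#
# >>> from scipy.stats import norm
# >>> norm.ppf(0.975)              # -> about 1.96
# >>> norm.cdf(norm.ppf(0.975))    # -> 0.975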
def isf(self, q, *args, **kwds):
"""
Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
upper tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : ndarray or scalar
Quantile corresponding to the upper tail probability q.
"""
args, loc, scale = self._parse_args(*args, **kwds)
q, loc, scale = map(asarray, (q, loc, scale))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc == loc)
cond1 = (0 < q) & (q < 1)
cond2 = cond0 & (q == 1)
cond3 = cond0 & (q == 0)
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue)
lower_bound = self.a * scale + loc
upper_bound = self.b * scale + loc
place(output, cond2, argsreduce(cond2, lower_bound)[0])
place(output, cond3, argsreduce(cond3, upper_bound)[0])
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(scale, loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output, cond, self._isf(*goodargs) * scale + loc)
if output.ndim == 0:
return output[()]
return output
def _nnlf(self, x, *args):
return -np.sum(self._logpdf(x, *args), axis=0)
def _unpack_loc_scale(self, theta):
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
return loc, scale, args
def nnlf(self, theta, x):
'''Return negative loglikelihood function.
Notes
-----
This is ``-sum(log pdf(x, theta), axis=0)`` where `theta` are the
parameters (including loc and scale).
'''
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
if np.any(~self._support_mask(x)):
return inf
return self._nnlf(x, *args) + n_log_scale
def _nnlf_and_penalty(self, x, args):
cond0 = ~self._support_mask(x)
n_bad = np.count_nonzero(cond0, axis=0)
if n_bad > 0:
x = argsreduce(~cond0, x)[0]
logpdf = self._logpdf(x, *args)
finite_logpdf = np.isfinite(logpdf)
n_bad += np.sum(~finite_logpdf, axis=0)
if n_bad > 0:
penalty = n_bad * log(_XMAX) * 100
return -np.sum(logpdf[finite_logpdf], axis=0) + penalty
return -np.sum(logpdf, axis=0)
def _penalized_nnlf(self, theta, x):
''' Return penalized negative loglikelihood function,
i.e., - sum (log pdf(x, theta), axis=0) + penalty
where theta are the parameters (including loc and scale)
'''
loc, scale, args = self._unpack_loc_scale(theta)
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
n_log_scale = len(x) * log(scale)
return self._nnlf_and_penalty(x, args) + n_log_scale
# return starting point for fit (shape arguments + loc + scale)
def _fitstart(self, data, args=None):
if args is None:
args = (1.0,)*self.numargs
loc, scale = self._fit_loc_scale_support(data, *args)
return args + (loc, scale)
# Return the (possibly reduced) function to optimize in order to find MLE
# estimates for the .fit method
def _reduce_func(self, args, kwds):
# First of all, convert fshapes params to fnum: eg for stats.beta,
# shapes='a, b'. To fix `a`, one can specify either `f0` or `fa`.
# Convert the latter into the former.
if self.shapes:
shapes = self.shapes.replace(',', ' ').split()
for j, s in enumerate(shapes):
val = kwds.pop('f' + s, None) or kwds.pop('fix_' + s, None)
if val is not None:
key = 'f%d' % j
if key in kwds:
raise ValueError("Duplicate entry for %s." % key)
else:
kwds[key] = val
args = list(args)
Nargs = len(args)
fixedn = []
names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
x0 = []
for n, key in enumerate(names):
if key in kwds:
fixedn.append(n)
args[n] = kwds.pop(key)
else:
x0.append(args[n])
if len(fixedn) == 0:
func = self._penalized_nnlf
restore = None
else:
if len(fixedn) == Nargs:
raise ValueError(
"All parameters fixed. There is nothing to optimize.")
def restore(args, theta):
# Replace with theta for all numbers not in fixedn
# This allows the non-fixed values to vary, but
# we still call self.nnlf with all parameters.
i = 0
for n in range(Nargs):
if n not in fixedn:
args[n] = theta[i]
i += 1
return args
def func(theta, x):
newtheta = restore(args[:], theta)
return self._penalized_nnlf(newtheta, x)
return x0, func, restore, args
def fit(self, data, *args, **kwds):
"""
Return MLEs for shape (if applicable), location, and scale
parameters from data.
MLE stands for Maximum Likelihood Estimate. Starting estimates for
the fit are given by input arguments; for any arguments not provided
with starting estimates, ``self._fitstart(data)`` is called to generate
such starting estimates.
One can hold some parameters fixed to specific values by passing in
keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
and ``floc`` and ``fscale`` (for location and scale parameters,
respectively).
Parameters
----------
data : array_like
Data to use in calculating the MLEs.
args : floats, optional
Starting value(s) for any shape-characterizing arguments (those not
provided will be determined by a call to ``_fitstart(data)``).
No default value.
kwds : floats, optional
Starting values for the location and scale parameters; no default.
Special keyword arguments are recognized as holding certain
parameters fixed:
- f0...fn : hold respective shape parameters fixed.
Alternatively, shape parameters to fix can be specified by name.
For example, if ``self.shapes == "a, b"``, ``fa`` and ``fix_a``
are equivalent to ``f0``, and ``fb`` and ``fix_b`` are
equivalent to ``f1``.
- floc : hold location parameter fixed to specified value.
- fscale : hold scale parameter fixed to specified value.
- optimizer : The optimizer to use. The optimizer must take ``func``,
and starting position as the first two arguments,
plus ``args`` (for extra arguments to pass to the
function to be optimized) and ``disp=0`` to suppress
output as keyword arguments.
Returns
-------
mle_tuple : tuple of floats
MLEs for any shape parameters (if applicable), followed by those
for location and scale. For most random variables, shape estimates
will be returned, but there are exceptions (e.g. ``norm``).
Notes
-----
This fit is computed by maximizing a log-likelihood function, with
penalty applied for samples outside of range of the distribution. The
returned answer is not guaranteed to be the globally optimal MLE; it
may only be locally optimal, or the optimization may fail altogether.
Examples
--------
Generate some data to fit: draw random variates from the `beta`
distribution
>>> from scipy.stats import beta
>>> a, b = 1., 2.
>>> x = beta.rvs(a, b, size=1000)
Now we can fit all four parameters (``a``, ``b``, ``loc`` and ``scale``):
>>> a1, b1, loc1, scale1 = beta.fit(x)
We can also use some prior knowledge about the dataset: let's keep
``loc`` and ``scale`` fixed:
>>> a1, b1, loc1, scale1 = beta.fit(x, floc=0, fscale=1)
>>> loc1, scale1
(0, 1)
We can also keep shape parameters fixed by using ``f``-keywords. To
keep the zero-th shape parameter ``a`` equal to 1, use ``f0=1`` or,
equivalently, ``fa=1``:
>>> a1, b1, loc1, scale1 = beta.fit(x, fa=1, floc=0, fscale=1)
>>> a1
1
Not all distributions return estimates for the shape parameters.
``norm`` for example just returns estimates for location and scale:
>>> from scipy.stats import norm
>>> x = norm.rvs(a, b, size=1000, random_state=123)
>>> loc1, scale1 = norm.fit(x)
>>> loc1, scale1
(0.92087172783841631, 2.0015750750324668)
"""
Narg = len(args)
if Narg > self.numargs:
raise TypeError("Too many input arguments.")
start = [None]*2
if (Narg < self.numargs) or not ('loc' in kwds and
'scale' in kwds):
# get distribution specific starting locations
start = self._fitstart(data)
args += start[Narg:-2]
loc = kwds.pop('loc', start[-2])
scale = kwds.pop('scale', start[-1])
args += (loc, scale)
x0, func, restore, args = self._reduce_func(args, kwds)
optimizer = kwds.pop('optimizer', optimize.fmin)
# convert string to function in scipy.optimize
if not callable(optimizer) and isinstance(optimizer, string_types):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError:
raise ValueError("%s is not a valid optimizer" % optimizer)
# by now kwds must be empty, since everybody took what they needed
if kwds:
raise TypeError("Unknown arguments: %s." % kwds)
vals = optimizer(func, x0, args=(ravel(data),), disp=0)
if restore is not None:
vals = restore(args, vals)
vals = tuple(vals)
return vals
def _fit_loc_scale_support(self, data, *args):
"""
Estimate loc and scale parameters from data accounting for support.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
data = np.asarray(data)
# Estimate location and scale according to the method of moments.
loc_hat, scale_hat = self.fit_loc_scale(data, *args)
# Compute the support according to the shape parameters.
self._argcheck(*args)
a, b = self.a, self.b
support_width = b - a
# If the support is empty then return the moment-based estimates.
if support_width <= 0:
return loc_hat, scale_hat
# Compute the proposed support according to the loc and scale estimates.
a_hat = loc_hat + a * scale_hat
b_hat = loc_hat + b * scale_hat
# Use the moment-based estimates if they are compatible with the data.
data_a = np.min(data)
data_b = np.max(data)
if a_hat < data_a and data_b < b_hat:
return loc_hat, scale_hat
# Otherwise find other estimates that are compatible with the data.
data_width = data_b - data_a
rel_margin = 0.1
margin = data_width * rel_margin
# For a finite interval, both the location and scale
# should have interesting values.
if support_width < np.inf:
loc_hat = (data_a - a) - margin
scale_hat = (data_width + 2 * margin) / support_width
return loc_hat, scale_hat
# For a one-sided interval, use only an interesting location parameter.
if a > -np.inf:
return (data_a - a) - margin, 1
elif b < np.inf:
return (data_b - b) + margin, 1
else:
raise RuntimeError
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
mu, mu2 = self.stats(*args, **{'moments': 'mv'})
tmp = asarray(data)
muhat = tmp.mean()
mu2hat = tmp.var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
if not np.isfinite(Lhat):
Lhat = 0
if not (np.isfinite(Shat) and (0 < Shat)):
Shat = 1
return Lhat, Shat
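# A hedged worked example of the moment matching above. For the standard
# normal, mu = 0 and mu2 = 1, so Lhat is the sample mean and Shat the
# (population) sample standard deviation (illustrative data):
#
# >>> from scipy.stats import norm
# >>> norm.fit_loc_scale([1.0, 2.0, 3.0, 4.0])   # -> (2.5, ~1.118)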
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return entr(val)
# upper limit is often inf, so suppress warnings when integrating
olderr = np.seterr(over='ignore')
h = integrate.quad(integ, self.a, self.b)[0]
np.seterr(**olderr)
if not np.isnan(h):
return h
else:
# try with different limits if integration problems
low, upp = self.ppf([1e-10, 1. - 1e-10], *args)
if np.isinf(self.b):
upper = upp
else:
upper = self.b
if np.isinf(self.a):
lower = low
else:
lower = self.a
return integrate.quad(integ, lower, upper)[0]
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""Calculate expected value of a function with respect to the
distribution.
The expected value of a function ``f(x)`` with respect to a
distribution ``dist`` is defined as::
E[f(x)] = integral(f(x) * dist.pdf(x), lbound, ubound)
Parameters
----------
func : callable, optional
Function for which integral is calculated. Takes only one argument.
The default is the identity mapping f(x) = x.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter (default=0).
scale : float, optional
Scale parameter (default=1).
lb, ub : scalar, optional
Lower and upper bound for integration. Default is set to the
support of the distribution.
conditional : bool, optional
If True, the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Default is False.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expect : float
The calculated expected value.
Notes
-----
The integration behavior of this function is inherited from
`integrate.quad`.
"""
lockwds = {'loc': loc,
'scale': scale}
self._argcheck(*args)
if func is None:
def fun(x, *args):
return x * self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x) * self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + self.a * scale
if ub is None:
ub = loc + self.b * scale
if conditional:
invfac = (self.sf(lb, *args, **lockwds)
- self.sf(ub, *args, **lockwds))
else:
invfac = 1.0
kwds['args'] = args
# Silence floating point warnings from integration.
olderr = np.seterr(all='ignore')
vals = integrate.quad(fun, lb, ub, **kwds)[0] / invfac
np.seterr(**olderr)
return vals
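# A minimal, hedged usage sketch for expect(); the values are illustrative
# and the integrals are evaluated with integrate.quad as described above:
#
# >>> from scipy.stats import norm
# >>> norm.expect(lambda x: x**2)          # E[X**2] for a standard normal -> ~1.0
# >>> norm.expect(lambda x: x, loc=3.0)    # mean of a shifted normal -> ~3.0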
# Helpers for the discrete distributions
def _drv2_moment(self, n, *args):
"""Non-central moment of discrete distribution."""
def fun(x):
return np.power(x, n) * self._pmf(x, *args)
return _expect(fun, self.a, self.b, self.ppf(0.5, *args), self.inc)
def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
b = self.b
a = self.a
if isinf(b): # Be sure ending point is > q
b = int(max(100*q, 10))
while 1:
if b >= self.b:
qb = 1.0
break
qb = self._cdf(b, *args)
if (qb < q):
b += 10
else:
break
else:
qb = 1.0
if isinf(a): # be sure starting point < q
a = int(min(-100*q, -10))
while 1:
if a <= self.a:
qb = 0.0
break
qa = self._cdf(a, *args)
if (qa > q):
a -= 10
else:
break
else:
qa = self._cdf(a, *args)
while 1:
if (qa == q):
return a
if (qb == q):
return b
if b <= a+1:
# testcase: return wrong number at lower index
# python -c "from scipy.stats import zipf;print zipf.ppf(0.01, 2)" wrong
# python -c "from scipy.stats import zipf;print zipf.ppf([0.01, 0.61, 0.77, 0.83], 2)"
# python -c "from scipy.stats import logser;print logser.ppf([0.1, 0.66, 0.86, 0.93], 0.6)"
if qa > q:
return a
else:
return b
c = int((a+b)/2.0)
qc = self._cdf(c, *args)
if (qc < q):
if a != c:
a = c
else:
raise RuntimeError('updating stopped, endless loop')
qa = qc
elif (qc > q):
if b != c:
b = c
else:
raise RuntimeError('updating stopped, endless loop')
qb = qc
else:
return c
def entropy(pk, qk=None, base=None):
"""Calculate the entropy of a distribution for given probability values.
If only probabilities `pk` are given, the entropy is calculated as
``S = -sum(pk * log(pk), axis=0)``.
If `qk` is not None, then compute the Kullback-Leibler divergence
``S = sum(pk * log(pk / qk), axis=0)``.
This routine will normalize `pk` and `qk` if they don't sum to 1.
Parameters
----------
pk : sequence
Defines the (discrete) distribution. ``pk[i]`` is the (possibly
unnormalized) probability of event ``i``.
qk : sequence, optional
Sequence against which the relative entropy is computed. Should be in
the same format as `pk`.
base : float, optional
The logarithmic base to use, defaults to ``e`` (natural logarithm).
Returns
-------
S : float
The calculated entropy.
"""
pk = asarray(pk)
pk = 1.0*pk / np.sum(pk, axis=0)
if qk is None:
vec = entr(pk)
else:
qk = asarray(qk)
if len(qk) != len(pk):
raise ValueError("qk and pk must have same length.")
qk = 1.0*qk / np.sum(qk, axis=0)
vec = rel_entr(pk, qk)
S = np.sum(vec, axis=0)
if base is not None:
S /= log(base)
return S
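# A minimal, hedged usage sketch for entropy() (illustrative probabilities):
#
# >>> from scipy.stats import entropy
# >>> entropy([0.5, 0.5], base=2)        # Shannon entropy of a fair coin -> 1.0 bit
# >>> entropy([0.9, 0.1], [0.5, 0.5])    # KL divergence D(p || q) -> ~0.368 nats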
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""
A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class to construct specific distribution classes
and instances for discrete random variables. It can also be used
to construct an arbitrary distribution defined by a list of support
points and corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments.
values : tuple of two array_like, optional
``(xk, pk)`` where ``xk`` are integers and ``pk`` are the corresponding
non-zero probabilities, with ``sum(pk) = 1``.
inc : integer, optional
Increment for the support of the distribution.
Default is 1. (other values have not been tested)
badvalue : float, optional
The value in the result arrays that indicates an argument for which
some restriction is violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example "m, n" for a distribution
that takes two integers as the two shape arguments for all its methods.
If not provided, shape parameters will be inferred from
the signatures of the private methods, ``_pmf`` and ``_cdf`` of
the instance.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
seed : None or int or ``numpy.random.RandomState`` instance, optional
This parameter defines the RandomState object to use for drawing
random variates.
If None, the global np.random state is used.
If integer, it is used to seed the local RandomState instance.
Default is None.
Methods
-------
rvs
pmf
logpmf
cdf
logcdf
sf
logsf
ppf
isf
moment
stats
entropy
expect
median
mean
std
var
interval
__call__
Notes
-----
This class is similar to `rv_continuous`. Whether a shape parameter is
valid is decided by an ``_argcheck`` method (which defaults to checking
that its arguments are strictly positive.)
The main differences are:
- the support of the distribution is a set of integers
- instead of the probability density function, ``pdf`` (and the
corresponding private ``_pdf``), this class defines the
*probability mass function*, `pmf` (and the corresponding
private ``_pmf``.)
- scale parameter is not defined.
To create a new discrete distribution, we would do the following:
>>> from scipy.stats import rv_discrete
>>> class poisson_gen(rv_discrete):
... "Poisson distribution"
... def _pmf(self, k, mu):
... return exp(-mu) * mu**k / factorial(k)
and create an instance::
>>> poisson = poisson_gen(name="poisson")
Note that above we defined the Poisson distribution in the standard form.
Shifting the distribution can be done by providing the ``loc`` parameter
to the methods of the instance. For example, ``poisson.pmf(x, mu, loc)``
delegates the work to ``poisson._pmf(x-loc, mu)``.
**Discrete distributions from a list of probabilities**
Alternatively, you can construct an arbitrary discrete rv defined
on a finite set of values ``xk`` with ``Prob{X=xk} = pk`` by using the
``values`` keyword argument to the `rv_discrete` constructor.
Examples
--------
Custom made discrete distribution:
>>> from scipy import stats
>>> xk = np.arange(7)
>>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.0, 0.2)
>>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
>>>
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots(1, 1)
>>> ax.plot(xk, custm.pmf(xk), 'ro', ms=12, mec='r')
>>> ax.vlines(xk, 0, custm.pmf(xk), colors='r', lw=4)
>>> plt.show()
Random number generation:
>>> R = custm.rvs(size=100)
"""
def __new__(cls, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
if values is not None:
# dispatch to a subclass
return super(rv_discrete, cls).__new__(rv_sample)
else:
# business as usual
return super(rv_discrete, cls).__new__(cls)
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_discrete, self).__init__(seed)
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.a = a
self.b = b
self.moment_tol = moment_tol
self.inc = inc
self._cdfvec = vectorize(self._cdf_single, otypes='d')
self.vecentropy = vectorize(self._entropy)
self.shapes = shapes
if values is not None:
raise ValueError("rv_discrete.__init__(..., values != None, ...)")
self._construct_argparser(meths_to_inspect=[self._pmf, self._cdf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
# nin correction needs to be after we know numargs
# correct nin for generic moment vectorization
_vec_generic_moment = vectorize(_drv2_moment, otypes='d')
_vec_generic_moment.nin = self.numargs + 2
self.generic_moment = instancemethod(_vec_generic_moment,
self, rv_discrete)
# correct nin for ppf vectorization
_vppf = vectorize(_drv2_ppfsingle, otypes='d')
_vppf.nin = self.numargs + 2
self._ppfvec = instancemethod(_vppf,
self, rv_discrete)
# now that self.numargs is defined, we can adjust nin
self._cdfvec.nin = self.numargs + 1
self._construct_docstrings(name, longname, extradoc)
def _construct_docstrings(self, name, longname, extradoc):
if name is None:
name = 'Distribution'
self.name = name
self.extradoc = extradoc
# generate docstring for subclass instances
if longname is None:
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if sys.flags.optimize < 2:
# Skip adding docstrings if interpreter is run with -OO
if self.__doc__ is None:
self._construct_default_doc(longname=longname,
extradoc=extradoc,
docdict=docdict_discrete,
discrete='discrete')
else:
dct = dict(distdiscrete)
self._construct_doc(docdict_discrete, dct.get(self.name))
# discrete RV do not have the scale parameter, remove it
self.__doc__ = self.__doc__.replace(
'\n scale : array_like, '
'optional\n scale parameter (default=1)', '')
@property
@np.deprecate(message="`return_integers` attribute is not used anywhere any "
" longer and is deprecated in scipy 0.18.")
def return_integers(self):
return 1
def _updated_ctor_param(self):
""" Return the current version of _ctor_param, possibly updated by user.
Used by freezing and pickling.
Keep this in sync with the signature of __init__.
"""
dct = self._ctor_param.copy()
dct['a'] = self.a
dct['b'] = self.b
dct['badvalue'] = self.badvalue
dct['moment_tol'] = self.moment_tol
dct['inc'] = self.inc
dct['name'] = self.name
dct['shapes'] = self.shapes
dct['extradoc'] = self.extradoc
return dct
def _nonzero(self, k, *args):
return floor(k) == k
def _pmf(self, k, *args):
return self._cdf(k, *args) - self._cdf(k-1, *args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdf_single(self, k, *args):
m = arange(int(self.a), k+1)
return np.sum(self._pmf(m, *args), axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k, *args)
# generic _logcdf, _sf, _logsf, _ppf, _isf, _rvs defined in rv_generic
def rvs(self, *args, **kwargs):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
size : int or tuple of ints, optional
Defining number of random variates (Default is 1). Note that `size`
has to be given as keyword, not as positional argument.
random_state : None or int or ``np.random.RandomState`` instance, optional
If int or RandomState, use it for drawing the random variates.
If None, rely on ``self.random_state``.
Default is None.
Returns
-------
rvs : ndarray or scalar
Random variates of given `size`.
"""
kwargs['discrete'] = True
return super(rv_discrete, self).rvs(*args, **kwargs)
def pmf(self, k, *args, **kwds):
"""
Probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter (default=0).
Returns
-------
pmf : array_like
Probability mass function evaluated at k
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._pmf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
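# A minimal, hedged usage sketch for pmf() on a concrete discrete distribution
# (binomial with n=5, p=0.5; the values are illustrative):
#
# >>> from scipy.stats import binom
# >>> binom.pmf(2, 5, 0.5)      # C(5, 2) * 0.5**5 -> 0.3125
# >>> binom.pmf(2.5, 5, 0.5)    # non-integer k fails _nonzero -> 0.0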
def logpmf(self, k, *args, **kwds):
"""
Log of the probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter. Default is 0.
Returns
-------
logpmf : array_like
Log of the probability mass function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k, *args)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logpmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def cdf(self, k, *args, **kwds):
"""
Cumulative distribution function of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
cdf : ndarray
Cumulative distribution function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._cdf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, k, *args, **kwds):
"""
Log of the cumulative distribution function at k of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2*(cond0 == cond0), 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self, k, *args, **kwds):
"""
Survival function (1 - `cdf`) at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
sf : array_like
Survival function evaluated at k.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = zeros(shape(cond), 'd')
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 1.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, np.clip(self._sf(*goodargs), 0, 1))
if output.ndim == 0:
return output[()]
return output
def logsf(self, k, *args, **kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as 1 - `cdf`,
evaluated at `k`.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `k`.
"""
args, loc, _ = self._parse_args(*args, **kwds)
k, loc = map(asarray, (k, loc))
args = tuple(map(asarray, args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = empty(shape(cond), 'd')
output.fill(NINF)
place(output, (1-cond0) + np.isnan(k), self.badvalue)
place(output, cond2, 0.0)
if np.any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output, cond, self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self, q, *args, **kwds):
"""
Percent point function (inverse of `cdf`) at q of the given RV.
Parameters
----------
q : array_like
Lower tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : array_like
Quantile corresponding to the lower tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond), value=self.badvalue, typecode='d')
# output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), self.a-1)
place(output, cond2, self.b)
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
place(output, cond, self._ppf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self, q, *args, **kwds):
"""
Inverse survival function (inverse of `sf`) at q of the given RV.
Parameters
----------
q : array_like
Upper tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : ndarray or scalar
Quantile corresponding to the upper tail probability, q.
"""
args, loc, _ = self._parse_args(*args, **kwds)
q, loc = map(asarray, (q, loc))
args = tuple(map(asarray, args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q == 1) & cond0
cond = cond0 & cond1
# same problem as with ppf; copied from ppf and changed
output = valarray(shape(cond), value=self.badvalue, typecode='d')
# output type 'd' to handle nan and inf
place(output, (q == 0)*(cond == cond), self.b)
place(output, cond2, self.a-1)
# call place only if at least 1 valid argument
if np.any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
# PB same as ticket 766
place(output, cond, self._isf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def _entropy(self, *args):
if hasattr(self, 'pk'):
return entropy(self.pk)
else:
return _expect(lambda x: entr(self.pmf(x, *args)),
self.a, self.b, self.ppf(0.5, *args), self.inc)
def expect(self, func=None, args=(), loc=0, lb=None, ub=None,
conditional=False, maxcount=1000, tolerance=1e-10, chunksize=32):
"""
Calculate expected value of a function with respect to the distribution
for discrete distribution.
Parameters
----------
func : callable, optional
Function for which the expectation value is calculated.
Takes only one argument.
The default is the identity mapping f(k) = k.
args : tuple, optional
Shape parameters of the distribution.
loc : float, optional
Location parameter.
Default is 0.
lb, ub : int, optional
Lower and upper bound for the summation, default is set to the
support of the distribution, inclusive (``lb <= k <= ub``).
conditional : bool, optional
If true then the expectation is corrected by the conditional
probability of the summation interval. The return value is the
expectation of the function, `func`, conditional on being in
the given interval (k such that ``lb <= k <= ub``).
Default is False.
maxcount : int, optional
Maximal number of terms to evaluate (to avoid an endless loop for
an infinite sum). Default is 1000.
tolerance : float, optional
Absolute tolerance for the summation. Default is 1e-10.
chunksize : int, optional
Iterate over the support of a distributions in chunks of this size.
Default is 32.
Returns
-------
expect : float
Expected value.
Notes
-----
For heavy-tailed distributions, the expected value may or may not exist,
depending on the function, `func`. If it does exist, but the sum converges
slowly, the accuracy of the result may be rather low. For instance, for
``zipf(4)``, the accuracy of the mean and variance estimates is only about 1e-5.
Increasing `maxcount` and/or `chunksize` may improve the result, but may also
make the calculation very slow.
The function is not vectorized.
"""
if func is None:
def fun(x):
# loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
# loc and args from outer scope
return func(x+loc)*self._pmf(x, *args)
# used pmf because _pmf does not check support in randint and there
# might be problems(?) with correct self.a, self.b at this stage maybe
# not anymore, seems to work now with _pmf
self._argcheck(*args) # (re)generate scalar self.a and self.b
if lb is None:
lb = self.a
else:
lb = lb - loc # convert bound for standardized distribution
if ub is None:
ub = self.b
else:
ub = ub - loc # convert bound for standardized distribution
if conditional:
invfac = self.sf(lb-1, *args) - self.sf(ub, *args)
else:
invfac = 1.0
# iterate over the support, starting from the median
x0 = self.ppf(0.5, *args)
res = _expect(fun, lb, ub, x0, self.inc, maxcount, tolerance, chunksize)
return res / invfac
def _expect(fun, lb, ub, x0, inc, maxcount=1000, tolerance=1e-10,
chunksize=32):
"""Helper for computing the expectation value of `fun`."""
# short-circuit if the support size is small enough
if (ub - lb) <= chunksize:
supp = np.arange(lb, ub+1, inc)
vals = fun(supp)
return np.sum(vals)
# otherwise, iterate starting from x0
if x0 < lb:
x0 = lb
if x0 > ub:
x0 = ub
count, tot = 0, 0.
# iterate over [x0, ub] inclusive
for x in _iter_chunked(x0, ub+1, chunksize=chunksize, inc=inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
return tot
# iterate over [lb, x0)
for x in _iter_chunked(x0-1, lb-1, chunksize=chunksize, inc=-inc):
count += x.size
delta = np.sum(fun(x))
tot += delta
if abs(delta) < tolerance * x.size:
break
if count > maxcount:
warnings.warn('expect(): sum did not converge', RuntimeWarning)
break
return tot
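# A hedged sketch of the chunked summation above, exercised through the public
# expect() API (for Poisson(mu) the mean and the variance both equal mu;
# mu = 3.0 is an illustrative choice):
#
# >>> from scipy.stats import poisson
# >>> poisson.expect(lambda k: k, args=(3.0,))               # -> ~3.0
# >>> poisson.expect(lambda k: (k - 3.0)**2, args=(3.0,))    # -> ~3.0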
def _iter_chunked(x0, x1, chunksize=4, inc=1):
"""Iterate from x0 to x1 in chunks of chunksize and steps inc.
x0 must be finite, x1 need not be. In the latter case, the iterator is infinite.
Handles both x0 < x1 and x0 > x1. In the latter case, iterates downwards
(make sure to set inc < 0.)
>>> [x for x in _iter_chunked(2, 5, inc=2)]
[array([2, 4])]
>>> [x for x in _iter_chunked(2, 11, inc=2)]
[array([2, 4, 6, 8]), array([10])]
>>> [x for x in _iter_chunked(2, -5, inc=-2)]
[array([ 2, 0, -2, -4])]
>>> [x for x in _iter_chunked(2, -9, inc=-2)]
[array([ 2, 0, -2, -4]), array([-6, -8])]
"""
if inc == 0:
raise ValueError('Cannot increment by zero.')
if chunksize <= 0:
raise ValueError('Chunk size must be positive; got %s.' % chunksize)
s = 1 if inc > 0 else -1
stepsize = abs(chunksize * inc)
x = x0
while (x - x1) * inc < 0:
delta = min(stepsize, abs(x - x1))
step = delta * s
supp = np.arange(x, x + step, inc)
x += step
yield supp
class rv_sample(rv_discrete):
"""A 'sample' discrete distribution defined by the support and values.
The constructor ignores most of the arguments; it only needs the `values` argument.
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8, values=None, inc=1, longname=None,
shapes=None, extradoc=None, seed=None):
super(rv_discrete, self).__init__(seed)
if values is None:
raise ValueError("rv_sample.__init__(..., values=None,...)")
# cf generic freeze
self._ctor_param = dict(
a=a, b=b, name=name, badvalue=badvalue,
moment_tol=moment_tol, values=values, inc=inc,
longname=longname, shapes=shapes, extradoc=extradoc, seed=seed)
if badvalue is None:
badvalue = nan
self.badvalue = badvalue
self.moment_tol = moment_tol
self.inc = inc
self.shapes = shapes
self.vecentropy = self._entropy
xk, pk = values
if len(xk) != len(pk):
raise ValueError("xk and pk need to have the same length.")
if not np.allclose(np.sum(pk), 1):
raise ValueError("The sum of provided pk is not 1.")
indx = np.argsort(np.ravel(xk))
self.xk = np.take(np.ravel(xk), indx, 0)
self.pk = np.take(np.ravel(pk), indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.qvals = np.cumsum(self.pk, axis=0)
self.shapes = ' ' # bypass inspection
self._construct_argparser(meths_to_inspect=[self._pmf],
locscale_in='loc=0',
# scale=1 for discrete RVs
locscale_out='loc, 1')
self._construct_docstrings(name, longname, extradoc)
@property
@np.deprecate(message="`return_integers` attribute is not used anywhere any"
" longer and is deprecated in scipy 0.18.")
def return_integers(self):
return 0
def _pmf(self, x):
return np.select([x == k for k in self.xk],
[np.broadcast_arrays(p, x)[0] for p in self.pk], 0)
def _cdf(self, x):
xx, xxk = np.broadcast_arrays(x[:, None], self.xk)
indx = np.argmax(xxk > xx, axis=-1) - 1
return self.qvals[indx]
def _ppf(self, q):
qq, sqq = np.broadcast_arrays(q[..., None], self.qvals)
indx = argmax(sqq >= qq, axis=-1)
return self.xk[indx]
def _rvs(self):
# Need to define it explicitly, otherwise .rvs() with size=None
# fails due to explicit broadcasting in _ppf
U = self._random_state.random_sample(self._size)
if self._size is None:
U = np.array(U, ndmin=1)
Y = self._ppf(U)[0]
else:
Y = self._ppf(U)
return Y
def _entropy(self):
return entropy(self.pk)
def generic_moment(self, n):
n = asarray(n)
return np.sum(self.xk**n[np.newaxis, ...] * self.pk, axis=0)
@np.deprecate(message="moment_gen method is not used anywhere any more "
"and is deprecated in scipy 0.18.")
def moment_gen(self, t):
t = asarray(t)
return np.sum(exp(self.xk * t[np.newaxis, ...]) * self.pk, axis=0)
@property
@np.deprecate(message="F attribute is not used anywhere any longer and "
"is deprecated in scipy 0.18.")
def F(self):
return dict(zip(self.xk, self.qvals))
@property
@np.deprecate(message="Finv attribute is not used anywhere any longer and "
"is deprecated in scipy 0.18.")
def Finv(self):
decreasing_keys = sorted(self.F.keys(), reverse=True)
return dict((self.F[k], k) for k in decreasing_keys)
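# A minimal, hedged usage sketch: rv_sample is reached through the public
# rv_discrete(values=...) constructor (support and probabilities below are
# illustrative):
#
# >>> from scipy.stats import rv_discrete
# >>> custm = rv_discrete(name='custm', values=([1, 3, 5], [0.2, 0.5, 0.3]))
# >>> custm.pmf(3)             # -> 0.5
# >>> custm.cdf([1, 3, 5])     # -> [0.2, 0.7, 1.0]
# >>> custm.ppf(0.6)           # -> 3.0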
def get_distribution_names(namespace_pairs, rv_base_class):
"""
Collect names of statistical distributions and their generators.
Parameters
----------
namespace_pairs : sequence
A snapshot of (name, value) pairs in the namespace of a module.
rv_base_class : class
The base class of random variable generator classes in a module.
Returns
-------
distn_names : list of strings
Names of the statistical distributions.
distn_gen_names : list of strings
Names of the generators of the statistical distributions.
Note that these are not simply the names of the statistical
distributions, with a _gen suffix added.
"""
distn_names = []
distn_gen_names = []
for name, value in namespace_pairs:
if name.startswith('_'):
continue
if name.endswith('_gen') and issubclass(value, rv_base_class):
distn_gen_names.append(name)
if isinstance(value, rv_base_class):
distn_names.append(name)
return distn_names, distn_gen_names
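# A hedged sketch of the same name-collection idea, applied directly to the
# public scipy.stats namespace (the printed names are indicative only):
#
# >>> import scipy.stats as stats
# >>> from scipy.stats import rv_continuous
# >>> names = [name for name, value in vars(stats).items()
# ...          if isinstance(value, rv_continuous) and not name.startswith('_')]
# >>> sorted(names)[:3]     # e.g. ['alpha', 'anglit', 'arcsine']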
|
bsd-3-clause
|
jakobworldpeace/scikit-learn
|
sklearn/metrics/classification.py
|
2
|
72558
|
"""Metrics to assess performance on classification task given class prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Jatin Shah <jatindshah@gmail.com>
# Saurabh Jha <saurabh.jhaa@gmail.com>
# Bernardo Stein <bernardovstein@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import assert_all_finite
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from ..exceptions import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
# We can't have more than one value on y_type => The set is no more needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
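# A hedged illustration of the target-type logic above, via the public helper
# it builds on (the inputs are illustrative):
#
# >>> from sklearn.utils.multiclass import type_of_target
# >>> type_of_target([0, 1, 1, 0])        # -> 'binary'
# >>> type_of_target([0, 2, 1, 3])        # -> 'multiclass'
# >>> type_of_target([[0, 1], [1, 1]])    # -> 'multilabel-indicator'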
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
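# A minimal worked example of the three weighting branches above (a standalone
# sketch with illustrative numbers):
#
# >>> import numpy as np
# >>> score = np.array([1., 0., 1., 1.])    # per-sample correctness
# >>> w = np.array([0.5, 1.0, 2.0, 0.5])
# >>> np.average(score, weights=w)          # normalize=True         -> 0.75
# >>> np.dot(score, w)                      # weighted, unnormalized -> 3.0
# >>> score.sum()                           # unweighted count       -> 3.0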
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the fraction of correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
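# A hedged add-on to the docstring examples above, showing sample_weight
# (the weights are illustrative):
#
# >>> from sklearn.metrics import accuracy_score
# >>> accuracy_score([0, 1, 2, 3], [0, 2, 1, 3],
# ...                sample_weight=[1, 1, 1, 2])   # weight 3 of 5 correct -> 0.6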
def confusion_matrix(y_true, y_pred, labels=None, sample_weight=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Thus in binary classification, the count of true negatives is
:math:`C_{0,0}`, false negatives is :math:`C_{1,0}`, true positives is
:math:`C_{1,1}` and false positives is :math:`C_{0,1}`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<https://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
In the binary case, we can extract true positives, etc as follows:
>>> tn, fp, fn, tp = confusion_matrix([0, 1, 0, 1], [1, 1, 1, 0]).ravel()
>>> (tn, fp, fn, tp)
(0, 2, 1, 1)
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if np.all([l not in y_true for l in labels]):
raise ValueError("At least one label specified must be in y_true")
if sample_weight is None:
sample_weight = np.ones(y_true.shape[0], dtype=np.int)
else:
sample_weight = np.asarray(sample_weight)
check_consistent_length(sample_weight, y_true, y_pred)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
# also eliminate weights of eliminated items
sample_weight = sample_weight[ind]
CM = coo_matrix((sample_weight, (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
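# A hedged follow-up to the examples above: per-class (row-normalized) rates
# derived from the raw counts, using the same illustrative labels:
#
# >>> import numpy as np
# >>> from sklearn.metrics import confusion_matrix
# >>> cm = confusion_matrix([2, 0, 2, 2, 0, 1], [0, 0, 2, 2, 0, 2])
# >>> cm.astype(float) / cm.sum(axis=1, keepdims=True)   # each row sums to 1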
def cohen_kappa_score(y1, y2, labels=None, weights=None, sample_weight=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1]_, a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2]_.
Read more in the :ref:`User Guide <cohen_kappa>`.
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
weights : str, optional
Weighting type to calculate the score. None means no weighting;
"linear" means linear weighting; "quadratic" means quadratic weighting.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] `R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistics 34(4):555-596.
<http://www.mitpressjournals.org/doi/abs/10.1162/coli.07-034-R2#.V0J1MJMrIWo>`_
.. [3] `Wikipedia entry for the Cohen's kappa.
<https://en.wikipedia.org/wiki/Cohen%27s_kappa>`_
"""
confusion = confusion_matrix(y1, y2, labels=labels,
sample_weight=sample_weight)
n_classes = confusion.shape[0]
sum0 = np.sum(confusion, axis=0)
sum1 = np.sum(confusion, axis=1)
expected = np.outer(sum0, sum1) / np.sum(sum0)
if weights is None:
w_mat = np.ones([n_classes, n_classes], dtype=np.int64)
w_mat.flat[:: n_classes + 1] = 0
elif weights == "linear" or weights == "quadratic":
w_mat = np.zeros([n_classes, n_classes], dtype=np.int64)
w_mat += np.arange(n_classes)
if weights == "linear":
w_mat = np.abs(w_mat - w_mat.T)
else:
w_mat = (w_mat - w_mat.T) ** 2
else:
raise ValueError("Unknown kappa weighting type.")
k = np.sum(w_mat * confusion) / np.sum(w_mat * expected)
return 1 - k
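# Illustrative sketch (not part of the original module): Cohen's kappa
# computed directly from the definition kappa = (p_o - p_e) / (1 - p_e), to
# make the matrix algebra above concrete. The helper name is hypothetical and
# the labels are made up for the demonstration.
def _demo_cohen_kappa_by_hand():
    import numpy as np
    y1 = [0, 0, 1, 1]
    y2 = [0, 0, 1, 0]
    C = confusion_matrix(y1, y2)                # [[2, 0], [1, 1]]
    n = C.sum()
    p_o = np.trace(C) / float(n)                # observed agreement = 0.75
    # chance agreement from the two annotators' marginal label frequencies
    p_e = np.dot(C.sum(axis=1), C.sum(axis=0)) / float(n * n)   # = 0.5
    kappa = (p_o - p_e) / (1 - p_e)             # = 0.5
    assert np.isclose(kappa, cohen_kappa_score(y1, y2))
    return kappa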
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<https://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
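# Illustrative sketch (not part of the original module): the multilabel
# Jaccard score above is |true AND pred| / |true OR pred| per sample, then
# averaged. A dense numpy version of the docstring example, with a
# hypothetical helper name:
def _demo_multilabel_jaccard_by_hand():
    import numpy as np
    y_true = np.array([[0, 1], [1, 1]])
    y_pred = np.ones((2, 2), dtype=int)
    intersection = np.logical_and(y_true, y_pred).sum(axis=1)
    union = np.logical_or(y_true, y_pred).sum(axis=1)
    per_sample = intersection / union.astype(float)   # [0.5, 1.0]
    return per_sample.mean()                           # == 0.75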
def matthews_corrcoef(y_true, y_pred, sample_weight=None):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
sample_weight : array-like of shape = [n_samples], default None
Sample weights.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<https://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
mean_yt = np.average(y_true, weights=sample_weight)
mean_yp = np.average(y_pred, weights=sample_weight)
y_true_u_cent = y_true - mean_yt
y_pred_u_cent = y_pred - mean_yp
cov_ytyp = np.average(y_true_u_cent * y_pred_u_cent, weights=sample_weight)
var_yt = np.average(y_true_u_cent ** 2, weights=sample_weight)
var_yp = np.average(y_pred_u_cent ** 2, weights=sample_weight)
mcc = cov_ytyp / np.sqrt(var_yt * var_yp)
if np.isnan(mcc):
return 0.
else:
return mcc
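# Illustrative sketch (not part of the original module): for binary labels the
# covariance form used above agrees with the textbook confusion-matrix formula
# MCC = (tp*tn - fp*fn) / sqrt((tp+fp)(tp+fn)(tn+fp)(tn+fn)). The helper name
# is hypothetical; the labels reuse the docstring example.
def _demo_mcc_equivalence():
    import numpy as np
    y_true = np.array([+1, +1, +1, -1])
    y_pred = np.array([+1, -1, +1, +1])
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
    denom = np.sqrt(float((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)))
    mcc_classic = (tp * tn - fp * fn) / denom      # == -1/3
    assert np.isclose(mcc_classic, matthews_corrcoef(y_true, y_pred))
    return mcc_classic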
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float
Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
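# Illustrative sketch (not part of the original module): the F-beta formula
# F_beta = (1 + beta^2) * P * R / (beta^2 * P + R), evaluated for class 0 of
# the docstring example above (P = 2/3, R = 1). beta = 1 reproduces the F1
# value 0.8 and beta = 0.5 reproduces the 0.71... entry. Hypothetical helper.
def _demo_fbeta_by_hand(beta):
    p, r = 2.0 / 3.0, 1.0
    return (1 + beta ** 2) * p * r / (beta ** 2 * p + r)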
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
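# Illustrative sketch (not part of the original module): _prf_divide turns
# 0/0 cells into 0.0 and emits an UndefinedMetricWarning instead of
# propagating NaN. The helper name is hypothetical and only exercises that
# behaviour on made-up counts.
def _demo_prf_divide_zero_division():
    import numpy as np
    tp_sum = np.array([2.0, 0.0])
    pred_sum = np.array([4.0, 0.0])    # second label is never predicted
    with np.errstate(divide='ignore', invalid='ignore'):
        precision = _prf_divide(tp_sum, pred_sum, 'precision', 'predicted',
                                average=None, warn_for=('precision',))
    # The ill-defined second entry is reported as 0.0 rather than NaN.
    assert precision.tolist() == [0.5, 0.0]
    return precision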
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
support : int (if average is not None) or array of int, shape =\
[n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<https://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<https://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
<http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary':
if y_type == 'binary':
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
else:
raise ValueError("Target is %s but average='binary'. Please "
"choose another average setting." % y_type)
elif pos_label not in (None, 1):
warnings.warn("Note that pos_label (set to %r) is ignored when "
"average != 'binary' (got %r). You may use "
"labels=[pos_label] to specify a single positive class."
% (pos_label, average), UserWarning)
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
# Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
# Finally, we have all our sufficient statistics. Divide! #
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
# Don't need to warn for F: either P or R warned, or tp == 0 where pos
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
# Average the results
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
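# Illustrative sketch (not part of the original module): the multiclass branch
# above reduces everything to three bincounts -- true positives per label,
# predicted counts per label and true counts per label -- from which precision
# and recall follow element-wise. A standalone numpy version of that idea,
# with a hypothetical helper name and the docstring-style example labels:
def _demo_prf_from_bincounts():
    import numpy as np
    y_true = np.array([0, 1, 2, 0, 1, 2])
    y_pred = np.array([0, 2, 1, 0, 0, 1])
    n_labels = 3
    tp_sum = np.bincount(y_true[y_true == y_pred], minlength=n_labels)
    pred_sum = np.bincount(y_pred, minlength=n_labels)
    true_sum = np.bincount(y_true, minlength=n_labels)
    precision = tp_sum / pred_sum.astype(float)   # [2/3, 0, 0]
    recall = tp_sum / true_sum.astype(float)      # [1, 0, 0]
    return precision, recall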
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
.. versionchanged:: 0.17
parameter *labels* improved for multiclass problem.
pos_label : str or int, 1 by default
The class to report if ``average='binary'`` and the data is binary.
If the data are multiclass or multilabel, this will be ignored;
setting ``labels=[pos_label]`` and ``average != 'binary'`` will report
scores for that label only.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
The reported averages are a prevalence-weighted macro-average across
classes (equivalent to :func:`precision_recall_fscore_support` with
``average='weighted'``).
Note that in binary classification, recall of the positive class
is also known as "sensitivity"; recall of the negative class is
"specificity".
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if target_names is not None and len(labels) != len(target_names):
warnings.warn(
"labels size, {0}, does not match size of target_names, {1}"
.format(len(labels), len(target_names))
)
last_line_heading = 'avg / total'
if target_names is None:
target_names = [u'%s' % l for l in labels]
name_width = max(len(cn) for cn in target_names)
width = max(name_width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
head_fmt = u'{:>{width}s} ' + u' {:>9}' * len(headers)
report = head_fmt.format(u'', *headers, width=width)
report += u'\n\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
row_fmt = u'{:>{width}s} ' + u' {:>9.{digits}f}' * 3 + u' {:>9}\n'
rows = zip(target_names, p, r, f1, s)
for row in rows:
report += row_fmt.format(*row, width=width, digits=digits)
report += u'\n'
# compute averages
report += row_fmt.format(last_line_heading,
np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s),
np.sum(s),
width=width, digits=digits)
return report
def hamming_loss(y_true, y_pred, labels=None, sample_weight=None,
classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
labels : array, shape = [n_labels], optional (default=None)
Integer array of labels. If not provided, labels will be inferred
from y_true and y_pred.
.. versionadded:: 0.18
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
.. versionadded:: 0.18
classes : array, shape = [n_labels], optional
(deprecated) Integer array of labels. This parameter has been
renamed to ``labels`` in version 0.18 and will be removed in 0.20.
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
In multiclass classification, the Hamming loss corresponds to the Hamming
distance between ``y_true`` and ``y_pred``, which is equivalent to the
subset ``zero_one_loss`` function.
In multilabel classification, the Hamming loss is different from the
subset zero-one loss. The zero-one loss considers the entire set of labels
for a given sample incorrect if it does not entirely match the true set of
labels. The Hamming loss is more forgiving in that it penalizes only the
individual labels.
The Hamming loss is upper-bounded by the subset zero-one loss. When
normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<https://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
if classes is not None:
warnings.warn("'classes' was renamed to 'labels' in version 0.18 and "
"will be removed in 0.20.", DeprecationWarning)
labels = classes
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
if sample_weight is None:
weight_average = 1.
else:
weight_average = np.mean(sample_weight)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred,
sample_weight=sample_weight)
return (n_differences /
(y_true.shape[0] * len(labels) * weight_average))
elif y_type in ["binary", "multiclass"]:
return _weighted_sum(y_true != y_pred, sample_weight, normalize=True)
else:
raise ValueError("{0} is not supported".format(y_type))
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None,
labels=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. The log loss is only defined for two or more labels.
For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes) or (n_samples,)
Predicted probabilities, as returned by a classifier's
predict_proba method. If ``y_pred.shape = (n_samples,)``
the probabilities provided are assumed to be that of the
positive class. The labels in ``y_pred`` are assumed to be
ordered alphabetically, as done by
:class:`preprocessing.LabelBinarizer`.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
labels : array-like, optional (default=None)
If not provided, labels will be inferred from y_true. If ``labels``
is ``None`` and ``y_pred`` has shape (n_samples,) the labels are
assumed to be binary and are inferred from ``y_true``.
.. versionadded:: 0.18
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
y_pred = check_array(y_pred, ensure_2d=False)
check_consistent_length(y_pred, y_true)
lb = LabelBinarizer()
if labels is not None:
lb.fit(labels)
else:
lb.fit(y_true)
if len(lb.classes_) == 1:
if labels is None:
raise ValueError('y_true contains only one label ({0}). Please '
'provide the true labels explicitly through the '
'labels argument.'.format(lb.classes_[0]))
else:
raise ValueError('The labels array needs to contain at least two '
'labels for log_loss, '
'got {0}.'.format(lb.classes_))
transformed_labels = lb.transform(y_true)
if transformed_labels.shape[1] == 1:
transformed_labels = np.append(1 - transformed_labels,
transformed_labels, axis=1)
# Clipping
y_pred = np.clip(y_pred, eps, 1 - eps)
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if y_pred.ndim == 1:
y_pred = y_pred[:, np.newaxis]
if y_pred.shape[1] == 1:
y_pred = np.append(1 - y_pred, y_pred, axis=1)
# Check if dimensions are consistent.
transformed_labels = check_array(transformed_labels)
if len(lb.classes_) != y_pred.shape[1]:
if labels is None:
raise ValueError("y_true and y_pred contain different number of "
"classes {0}, {1}. Please provide the true "
"labels explicitly through the labels argument. "
"Classes found in "
"y_true: {2}".format(transformed_labels.shape[1],
y_pred.shape[1],
lb.classes_))
else:
raise ValueError('The number of classes in labels is different '
'from that in y_pred. Classes found in '
'labels: {0}'.format(lb.classes_))
# Renormalize
y_pred /= y_pred.sum(axis=1)[:, np.newaxis]
loss = -(transformed_labels * np.log(y_pred)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
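# Illustrative sketch (not part of the original module): the same value as the
# docstring example, computed straight from the definition
# -1/N * sum_i log p_i(true class). Column order follows the alphabetical
# label ordering ('ham' < 'spam') described above. Hypothetical helper name.
def _demo_log_loss_by_hand():
    import numpy as np
    # probability each classifier row assigned to that sample's true label
    p_true = np.array([0.9,    # 'spam' -> column 1 of [.1, .9]
                       0.9,    # 'ham'  -> column 0 of [.9, .1]
                       0.8,    # 'ham'  -> column 0 of [.8, .2]
                       0.65])  # 'spam' -> column 1 of [.35, .65]
    loss = -np.mean(np.log(p_true))   # ~= 0.21616
    return loss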
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
In binary class case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
In multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
contains all the labels. The multiclass margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<https://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
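# Illustrative sketch (not part of the original module): the Crammer-Singer
# margin used above, written out with plain numpy -- for each sample, the
# decision value of the true class minus the best decision value among the
# other classes, hinged at 1. The helper name and decision values are made up
# for the demonstration.
def _demo_multiclass_hinge_by_hand():
    import numpy as np
    pred_decision = np.array([[ 2.0, 0.5, -1.0],
                              [ 0.2, 0.1,  0.9],
                              [-0.5, 0.3,  0.4]])
    y_true = np.array([0, 1, 2])
    rows = np.arange(len(y_true))
    true_scores = pred_decision[rows, y_true]
    other = pred_decision.copy()
    other[rows, y_true] = -np.inf          # exclude the true class
    margins = true_scores - other.max(axis=1)
    losses = np.maximum(0.0, 1.0 - margins)
    loss = losses.mean()                   # == 0.9
    assert np.isclose(loss, hinge_loss(y_true, pred_decision))
    return loss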
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) > 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int or str, default=None
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
.. [1] `Wikipedia entry for the Brier score.
<https://en.wikipedia.org/wiki/Brier_score>`_
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
assert_all_finite(y_true)
assert_all_finite(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
| bsd-3-clause |
| k1643/StratagusAI | projects/player-1/gamelogs.py | 1 | 87828 |
import os.path
import csv
import datetime
import glob
import math
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
import random
import scipy
import scipy.stats # confidence intervals
import sqlite3
import sys
import yaml
# statistics CSV column names and indexes.
cols = {
"event":0, # plan or end (of game)
"player ID":1, # the number of player being evaluated
"player":2, # type of player being evaluated
"strategy":3, # strategy of player being evaluated
"simreplan":4, # is matrix switching player using simulation re-planning?
"opponent":5, # type of opponent
"predicted":6, # predicted winner
"predicted diff":7, #
"actual":8, # actual winner
"diff":9,
"cycle":10, # cycle of this event
"map":11
}
sim_maps = ['2bases-game',
'2bases_switched',
'the-right-strategy-game',
'the-right-strategy-game_switched'
]
# for games against built-in script
script_maps = [
['../../maps/2bases_PvC.smp','../../maps/2bases_switched_PvC.smp'],
['../../maps/the-right-strategy_PvC.smp','../../maps/the-right-strategy_switched_PvC.smp']
]
# planner vs. planner maps
# same order as sim_maps.
planner_maps = [
'../../maps/2bases.smp',
'../../maps/2bases_switched.smp',
'../../maps/the-right-strategy.smp',
'../../maps/the-right-strategy_switched.smp',
]
mapnames = ['2bases','the-right-strategy']
# planner vs. planner maps
engine_maps = [
['../../maps/2bases.smp','../../maps/2bases_switched.smp',],
['../../maps/the-right-strategy.smp','../../maps/the-right-strategy_switched.smp']
]
switching = ['Nash','maximin', 'monotone']
#epochs = [10000,20000,40000,80000] # divide game in 4 epochs
#epochs = [6030,12060,18090,24120,30150,36180,42210,48240,54270,60300,66330,72360,78390
epochs = [6030,12060,18090,24120,80000]
def write_table(data,fmt,rowhdr,colhdr,label,caption,filepath,hline=None,bolddiag=False,colspec=None):
"""write data matrix as LaTeX table"""
today = datetime.date.today()
tex = open(filepath,'w')
tex.write("% table written on {0} by {1}\n".format(today.strftime('%Y-%m-%d'),sys.argv[0]))
tex.write("\\begin{table}[!ht]\n")
tex.write("\\centering\n")
tex.write("\\begin{tabular}")
tex.write("{")
if colspec:
tex.write(colspec)
else:
tex.write("l |")
for j in range(len(colhdr)):
tex.write(" r ") # assume numbers in cells
tex.write("}\n")
# column header
for c in colhdr:
tex.write(" & " + c)
tex.write("\\cr\n")
tex.write("\\hline\n")
for i in range(len(rowhdr)):
tex.write(rowhdr[i])
for j in range(len(colhdr)):
x = data[i][j]
tex.write(" & ")
if bolddiag and i==j:
tex.write("\\textbf{")
if x:
tex.write(fmt(x))
elif x == 0:
tex.write("0")
if bolddiag and i==j:
tex.write("}")
tex.write("\\cr\n")
if hline == i:
tex.write("\\hline\n")
tex.write("\\end{tabular}\n")
tex.write("\\caption{" + caption + "}\n")
tex.write("\\label{" + label + "}\n")
tex.write("\\end{table}\n")
tex.close()
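# Illustrative sketch (not part of the original script): write_table() emits a
# complete LaTeX table environment. A 2x2 win-rate matrix could be written
# with a call like the one below; the helper name, output file name, data and
# column labels are made up for the demonstration (the row labels reuse the
# 'switching' player names defined above).
def _demo_write_table():
    data = [[0.75, 0.40],
            [0.60, 0.55]]
    write_table(data,
                fmt=lambda x: "{0:.2f}".format(x),
                rowhdr=['Nash', 'maximin'],
                colhdr=['balanced', 'rush'],
                label='tab:winrates',
                caption='Win rates of switching players against fixed strategies.',
                filepath='winrates.tex')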
def print_table(data,fmt,rowhdr,colhdr,caption):
"""print data matrix to console"""
colwidth = max([len(c) for c in rowhdr])
colfmt = "{0:"+str(colwidth)+"}"
for c in colhdr:
print colfmt.format(c),
print
for i in range(len(rowhdr)):
print colfmt.format(rowhdr[i]),
for j in range(len(colhdr)):
x = data[i][j]
if x:
print fmt(x),
elif x == 0:
print "0",
print
print caption
def max_index(data):
"""get indexes of max values in data"""
m = max(data)
return [i for i,v in enumerate(data) if v==m]
def count_wins(v):
"""count wins in score sequence"""
return reduce(lambda v1,v2: v1+1 if v2 > 0 else v1,v,0)
def max_star(data):
"""get list with '*' where max value is in data"""
m = max(data)
return ['*' if v == m else ' ' for v in data]
def normal_approx_interval(p,n,bound):
"""get 95% confidence interval around sample success rate sp assuming n bernoulli trials, normal distribution"""
# for a 95% confidence level the error (\alpha) is 5%,
# so 1- \alpha /2=0.975 and z_{1- \alpha /2}=1.96.
z = 1.96 # z=1.0 for 85%, z=1.96 for 95%
n = float(n)
if bound == 'upper':
return p + z*math.sqrt(p*(1-p)/n)
elif bound == 'lower':
return p - z*math.sqrt(p*(1-p)/n)
else:
raise Exception("unknown bound " + bound)
def wilson_score_interval(p,n,bound):
"""get 95% confidence interval around sample success rate sp assuming n bernoulli trials"""
# for a 95% confidence level the error (\alpha) is 5%,
# so 1- \alpha /2=0.975 and z_{1- \alpha /2}=1.96.
z = 1.96 # z=1.0 for 85%, z=1.96 for 95%
n = float(n)
#return z*math.sqrt(sp*(1-sp)/float(n))
#
# Wilson score interval:
#
# z^2 p(1-p) z^2
# p + ---- (+-) z * sqrt( ------ + ------ )
# 2n n 4n^2
# ----------------------------------------------
# z^2
# 1 + ----
# n
if bound == 'upper':
return ((p + z*z/(2*n) + z * math.sqrt((p*(1-p)+z*z/(4*n))/n))/(1+z*z/n))
elif bound == 'lower':
return ((p + z*z/(2*n) - z * math.sqrt((p*(1-p)+z*z/(4*n))/n))/(1+z*z/n))
else:
raise Exception("unknown bound " + bound)
def bernoulli_confidence(v,formula='normal'):
"""turn score sequence into bernoulli trials. return win rate and confidence interval"""
nWins = count_wins(v)
n = len(v)
rate = nWins/float(len(v))
if formula == 'normal':
f = normal_approx_interval
elif formula == 'wilson':
f = wilson_score_interval
else:
raise Exception,"unknown interval formula"+formula
return [rate, [f(rate,n,'lower'),f(rate,n,'upper')]]
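# Illustrative sketch (not part of the original script): comparing the two
# interval formulas on a short score sequence. With only 10 games the Wilson
# interval stays inside [0, 1] while the normal approximation can spill past
# the boundary. Hypothetical helper name; the scores are made up.
def _demo_confidence_intervals():
    scores = [1, 1, 1, -1, 1, 1, 1, 1, 1, 1]   # 9 wins out of 10
    rate_n, (lo_n, hi_n) = bernoulli_confidence(scores, formula='normal')
    rate_w, (lo_w, hi_w) = bernoulli_confidence(scores, formula='wilson')
    # Both rates are 0.9; hi_n exceeds 1.0 (~1.086) whereas hi_w stays below
    # 1.0 (~0.982), which is why the Wilson form is usually preferred for
    # small n.
    return (lo_n, hi_n), (lo_w, hi_w)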
def validate_games(curs,scores_dict,strategies):
# calculate expected number of games
stratlist = "(" + reduce(lambda x, y: x+','+y,["'"+s+"'" for s in strategies]) + ")"
cmd = "select count(*) from event where event='end' and player=? and opponent in " + stratlist
# fixed vs. fixed strategy
for player in strategies:
curs.execute(cmd,(player,))
c = curs.fetchone()[0]
print c,"games for",player, "vs. fixed strategy"
# switching vs. fixed strategy
for player in switching:
curs.execute(cmd,(player,))
c = curs.fetchone()[0]
print c,"games for",player, "vs. fixed strategy"
# switching vs. switching
swlist = "(" + reduce(lambda x, y: x+','+y,["'"+s+"'" for s in switching]) + ")"
curs.execute("select count(*) from event where event='end' and player in " + swlist + " and opponent in " + swlist)
print curs.fetchone()[0],"switching vs. switching episodes"
# switching vs. built-in
curs.execute("select count(*) from event where event='end' and player in " + swlist + " and opponent = 'built-in'")
print curs.fetchone()[0],"switching vs. built-in episodes"
# total
curs.execute("select count(*) from event where event='end'")
print curs.fetchone()[0],"total episodes"
# validate scores dict.
total = 0
counts = [0,0,0,0]
for k,v in scores_dict.iteritems():
c = len(v)
if k[0] in strategies and k[1] in strategies:
counts[0] += c
elif (k[0] in switching and k[1] in strategies) or (k[1] in switching and k[0] in strategies):
counts[1] += c
elif k[0] in switching and k[1] in switching:
counts[2] += c
elif k[0] == 'built-in' or k[1] == 'built-in':
counts[3] += c
else:
print "no category for", k
total += c
print "scores dictionary"
print total,"episodes"
print counts[0], "strategy vs. strategy episodes"
print counts[1], "switching vs. strategy episodes"
print counts[2], "switching vs. switching episodes"
print counts[3], "switching vs. built-in"
def load_strategy_defs(d, strategy_set):
print "load_strategy_defs()"
filepath = os.path.join(d, 'sw_strategies_'+ strategy_set + '.yaml')
f = open(filepath,'rb')
strat_data = yaml.load(f)
f.close()
strs = strat_data[0]['matrix']
names = []
for s in strs:
names.append(s[0])
return names
def write_strategy_defs(d, strategy_set):
filepath = os.path.join(d, 'sw_strategies_'+ strategy_set + '.yaml')
f = open(filepath,'rb')
strat_data = yaml.load(f)
f.close()
strs = strat_data[0]['matrix']
for i in range(len(strs)):
strs[i] = strs[i][1:]
# write TEX strategy definitions
fmt = lambda x: str(x)
rowhdr = [str(j) + ". " + strategies[j].replace('_',' ') for j in range(len(strategies))]
colhdr = strat_data[0]['colhdr'][1:]
caption = strat_data[0]['caption']
label = strat_data[0]['label']
outfile = os.path.join(d, 'sw_strategies_'+strategy_set+'.tex')
write_table(strs,fmt,rowhdr,colhdr,label,caption,outfile)
return strategies # return strategy template names
class Medians:
"""calculate medians and confidence intervals"""
def __init__(self,scores,strategies,threshold=.95):
# strategy vs. strategy table of ConfidenceIntervals indexed by mapname
self.s_v_s_intervals = {}
# confidence interval for maximin of strategy vs. strategy table
# indexed by mappath
self.s_v_s_maximin_interval = {}
#
self.sw_v_s_intervals = {}
# switching planner vs. fixed strategy tables.
self.sw_v_s_min_intervals = {} # compare min of switching vs. strategy to maximin
# build tables.
#
self.strategies = strategies
# load median data
#
# load fixed strategy vs. fixed strategy games
for mapname in mapnames: # ['2bases','the-right-strategy']
table = [[None for player in strategies] for opponent in strategies]
interval_table = [[None for player in strategies] for opponent in strategies]
for i in range(len(strategies)):
opponent = strategies[i]
for j in range(len(strategies)):
player = strategies[j]
v = get_scores(player,opponent,mapname,scores)
if len(v) > 0:
interval = get_confidence_interval(v,threshold)
interval.player = player
interval.opponent = opponent
table[i][j] = interval.median
interval_table[i][j] = interval
self.s_v_s_intervals[mapname] = interval_table
# get confidence interval around maximin
mins = np.min(table,axis=0) # column mins
mins_indexes = np.argmin(table,axis=0) # row indexes
maximin_col = np.argmax(mins)
for i in mins_indexes:
if table[i][maximin_col] == mins[maximin_col]:
self.s_v_s_maximin_interval[mapname] = interval_table[i][maximin_col]
assert self.s_v_s_maximin_interval[mapname] and self.s_v_s_maximin_interval[mapname].median == mins[maximin_col]
# load switching planner vs. fixed strategy games
for mapname in mapnames:
self.sw_v_s_min_intervals[mapname] = {}
interval_table = [[None for player in switching] for opponent in strategies]
for j in range(len(switching)):
player = switching[j]
min_interval = None
for i in range(len(strategies)):
opponent = strategies[i]
v = get_scores(player,opponent,mapname,scores)
if len(v) > 0:
interval = get_confidence_interval(v,threshold)
interval.player = player
interval.opponent = opponent
interval_table[i][j] = interval
if (not min_interval) or min_interval.median > interval.median:
min_interval = interval
# get confidence interval around min
assert min_interval, "no minimum found for " + player
self.sw_v_s_min_intervals[mapname][player] = min_interval
self.sw_v_s_intervals[mapname] = interval_table
class Means:
"""calculate means"""
def __init__(self,scores,strategies):
# strategy vs. strategy table of ConfidenceIntervals indexed by mappath
self.s_v_s_means = {}
# maximin value and strategy pair for maximin of strategy vs. strategy table
# indexed by mappath
self.s_v_s_maximin_pair = {} #
# switching planner vs. fixed strategy tables.
self.sw_v_s_min = {} # compare min of switching vs. strategy to maximin
# build tables.
#
self.strategies = strategies
# load mean data
#
# load fixed strategy vs. fixed strategy games
for mapname in mapnames:
table = [[None for player in strategies] for opponent in strategies]
for i in range(len(strategies)):
opponent = strategies[i]
for j in range(len(strategies)):
player = strategies[j]
table[i][j] = get_mean(player,opponent,mapname,scores)
self.s_v_s_means[mapname] = table
# get maximin
mins = np.min(table,axis=0) # column mins
mins_indexes = np.argmin(table,axis=0) # row indexes
maximin_col = np.argmax(mins)
for i in mins_indexes:
# if row i has maximin value in column maximin_col
if table[i][maximin_col] == mins[maximin_col]:
maximin_row = i
self.s_v_s_maximin_pair[mapname] = (table[i][maximin_col],strategies[maximin_col],strategies[maximin_row])
assert self.s_v_s_maximin_pair[mapname] and self.s_v_s_maximin_pair[mapname][0] == mins[maximin_col]
# load switching planner vs. fixed strategy games
for mapname in mapnames:
self.sw_v_s_min[mapname] = {}
for j in range(len(switching)):
player = switching[j]
min_pair = None
for i in range(len(strategies)):
opponent = strategies[i]
v = get_mean(player,opponent,mapname,scores)
if (not min_pair) or min_pair[0] > v:
min_pair = (v,player,opponent)
# get confidence interval around min
assert min_pair, "no minimum found for " + player
self.sw_v_s_min[mapname][player] = min_pair
def show_maximin_compare_errorbars(d,medians):
print "show_maximin_compare_errorbars()"
x = [j+1 for j in range(len(switching)+1)]
xticklabels = ["fixed"]
xticklabels.extend(switching)
for mapname in mapnames:
mins = [] # minimum of medians
upper_conf = [] # difference between upper_confidence and median
lower_conf = []
        # get fixed strategy maximin of medians
conf = medians.s_v_s_maximin_interval[mapname]
upper_conf.append(conf.interval[1] - conf.median) # upper range
mins.append(conf.median)
lower_conf.append(conf.median - conf.interval[0]) # lower range
# get switching planner mins of medians
for j in range(len(switching)):
player = switching[j]
conf = medians.sw_v_s_min_intervals[mapname][player]
upper_conf.append(conf.interval[1] - conf.median) # upper range
mins.append(conf.median)
lower_conf.append(conf.median - conf.interval[0]) # lower range
y = mins
plt.figure()
plt.xticks(x, xticklabels)
plt.xlabel('Switching Planners')
plt.ylabel('Score')
plt.axvline(x=1.5, color='gray',linestyle='--') #axvline(x=0, ymin=0, ymax=1, **kwargs)
plt.xlim( (.5, len(x)+.5) ) # show results at 1,...,len(switching)
plt.errorbar(x, y, yerr=[lower_conf,upper_conf], fmt='bs')
#plt.show()
fname = os.path.join(d,"maxmin_compare_"+mapname+".png")
plt.savefig(fname, format='png') # png, pdf, ps, eps and svg.
def write_maximin_compare(d,medians):
print "write_maximin_compare()"
rowhdr = []
for mapname in mapnames:
rowhdr.append(mapname)
colhdr = ["Fixed"]
for sw in switching:
colhdr.append("\\texttt{"+sw+"}")
colsubhdr = ["Upper","Median","Lower"]
#colsubhdr = ["NA","Mean","NA"]
data = [[None for j in range(len(switching)+1)] for i in range(len(mapnames)*3)]
    # strategy vs. strategy maximin value
for i in range(len(mapnames)):
mapname = mapnames[i]
row = i*3
conf = medians.s_v_s_maximin_interval[mapname]
#print " maximin at",conf.player,"vs.",conf.opponent,conf
data[row][0] = conf.interval[1]
data[row+1][0] = conf.median
#data[row+1][0] = means.s_v_s_maximin_pair[mapname][0]
data[row+2][0] = conf.interval[0]
# switching player vs. strategy minimum value
for j in range(len(switching)):
player = switching[j]
conf = medians.sw_v_s_min_intervals[mapname][player]
#print " min median at",conf.player,"vs.",conf.opponent,conf
data[row][j+1] = conf.interval[1] # upper range
data[row+1][j+1] = conf.median
#data[row+1][j+1] = means.sw_v_s_min[mapname][player][0]
data[row+2][j+1] = conf.interval[0] # lower range
today = datetime.date.today()
filepath = os.path.join(d,"maximin_compare.tex")
tex = open(filepath,'w')
tex.write("% table written on {0} by {1}\n".format(today.strftime('%Y-%m-%d'),sys.argv[0]))
tex.write("\\begin{table}[!ht]\n")
tex.write("\\centering\n")
tex.write("\\begin{tabular}{l l | ")
for j in range(len(colhdr)):
tex.write(" r ") # assume numbers in cells
tex.write("}\n")
# column header
tex.write(" & ")
for c in colhdr:
tex.write(" & " + c)
tex.write("\\cr\n")
tex.write("\\hline\n")
# write Upper,Median,Lower on first map
for i in range(len(colsubhdr)):
if i == 0:
tex.write("\\texttt{{{0}}} & {1}".format(mapnames[0],colsubhdr[0]))
else:
tex.write(" & {0}".format(colsubhdr[i]))
for j in range(len(colhdr)):
x = data[i][j]
tex.write(" & {0:.0f}".format(x))
#tex.write(" & " + str(x))
tex.write("\\cr\n")
tex.write("\\hline\n")
for i in range(len(colsubhdr)):
if i == 0:
tex.write("\\texttt{{{0}}} & {1}".format(mapnames[1],colsubhdr[0]))
else:
tex.write(" & {0}".format(colsubhdr[i]))
for j in range(len(colhdr)):
x = data[3+i][j]
tex.write(" & {0:.0f}".format(x))
#tex.write(" & " + str(x))
tex.write("\\cr\n")
tex.write("\\hline\n")
tex.write("\end{tabular}\n")
tex.write("\\caption{Fixed Strategy Maximin and Switching Planner Minimum Intervals}\n")
tex.write("\\label{table:maximin_and_minimums}\n")
tex.write("\\end{table}\n")
tex.close()
def plot_rate_v_mean(d,scores):
"""show relationship between win rate and mean score"""
# scores = get_strat_v_strat_scores(curs,strategies)
means = {}
rates = {}
for k,v in scores.iteritems():
if len(v) == 0:
continue
means[k] = np.mean(v)
nWins = 0
nGames = len(v)
for score in v:
if score > 0:
nWins += 1
rates[k] = nWins/float(nGames)
for mapname in mapnames:
keys = rates.keys() # get [player,opponent,mappath]
x = [rates[k] for k in filter(lambda t: t[2] == mapname, keys)]
y = [means[k] for k in filter(lambda t: t[2] == mapname, keys)]
plt.figure() # new graph
plt.xlim( (0, 1) ) # 0-100%
plt.xlabel('win rate')
plt.ylabel('mean score')
plt.scatter(x,y)
plt.show()
fname = os.path.join(d,"rate_v_mean_"+mapname+".png")
#plt.savefig(fname, format='png') # png, pdf, ps, eps and svg.
def strat_vs_strat_rate(d,scores_dict,strategies):
"""write strategy vs. strategy win rate table."""
print "strat_vs_strat_rate()"
# setup Latex table
fmt = lambda x: x if x.__class__ == str else "{0:.0f}\%".format(x*100) # formatter.
rowhdr = [str(j) + "." for j in range(len(strategies))]
hline = None
colhdr = [str(i) + "." for i in range(len(strategies))]
for mapname in mapnames:
table = [[None for p in strategies] for o in strategies]
for i in range(len(strategies)):
o = strategies[i] # opponent
for j in range(len(strategies)):
p = strategies[j] # player
v = get_scores(p,o,mapname,scores_dict)
nWins = count_wins(v)
table[i][j] = nWins/float(len(v))
caption = 'Strategy Win Rate on \\texttt{' + mapname.replace('_',' ') + '}'
label = 'engine_rate_' + mapname
outfile = os.path.join(d, 'engine_rate_'+mapname+'.tex')
write_table(table,fmt,rowhdr,colhdr,label,caption,outfile,hline,bolddiag=True)
def strat_vs_strat_score_db(d,curs,strategies,summary='median'):
"""Debugging. write strategy vs. strategy score table from database"""
print "strat_vs_strat_score_db()"
def fmt(x):
if not x:
return " "
elif x.__class__ == str:
return "{0:6}".format(x)
else:
return "{0:6.0f}".format(x)
rowhdr = [str(i) + "." for i in range(len(strategies))]
rowhdr.append("min")
rowhdr.append("mxmn")
cmd = "select diff from event where event='end' and player=? and opponent=? and map=?"
for mappaths in engine_maps:
path,mapname = os.path.split(mappaths[0])
mapname = mapname.replace('.smp','')
table = [[0 for p in strategies] for o in strategies]
for i in range(len(strategies)):
p = strategies[i]
for j in range(i+1):
o = strategies[j]
curs.execute(cmd,(p,o,mappaths[0],)) # from north position
n_scores = [row[0] for row in curs.fetchall()]
scores = n_scores
curs.execute(cmd,(p,o,mappaths[1],)) # from south position
s_scores = [row[0] for row in curs.fetchall()]
scores.extend(s_scores)
if summary == 'median':
stats = np.median
elif summary == 'mean':
stats = np.mean
else:
raise Exception, "unknown summary function", summary
if i == j:
table[j][i] = stats(scores) # transpose for point of view of column player
else:
table[j][i] = stats(scores)
table[i][j] = -stats(scores)
mins = np.min(table,axis=0)
table.append(mins)
table.append(max_star(mins)) # mark the maximin columns)
print mapname
for i in range(len(table)):
print "{0:4}".format(rowhdr[i]),
row = table[i]
for cell in row:
print fmt(cell),
print
def strat_vs_strat_score(d,scores_dict,strategies):
"""write strategy vs. strategy mean score table."""
print "strat_vs_strat_score()"
# setup Latex table
fmt = lambda x: x if x.__class__ == str else "{0:.0f}".format(x) # formatter.
rowhdr = [str(j) + "." for j in range(len(strategies))]
rowhdr.append("min.")
rowhdr.append("maxmin")
hline = len(strategies) - 1 # add horizontal line to table
colhdr = [str(i) + "." for i in range(len(strategies))]
for mapname in mapnames:
table = [[None for p in strategies] for o in strategies]
for i in range(len(strategies)):
o = strategies[i] # opponent
for j in range(len(strategies)):
p = strategies[j] # player
#v = get_scores(p,o,mapname,scores_dict)
table[i][j] = get_mean(p,o,mapname,scores_dict)
mins = np.min(table,axis=0)
table.append(mins)
table.append(max_star(mins)) # mark the maximin columns)
caption = 'Strategy Mean Scores on \\texttt{' + mapname.replace('_',' ') + '}'
label = 'engine_scores_' + mapname
outfile = os.path.join(d, 'engine_scores_'+mapname+'.tex')
write_table(table,fmt,rowhdr,colhdr,label,caption,outfile,hline,bolddiag=True)
def strat_vs_strat_median_score(d,medians,strategies):
"""write strategy vs. strategy median score table."""
print "strat_vs_strat_median_score()"
# setup Latex table
fmt = lambda x: x if x.__class__ == str else "{0:.0f}".format(x) # formatter.
rowhdr = [str(j) + "." for j in range(len(strategies))]
rowhdr.append("minimum")
rowhdr.append("maximin")
hline = len(strategies) - 1 # add horizontal line to table
colhdr = [str(i) + "." for i in range(len(strategies))]
for mapname in mapnames:
table = [[None for p in strategies] for o in strategies]
confidence_table = medians.s_v_s_intervals[mapname]
for i in range(len(strategies)): # opponent i
for j in range(len(strategies)): # player j
confidence = confidence_table[i][j]
table[i][j] = confidence.median
mins = np.min(table,axis=0)
table.append(mins)
table.append(max_star(mins)) # mark the maximin columns)
caption = 'Strategy Median Scores on \\texttt{' + mapname.replace('_',' ') + '}'
label = 'engine_median_scores_' + mapname
outfile = os.path.join(d, 'engine_median_scores_'+mapname+'.tex')
write_table(table,fmt,rowhdr,colhdr,label,caption,outfile,hline,bolddiag=True)
def sw_vs_strat_scores(d,scores_dict,strategies):
"""write switcher vs. strategy score table."""
print "sw_vs_strat_scores()"
for mapname in mapnames:
sw_vs_strat_map_scores(d,scores_dict,strategies,mapname)
def sw_vs_strat_map_scores(d,scores_dict,strategies,mapname):
"""write switcher vs. strategy score table."""
print "sw_vs_strat_map_scores(" + mapname + ")"
means_table = [[None for p in switching] for o in strategies]
rates_table = [[None for p in switching] for o in strategies]
for i in range(len(strategies)):
o = strategies[i] # opponent
for j in range(len(switching)):
p = switching[j] # player
            # get average score for games from both positions on map
means_table[i][j] = get_mean(p,o,mapname,scores_dict)
rates_table[i][j] = get_rate(p,o,mapname,scores_dict)
# add row for mean results
means = np.mean(means_table,axis=0)
mins = np.min(means_table,axis=0)
means_table.append(means)
means_table.append(mins)
rates_table.append([None for p in switching])
rates_table.append([None for p in switching])
write_sw_vs_strat_map_table(d,means_table,rates_table,strategies,mapname)
def write_sw_vs_strat_map_table(d,data,rates,strategies,mapname):
fmt = lambda x: "{0:.0f}".format(x) # formatter.
rowhdr = [str(i)+'. \\texttt{'+strategies[i].replace('_',' ')+'}' for i in range(len(strategies))]
rowhdr.append("mean")
rowhdr.append("minimum")
hline = len(strategies) - 1
colhdr = ['\\texttt{'+s+'}' for s in switching]
label = 'sw_scores_' + mapname
caption = 'Switching Planner Mean Scores on \\texttt{' + mapname + '}'
fn = 'sw_scores_'+mapname+'.tex'
filepath = os.path.join(d, fn)
#write_table(table,fmt,rowhdr,colhdr,label,caption,outfile,hline)
"""write data matrix as LaTeX table"""
today = datetime.date.today()
tex = open(filepath,'w')
tex.write("% table written on {0} by {1}\n".format(today.strftime('%Y-%m-%d'),sys.argv[0]))
tex.write("\\begin{table}[!ht]\n")
tex.write("\\centering\n")
tex.write("\\begin{tabular}")
tex.write("{")
tex.write("l |")
for j in range(len(colhdr)):
tex.write(" r ") # assume numbers in cells
tex.write("|")
for j in range(len(colhdr)):
tex.write(" r ") # assume numbers in cells
tex.write("}\n")
# column header
tex.write(" & \multicolumn{3}{l}{Mean Scores} & \multicolumn{3}{l}{Win Rates}\\cr\n")
for c in colhdr:
tex.write(" & " + c)
for c in colhdr:
tex.write(" & " + c)
tex.write("\\cr\n")
tex.write("\\hline\n")
for i in range(len(rowhdr)):
tex.write(rowhdr[i])
# score table
for j in range(len(colhdr)):
x = data[i][j]
tex.write(" & ")
if x:
tex.write(fmt(x))
elif x == 0:
tex.write("0")
# rate table
for j in range(len(colhdr)):
x = rates[i][j]
tex.write(" & ")
if x:
tex.write(fmt(x*100) + "\%")
elif x == 0:
tex.write("0")
tex.write("\\cr\n")
if hline == i:
tex.write("\\hline\n")
tex.write("\\end{tabular}\n")
tex.write("\\caption{" + caption + "}\n")
tex.write("\\label{" + label + "}\n")
tex.write("\\end{table}\n")
tex.close()
def sw_vs_strat_median_scores(d,medians):
"""write switcher vs. strategy median score table."""
print "sw_vs_strat_median_scores()"
for mapname in mapnames:
sw_vs_strat_median_map_scores(d,medians,mapname)
def sw_vs_strat_median_map_scores(d,medians,mapname):
"""write switcher vs. strategy score table."""
print "sw_vs_strat_map_scores(" + mapname + ")"
table = [[None for p in switching] for o in medians.strategies]
interval_table = medians.sw_v_s_intervals[mapname]
for i in range(len(medians.strategies)):
for j in range(len(switching)):
table[i][j] = interval_table[i][j].median
# add row for min results
mins = np.min(table,axis=0)
table.append(mins)
fmt = lambda x: "{0:.0f}".format(x)
rowhdr = ['\\texttt{'+s.replace('_',' ')+'}' for s in medians.strategies]
rowhdr.append("minimum")
hline = len(medians.strategies) - 1
colhdr = ['\\texttt{'+s+'}' for s in switching]
label = 'sw_median_scores_' + mapname
caption = 'Switching Planner Median Scores on \\texttt{' + mapname + '}'
fn = 'sw_median_scores_'+mapname+".tex"
outfile = os.path.join(d, fn)
write_table(table,fmt,rowhdr,colhdr,label,caption,outfile,hline)
def sw_vs_strat_rates(d,scores_dict,strategies):
"""write switcher vs. strategy win rate table."""
print "sw_vs_strat_rates()"
for mapname in mapnames:
sw_vs_strat_map_rates(d,scores_dict,strategies,mapname)
def sw_vs_strat_map_rates(d,scores_dict,strategies,mapname):
"""write switcher vs. strategy win rate table."""
print "sw_vs_strat_map_rates(" + mapname + ")"
table = [[None for p in switching] for o in strategies]
for i in range(len(strategies)):
o = strategies[i] # opponent
for j in range(len(switching)):
p = switching[j] # player
v = get_scores(p,o,mapname,scores_dict)
nWins = count_wins(v)
table[i][j] = 100*nWins/float(len(v))
fmt = lambda x: "{0:.0f}\%".format(x) # formatter.
rowhdr = [str(i)+'. \\texttt{'+strategies[i].replace('_',' ')+'}' for i in range(len(strategies))]
hline = None
colhdr = ['\\texttt{'+s+'}' for s in switching]
label = 'sw_rates_' + mapname
caption = 'Switching Planner Win Rates on \\texttt{' + mapname + '}'
fn = 'sw_rates_'+mapname+'.tex'
outfile = os.path.join(d, fn)
write_table(table,fmt,rowhdr,colhdr,label,caption,outfile,hline)
def game_duration(d,curs):
"""how many games last thru each period"""
outfile = os.path.join(d, 'game_duration_barchart.tex')
tex = open(outfile,'w')
tex.write("\\begin{figure}[!ht]\n")
tex.write("\\begin{tikzpicture}\n")
tex.write("""\\begin{axis}[ybar stacked,
area legend,
cycle list={
% see pgfplots.pdf barcharts and
% see pgfmanual.pdf 41 Pattern Library
% patterns: crosshatch, north east lines, north west lines,...
{fill=blue},{fill=red},{fill=teal},{fill=gray},{fill=white},{fill=orange},{fill=black},{fill=violet},{pattern color=red,pattern=north east lines},{pattern color=blue,pattern=north west lines},{fill=brown}
},
legend style={at={(2,.95)}}
]
""")
replans = range(0,80000,6000)
tex.write(" \\addplot coordinates\n")
tex.write(" {")
for t in replans:
nGames = curs.execute("select count(*) from event where event='end' and cycle > ?",(t,)).fetchone()[0]
tex.write(" ({0},{1})".format(t,nGames))
tex.write("};\n")
tex.write(" \\legend{")
for i in range(len(replans)):
if i > 0:
tex.write(", ")
tex.write(str(replans[i]))
tex.write("}\n")
tex.write("\\end{axis}\n")
tex.write("\\end{tikzpicture}\n")
tex.write("\\caption{Game Durations}\n")
tex.write("\\label{game_duration}\n")
tex.write("\\end{figure}\n")
def sw_vs_sw(d,scores_dict):
"""write switcher vs. switcher win rate table."""
print "switcher_vs_switcher()"
for mapname in mapnames:
sw_vs_sw_by_map(d,scores_dict,mapname)
def sw_vs_sw_by_map(d,scores_dict,mapname):
players = switching
opponents = switching[:] # copy
opponents.append('built-in')
counts = [[None for p in players] for o in opponents]
for i in range(len(opponents)):
o = opponents[i] # opponent
for j in range(len(players)):
p = players[j] # player
if p != o:
scores = get_scores(p,o,mapname,scores_dict) # combine scores from N. and S. maps
nWins = 0
for score in scores:
if score > 0:
nWins += 1
counts[i][j] = nWins/float(len(scores))
fmt = lambda x: "{0:.0f}\\%".format(100 * x) # formatter. show as percent.
rowhdr = [s for s in opponents]
colhdr = [s for s in players]
outfile = os.path.join(d, 'sw_vs_sw_win_rate_' + mapname + '.tex')
label = 'sw_vs_sw_win_rate_' + mapname
caption = 'Switching vs.~Switching Win Rates on \\texttt{' + mapname + '}'
write_table(counts,fmt,rowhdr,colhdr,label,caption,outfile)
def switcher_choices(d,curs,strategies):
print "switcher_choices()"
counts = [[0 for p in switching] for s in strategies]
nEvents = [0 for p in switching] # number of planning events for switcher
inclause = "("
for i in range(len(strategies)):
if i > 0:
inclause += ","
inclause += "'" + strategies[i] + "'"
inclause += ")"
#print inclause
for j in range(len(switching)):
p = switching[j]
cmd = "select count(*) from event where event='plan' and simreplan=1 and player=? and opponent in " + inclause
c = curs.execute(cmd,(p,)).fetchone()[0]
nEvents[j] = c
# for each fixed strategy, for each switching planner
for i in range(len(strategies)):
s = strategies[i]
for j in range(len(switching)):
if nEvents[j]:
p = switching[j]
cmd = "select count(*) from event where event='plan' and simreplan=1 and player=? and strategy=? and opponent in " + inclause
nUse = curs.execute(cmd,(p,s,)).fetchone()[0]
counts[i][j] = nUse / float(nEvents[j])
fmt = lambda x: "{0:.0f}\%".format(100 * x) # formatter. show as percent.
colhdr = ['\\texttt{'+s.replace('_',' ')+'}' for s in switching]
rowhdr = [str(i)+'. \\texttt{'+strategies[i].replace('_',' ')+'}' for i in range(len(strategies))]
outfile = os.path.join(d, 'switcher_choices.tex')
write_table(counts,fmt,rowhdr,colhdr,'switcher_choices','Strategy Choices of Switching Planners',outfile)
def switcher_choices_by_epoch(d,curs,strategies):
print "switcher_choices_by_epoch()"
table = [[0 for epoch in epochs] for s in strategies]
inclause = "("
for i in range(len(strategies)):
if i > 0:
inclause += ","
inclause += "'" + strategies[i] + "'"
inclause += ")"
#print inclause
player = 'maximin'
for epoch in range(len(epochs)):
if epoch == 0:
start = 0
else:
start = epochs[epoch-1]
end = epochs[epoch]
# total planning events of epoch
cmd = "select count(*) from event where player=? and simreplan=1 and cycle > ? and cycle <= ? " + \
" and opponent in " + inclause
nEvents = curs.execute(cmd,(player,start,end,)).fetchone()[0]
# for each fixed strategy
for i in range(len(strategies)):
s = strategies[i]
if nEvents:
cmd = "select count(*) from event where player=? and simreplan=1 and cycle >= ? and cycle < ? " + \
" and strategy=? and opponent in " + inclause
nUsed = curs.execute(cmd,(player,start,end,s,)).fetchone()[0]
table[i][epoch] = nUsed / float(nEvents)
fmt = lambda x: "{0:.0f}\%".format(100 * x) # formatter. show as percent.
rowhdr = [str(i)+'. \\texttt{'+strategies[i].replace('_',' ')+'}' for i in range(len(strategies))]
colhdr = ['{:,}'.format(e) for e in epochs]
caption = '\\texttt{maximin} Choices by Epoch'
outfile = os.path.join(d, 'maximin_choices_by_epoch.tex')
write_table(table,fmt,rowhdr,colhdr,'maximin_choices_by_epoch',caption,outfile)
def switcher_choices_by_opponent_map_epoch(d,curs,strategies,player,opponent,mapname):
print "switcher_choices_by_opponent_map_epoch()"
i = mapnames.index(mapname)
mappaths = engine_maps[i]
table = [[0 for epoch in epochs] for s in strategies]
for epoch in range(len(epochs)):
if epoch == 0:
start = 0
else:
start = epochs[epoch-1]
end = epochs[epoch]
# for each fixed strategy
nEvents = 0
for i in range(len(strategies)):
s = strategies[i]
cmd = "select count(*) from event where player=? and simreplan=1 " + \
" and opponent=? " + \
" and cycle >= ? and cycle < ? " + \
" and strategy=? " + \
" and (map=? or map=?)"
nUsed = curs.execute(cmd,(player,opponent,start,end,s,mappaths[0],mappaths[1],)).fetchone()[0]
table[i][epoch] = nUsed
nEvents += nUsed
if nEvents:
for i in range(len(strategies)):
table[i][epoch] = table[i][epoch] / float(nEvents)
fmt = lambda x: "{0:.0f}\%".format(100 * x) # formatter. show as percent.
rowhdr = [str(i)+'. \\texttt{'+strategies[i].replace('_',' ')+'}' for i in range(len(strategies))]
colhdr = ['{:,}'.format(e) for e in epochs]
caption = '\\texttt{{{0}}} vs.~\\texttt{{{1}}} Choices'.format(player,opponent.replace('_',' '))
outfile = os.path.join(d, '{0}_v_{1}_{2}_choices_by_epoch.tex'.format(player,opponent,mapname))
label = '{0}_v_{1}_{2}_choices_by_epoch'.format(player,opponent,mapname)
write_table(table,fmt,rowhdr,colhdr,label,caption,outfile)
def switcher_choice_sequence(d,curs,sw,opponent):
"""print sequence of strategy choices"""
cmd = "select event,strategy,predicted_diff,diff,cycle,map from event where player=? and simreplan=1 " + \
" and opponent=? order by map,game,cycle"
curs.execute(cmd,(sw,opponent,))
m = None
nGames = 0
for row in curs.fetchall():
if m != row[5]:
m = row[5]
print m
if row[0] == 'plan':
print "{0} prediction: {1} at: {2}".format(row[1],row[2],row[4])
else:
print row[0],"score",row[3]
nGames += 1
print nGames,"games"
def switcher_choices_sim(d,strategies,mapname,filename):
print "switcher_choices_sim()"
table = [[0 for epoch in epochs] for s in strategies]
sim_epochs = [6000,12000,18000,24000,80000]
player = None
opponent = None
file = open(os.path.join(d,filename), 'rb')
rd = csv.reader(file)
for row in rd:
event = row[0]
if not player:
player = row[cols["player"]]
opponent = row[cols["opponent"]]
else:
assert player == row[cols["player"]]
assert opponent == row[cols["opponent"]]
assert mapname in row[cols["map"]]
if event == "plan":
# count number of strategy choices in epoch
i = strategies.index(row[cols["strategy"]])
cycle = int(row[cols["cycle"]])
for epoch in range(len(sim_epochs)):
if epoch == 0:
start = 0
else:
start = sim_epochs[epoch-1]
end = sim_epochs[epoch]
if cycle >= start and cycle < end:
break
#print player, "choose strategy",strategies[i],"at cycle",row[cols["cycle"]],"epoch", epoch
table[i][epoch] += 1
    file.close()
    # normalize
sums = np.sum(table,axis=0)
for j in range(len(sums)):
if sums[j] != 0:
for i in range(len(table)):
table[i][j] = table[i][j]/float(sums[j])
#for i in range(len(table)):
# for j in range(len(table[i])):
# print "{0:2.0f}\% ".format(100*table[i][j]),
# print
fmt = lambda x: "{0:.0f}\%".format(100 * x) # formatter. show as percent.
rowhdr = [str(i)+'. \\texttt{'+strategies[i].replace('_',' ')+'}' for i in range(len(strategies))]
colhdr = ['{:,}'.format(e) for e in epochs]
caption = '\\texttt{{{0}}} vs.~\\texttt{{{1}}} Choices on \\texttt{{{2}}} in Simulation'.format(player,opponent.replace('_',' '),mapname)
outfile = os.path.join(d, '{0}_v_{1}_{2}_sim_choices_by_epoch.tex'.format(player,opponent,mapname))
label = '{0}_v_{1}_{2}_sim_choices_by_epoch'.format(player,opponent,mapname)
write_table(table,fmt,rowhdr,colhdr,label,caption,outfile)
def switcher_win_loss_choices(d,curs):
players = switching
data = [[0 for p in players]*2 for s in strategies] # two columns (win,lose) for each player
for j in range(len(players)):
p = players[j]
nEvents = curs.execute("select count(*) from event where player=? and event='plan'",(p,)).fetchone()[0]
if nEvents == 0:
print "No planning events for player", p
continue
# get game IDs of won games.
for i in range(len(strategies)):
s = strategies[i]
n = curs.execute("select count(*) from event where strategy=? and game in (select game from event where player=? and simreplan=1 and actual=0)",(s,p)).fetchone()[0]
data[i][j*2] = n/float(nEvents)
# get game IDs of lost games
n = curs.execute("select count(*) from event where strategy=? and game in (select game from event where player=? and simreplan=1 and actual=1)",(s,p)).fetchone()[0]
data[i][j*2+1] = n/float(nEvents)
fmt = lambda x: "{0:.1f}".format(100 * x) # formatter. show as percent.
colhdr = players
colhdr2 = ['Win','Lose','Win','Lose','Win','Lose']
rowhdr = [s.replace('_',' ') for s in strategies]
filepath = os.path.join(d, 'switcher_win_choices.tex')
caption = 'Strategy Choices of Switching Planners in Winning Games (with Re-Planning)'
label = 'table:switcher_win_choices'
# copied from write_table. We need a different version of table header.
today = datetime.date.today()
tex = open(filepath,'w')
tex.write("% table written on {0} by {1}\n".format(today.strftime('%Y-%m-%d'),sys.argv[0]))
tex.write("\\begin{table}[!ht]\n")
tex.write("\\begin{tabular}{l | ")
for j in range(len(colhdr2)):
tex.write(" r ") # assume numbers in cells
tex.write("}\n")
# column header
for c in colhdr:
tex.write(" & \multicolumn{2}{c}{" + c + "}")
tex.write("\\cr\n")
for c in colhdr2:
tex.write(" & " + c)
tex.write("\\cr\n")
tex.write("\\hline\n")
for i in range(len(rowhdr)):
tex.write(rowhdr[i])
for j in range(len(colhdr2)):
x = data[i][j]
if x:
tex.write(" & " + fmt(x))
elif x == 0:
tex.write(" & 0 ")
else: # None
tex.write(" & ")
tex.write("\\cr\n")
tex.write("\end{tabular}\n")
tex.write("\\caption{" + caption + "}\n")
tex.write("\\label{" + label + "}\n")
tex.write("\\end{table}\n")
tex.close()
def switcher_choices_barchart(d,curs,strategies):
print "switcher_choices_barchart()"
players = switching
for p in players:
for s in ['balanced_7_mass','balanced_9','balanced_9_mass','rush_9']: # strongest strategies.
tex_choices_barchart(d,curs,p,s,strategies)
tex_choices_barchart(d,curs,p,'built-in',strategies)
def tex_choices_barchart(d,curs, player, opponent,strategies):
"""show choices at each planning event"""
print "tex_choices_barchart(" + player + "," + opponent + ")"
label = '{0}_choices_vs_{1}_barchart'.format(player,opponent)
filepath = os.path.join(d, label+'.tex')
tex = open(filepath,'w')
tex.write("\\begin{figure}[!ht]\n")
tex.write("\\begin{tikzpicture}\n")
# need at least 11 bar styles for 11 strategies
tex.write("""\\begin{axis}[ybar stacked,
area legend,
cycle list={
% see pgfplots.pdf barcharts and
% see pgfmanual.pdf 41 Pattern Library
% patterns: crosshatch, north east lines, north west lines,...
{fill=blue},{fill=red},{fill=teal},{fill=gray},{fill=white},{fill=orange},{fill=black},{fill=violet},{pattern color=red,pattern=north east lines},{pattern color=blue,pattern=north west lines},{fill=brown}
},
legend style={at={(2,.95)}}
]
""")
for s in strategies:
tex.write(" \\addplot coordinates\n")
tex.write(" {")
for epoch in range(len(epochs)):
if epoch == 0:
start = 0
else:
start = epochs[epoch-1]
end = epochs[epoch]
c = curs.execute("select count(*) from event where player=? and opponent=? and strategy=? and simreplan=1 and cycle >= ? and cycle < ?",
(player,opponent,s,start,end,)).fetchone()[0]
tex.write(" ({0},{1})".format(start,c))
tex.write("};\n")
tex.write(" \\legend{")
for i in range(len(strategies)):
if i > 0:
tex.write(", ")
tex.write(strategies[i].replace('_',' '))
tex.write("}\n")
tex.write("\\end{axis}\n")
tex.write("\\end{tikzpicture}\n")
caption = player + " Choices vs. " + opponent.replace("_",' ')
tex.write("\\caption{" + caption + "}\n")
tex.write("\\label{"+ label + "}\n")
tex.write("\\end{figure}\n")
tex.close()
def get_bias(d, curs, strategies):
"""get avg. scores for fixed strategy vs. self games."""
print "get_bias()"
colhdr = []
rowhdr = [str(i)+". "+strategies[i].replace('_',' ') for i in range(len(strategies))]
table = []
for mappaths in engine_maps:
for mappath in mappaths:
path,mapname = os.path.split(mappath)
mapname = mapname.replace('_',' ')
mapname = mapname.replace('.smp','')
if 'switched' in mapname:
mapname = mapname.replace('switched','S.')
else:
mapname = mapname + " N."
colhdr.append(mapname)
bias = get_bias_by_map(curs, strategies, mappath)
table.append(bias)
#print "avg. ", np.mean(bias)
table = np.transpose(table)
fmt = lambda x: "{0:.0f}".format(x) # formatter.
hline = None
label = 'map_bias'
caption = 'Bias by Map and Position'
filename = os.path.join(d, 'map_bias.tex')
write_table(table,fmt,rowhdr,colhdr,label,caption,filename,hline)
def get_bias_by_map(curs,strategies,map):
"""get avg. scores for fixed strategy vs. self games."""
cmd = "select diff from event where event='end' and player=? and opponent=player and map=?"
bias = [None for s in strategies]
for i in range(len(strategies)):
curs.execute(cmd,(strategies[i],map,))
scores = [row[0] for row in curs.fetchall()]
bias[i] = np.median(scores)
return bias
class ConfidenceInterval:
def __init__(self,median,confidence,interval):
self.player = None
self.opponent = None
self.median = median
self.confidence = confidence
self.interval = interval
def __str__(self):
return "{0} {1:.4f} [{2},{3}]".format(self.median,self.confidence,self.interval[0],self.interval[1])
def get_confidence_interval(x,threshold=.95):
"""get tightest interval arount median that exceeds .95 confidence."""
x = x[:] # get a copy and sort it.
x.sort()
n = len(x)
median = np.median(x)
cs = []
for k in range(int(math.floor(n/2.0))):
c = 1 - (2 * scipy.stats.binom.cdf(k,n,0.5)) # binomial CDF of k successes in n samples
if c < .999 and c > threshold:
cs.append(ConfidenceInterval(median,c,[x[k],x[-k-1]]))
if len(cs) > 0:
return cs[-1]
else:
raise Exception("no confidence interval meets requirements")
def get_bernoulli_confidence_intervals(scores,episodes):
intervals = []
for n in episodes:
player_scores = random.sample(scores, n)
intervals.append(bernoulli_confidence(player_scores))
return intervals
def compare_sim_engine(d, scores_dict, strategy_set,strategies):
"""compare strategy performace in simulation to performance in engine"""
print "compare_sim_engine()"
for mapname in mapnames:
compare_sim_engine_by_map(d,scores_dict,strategy_set,strategies,mapname)
def compare_sim_engine_by_map(d, scores_dict, strategy_set,strategies,mapname):
# get simulation scores
fn = "sim_scores_{0}_{1}-game.yaml".format(strategy_set, mapname)
filepath = os.path.join(d, fn)
f = open(filepath,'rb')
simdata = yaml.load(f)
f.close()
sv = simdata[0]['matrix']
s = np.mean(sv,axis=0) # mean of columns
sim_coords = ""
for j in range(len(s)):
sim_coords += "({0},{1}) ".format(j, s[j])
# get mean engine scores
coords = ""
for j in range(len(strategies)):
player = strategies[j]
v = []
for opponent in strategies:
v.extend(get_scores(player,opponent,mapname,scores_dict))
assert len(v) > 0, "no scores for " + strategies[j] + " on " + mapname
coords += " ({0},{1:.2f})\n".format(j, np.mean(v))
# write LaTeX graph
label = "compare_sim_engine_" + mapname
caption = "Scores in Simulation and Engine on \\texttt{" + mapname.replace("_"," ") + "}"
filepath = os.path.join(d, 'compare_sim_engine_'+mapname+'.tex')
tex = open(filepath,'w')
# "sharp plot" or "const plot"
# xticklabel={<command>} or xticklabels={<label list>}
#
# error bars/.cd,y explicit. Need "explicit" to put +- range in coordinates.
#
xtick = "{" + reduce(lambda x, y: str(x)+','+str(y), range(len(strategies))) + "}"
xticklabels= "{" + reduce(lambda x, y: str(x)+'.,'+str(y), range(len(strategies))) + ".}"
txt = """
\\begin{figure}[!ht]
\\centering
\\begin{tikzpicture}
\\begin{axis}[
scaled ticks=false, % disallow scaling tick labels in powers of 10
legend entries={Simulation Mean,Engine Mean},
legend style={at={(1.5,.95)}},
ymajorgrids=true,
xlabel=Strategy,
ylabel=Score,
xtick=""" + xtick + "," + """
xticklabels=""" +xticklabels + """
]
\\addplot+[const plot mark mid] coordinates
{""" + sim_coords + """};
\\addplot+[const plot mark mid] coordinates
{""" + coords + """};
\\end{axis}
\\end{tikzpicture}
\\caption{"""+caption+"""}
\\label{"""+label+"""}
\\end{figure}
"""
tex.write(txt)
tex.close()
# \\addplot+[const plot mark mid,mark=none,style=dashed,draw=brown] coordinates
# {""" + coords_plus + """};
# \\addplot+[const plot mark mid,mark=none,style=dashdotted,draw=black] coordinates
# {""" + coords_minus + """};
# \\addplot+[const plot mark mid,mark=none,style=loosely dotted,draw=green] coordinates
# {""" + median + """};
class MatrixTK:
"""game matrix parser states"""
START=0
CYCLE=1
VALUES_KEYWORD=2
ROWS_KEYWORD=3
ROWS=4
COLS_KEYWORD=5
COLS=6
VALUES=7
SOLN_KEYWORD=8
LENGTH_KEYWORD=9
LENGTH=10
SOLN=11
class MatrixHistory:
def __init__(self,maxCycle):
self.maxCycle = maxCycle
self.nEvents = 0
self.values = None
def games_matrices():
"""show average game matrix values over time."""
epoch_matrices = [MatrixHistory(epoch) for epoch in epochs]
gmfiles = glob.glob("*game_matrix0.txt")
for fn in gmfiles:
f = open(fn,'rb')
for line in f.readlines():
update_game_matrices(line,epoch_matrices)
f.close()
# write game matrix TEX files
fmt = lambda x: "{0:.0f}".format(x) # formatter.
rowhdr = [str(i) + "." for i in range(len(strategies))]
colhdr = [str(i) + "." for i in range(len(strategies))]
for i in range(len(epoch_matrices)):
mh = epoch_matrices[i]
if mh.nEvents > 0:
caption = 'Avg. Game Matrix to Cycle ' + str(epochs[i])
filepath = os.path.join(d, 'matrix_history_' + str(i) + '.tex')
            write_table(mh.values,fmt,rowhdr,colhdr,'matrix_history_'+str(i),caption,filepath)
def update_game_matrices(line,epoch_matrices):
# parse
# cycle 0 values: rows 11 columns 11 -1.00... solution: length 12 0.00 ...
fields = line.split()
state = MatrixTK.START
row = 0
col = 0
solni = 0
matrixHistory = None
for tk in fields:
if state == MatrixTK.START:
assert tk == "cycle"
state = MatrixTK.CYCLE
elif state == MatrixTK.CYCLE:
cycle = int(tk)
state = MatrixTK.VALUES_KEYWORD
elif state == MatrixTK.VALUES_KEYWORD:
assert tk == "values:"
state = MatrixTK.ROWS_KEYWORD
elif state == MatrixTK.ROWS_KEYWORD:
assert tk == "rows"
state = MatrixTK.ROWS
elif state == MatrixTK.ROWS:
rows = int(tk)
state = MatrixTK.COLS_KEYWORD
elif state == MatrixTK.COLS_KEYWORD:
assert tk == 'columns'
state = MatrixTK.COLS
elif state == MatrixTK.COLS:
cols = int(tk)
for i in range(len(epochs)):
if cycle < epochs[i]:
                    matrixHistory = epoch_matrices[i]  # use the accumulator for the epoch containing this cycle
break
if matrixHistory.values:
assert len(matrixHistory.values) == rows
assert len(matrixHistory.values[0]) == cols
else:
matrixHistory.values = [[0 for j in range(cols)] for i in range(rows)]
state = MatrixTK.VALUES
elif state == MatrixTK.VALUES:
matrixHistory.values[row][col] = float(tk)
col += 1
if col >= cols:
col = 0
row += 1
if row >= rows:
state = MatrixTK.SOLN_KEYWORD
elif state == MatrixTK.SOLN_KEYWORD:
assert tk == "solution:"
state = MatrixTK.LENGTH_KEYWORD
elif state == MatrixTK.LENGTH_KEYWORD:
assert tk == "length"
state = MatrixTK.LENGTH
elif state == MatrixTK.LENGTH:
soln_len = int(tk)
soln = [0 for i in range(soln_len)]
state = MatrixTK.SOLN
elif state == MatrixTK.SOLN:
soln[solni] = float(tk)
solni += 1
matrixHistory.nEvents += 1
print "values", matrixHistory.values
def strat_vs_strat_sim_scores(d, strategy_set, strategies):
"""simulated strategy final scores"""
print "strat_vs_strat_sim_scores()"
for mapname in mapnames:
fn = 'sim_scores_'+strategy_set + '_' + mapname+'-game.yaml'
filepath = os.path.join(d, fn)
f = open(filepath,'rb')
simdata = yaml.load(f)
f.close()
strat_vs_strat_sim_scores_map(d,simdata, strategy_set, strategies, mapname)
def strat_vs_strat_sim_scores_map(d,simdata,strategy_set,strategies,mapname):
"""simulated strategy final scores"""
# get YAML source files by running sim-matrix.bat. The BAT file runs
# stratsim WriteGameMatrix.java.
#
# get simulated strategy value matrix
#
sv = simdata[0]['matrix']
mins = np.min(sv,axis=0)
sv.append(mins)
sv.append(max_star(mins)) # mark the maximin columns
fmt = lambda x: "{0:.0f}".format(x) if x.__class__ == float else str(x) # formatter.
rowhdr = [str(j) + "." for j in range(len(strategies))]
#rowhdr.append("average")
rowhdr.append("min.")
rowhdr.append("maxmin")
hline = len(strategies) - 1
colhdr = [str(j) + "." for j in range(len(strategies))]
label = simdata[0]['label']
#caption = simdata[0]['caption']
caption = "Strategy Simulation Scores on \\texttt{" + mapname + "}"
filename = os.path.join(d, 'sim_scores_' + strategy_set + '_' + mapname+'.tex')
write_table(sv,fmt,rowhdr,colhdr,label,caption,filename,hline,bolddiag=True)
def get_sw_vs_strat_sim_scores(d,mapname,position='both'):
"""get sw_vs_strat scores averaged for map and switched position map"""
# the file names aren't systematic, so just map them here.
sim_maps = {
'2bases' :
['sw_vs_strat_sim_2bases-game.yaml',
'sw_vs_strat_sim_2bases_switched.yaml'],
'the-right-strategy' :
['sw_vs_strat_sim_the-right-strategy-game.yaml',
'sw_vs_strat_sim_the-right-strategy-game_switched.yaml']
}
# get map
if position == 'both' or position == 'top':
fn = sim_maps[mapname][0]
else:
fn = sim_maps[mapname][1]
filepath = os.path.join(d, fn)
f = open(filepath,'rb')
simdata = yaml.load(f)
f.close()
sv = simdata[0]['matrix'] # sv: sim values
if position == 'both':
# get switched position map and take average
fn = sim_maps[mapname][1]
filepath = os.path.join(d, fn)
f = open(filepath,'rb')
simdata_switched = yaml.load(f)
f.close()
sv_switched = simdata_switched[0]['matrix']
assert simdata[0]['colhdr'] == simdata_switched[0]['colhdr']
assert len(sv) == len(sv_switched)
for i in range(len(sv)):
for j in range(len(sv[i])):
sv[i][j] = (sv[i][j] + sv_switched[i][j])/2.0
return simdata[0]
def sim_maximin(d, strategy_set):
"""get maximin values for simulated fixed strategies and switching planners"""
print "sim_maximin()"
# table of strategy maximin and switching planner minimums for each map
table = [[None for j in range(len(switching)+1)] for i in range(len(mapnames))]
for i in range(len(mapnames)):
mapname = mapnames[i]
# get strat vs. strat maximin
filepath = os.path.join(d, 'sim_scores_' + strategy_set + '_' + mapname + '-game.yaml')
f = open(filepath,'rb')
simdata = yaml.load(f)
f.close()
sv = simdata[0]['matrix']
table[i][0] = get_maximin(sv)
# get switcher vs. strat mins
simdata = get_sw_vs_strat_sim_scores(d,mapname)
mins = np.min(simdata['matrix'],axis=0)
for j in range(len(switching)):
table[i][j+1] = mins[j]
fmt = lambda x: "{0:.0f}".format(x) # formatter.
rowhdr = ['\\texttt{'+m+'}' for m in mapnames]
hline = None
colhdr = ['Fixed']
colhdr.extend(['\\texttt{'+sw+'}' for sw in switching])
label = 'sim_maximin'
caption = 'Fixed Strategy Maximin and Switching Planner Minimums in Simulation'
filename = os.path.join(d, 'sim_maximin_' + strategy_set + '.tex')
write_table(table,fmt,rowhdr,colhdr,label,caption,filename,hline)
def get_maximin(table):
"""get column-wise maximin value"""
mins = np.min(table,axis=0)
return max(mins)
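# Illustrative sketch (added as documentation; the helper below is new and is not
# called by the analysis): for the 2x2 payoff table below the column minimums are
# [1, 2], so the column player can guarantee a value of 2 with the second column.
def _example_get_maximin():
    table = [[1, 4],
             [3, 2]]
    assert get_maximin(table) == 2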
def engine_maximin(d,means):
"""get maximin values for fixed strategies and switching planners on games played in engine"""
print "engine_maximin()"
# fixed Nash maximin monotone
# 2bases x x x x
# the-right-strategy x x x x
table = [[None for j in range(len(switching)+1)] for i in range(len(mapnames))]
for i in range(len(mapnames)):
mapname = mapnames[i]
table[i][0] = means.s_v_s_maximin_pair[mapname][0]
for j in range(len(switching)):
player = switching[j]
table[i][j+1] = means.sw_v_s_min[mapname][player][0]
fmt = lambda x: "{0:.0f}".format(x) if x else "" # formatter.
rowhdr = ['\\texttt{'+m+'}' for m in mapnames]
hline = None
colhdr = ['Fixed']
colhdr.extend(['\\texttt{'+sw+'}' for sw in switching])
label = 'engine_maximin_means'
caption = 'Switching Planner Minimum Means in Engine'
filename = os.path.join(d, 'engine_maximin_means.tex')
if True:
write_table(table,fmt,rowhdr,colhdr,label,caption,filename,hline)
else:
print_table(table,fmt,rowhdr,colhdr,caption)
def engine_maximin_medians(d,medians):
"""get maximin values for fixed strategies and min values for switching planners on games played in engine"""
print "engine_maximin_medians()"
# Fixed Nash maximin monotone
# 2bases x x x x
# the-right-strategy x x x x
table = [[None for j in range(len(switching)+1)] for i in range(len(mapnames))]
for i in range(len(mapnames)):
mapname = mapnames[i]
interval = medians.s_v_s_maximin_interval[mapname]
table[i][0] = interval.median
for j in range(len(switching)):
player = switching[j]
interval = medians.sw_v_s_min_intervals[mapname][player]
table[i][j+1] = interval.median
fmt = lambda x: "{0:.0f}".format(x) if x else "" # formatter.
rowhdr = ['\\texttt{'+m+'}' for m in mapnames]
hline = None
colhdr = ['Fixed']
colhdr.extend(['\\texttt{'+sw+'}' for sw in switching])
label = 'engine_maximin_medians'
caption = 'Fixed Strategy Maximin and Switching Planner Minimum Medians in Engine'
filename = os.path.join(d, 'engine_maximin_medians.tex')
write_table(table,fmt,rowhdr,colhdr,label,caption,filename,hline)
def engine_maximin_pairs(d,means,score_dict):
"""get maximin values for fixed strategies and min values for switching planners on games played in engine"""
print "engine_maximin_pairs()"
#
#
# player opponent value confidence
# ------------------------------------------
# maximin x x x
# ------------------------------------------
# minimums Nash x x
# maximin x x
# monotone x x
#
fmt = lambda x: "{0}".format(x) if x.__class__ == str else "{0:.0f}".format(x) # formatter.
rowhdr = ['maximin','minimums','','']
hline = 0
colspec = " l | l l r r"
colhdr = ['Player','Opponent','Score','Rate Confidence']
for i in range(len(mapnames)):
table = [[""]*4 for j in range(len(switching)+1)]
mapname = mapnames[i]
c = means.s_v_s_maximin_pair[mapname]
table[0][0] = c[1].replace('_',' ') # player
table[0][1] = c[2].replace('_',' ') # opponent
table[0][2] = c[0] # mean
# calculate confidence interval
v = get_scores(c[1],c[2],mapname,score_dict)
nWins = count_wins(v)
print "mean of scores",np.mean(v)
print nWins,"wins in",len(v)
interval = bernoulli_confidence(v,'wilson')
table[0][3] = "{0:.0f}\% ({1:.0f}\%,{2:.0f}\%)".format(interval[0]*100,
interval[1][0]*100
,interval[1][1]*100)
for j in range(len(switching)):
player = switching[j]
c = means.sw_v_s_min[mapname][player]
table[j+1][0] = c[1] #player
table[j+1][1] = c[2].replace('_',' ') #opponent
table[j+1][2] = c[0] # mean
# calculate confidence interval
v = get_scores(c[1],c[2],mapname,score_dict)
interval = bernoulli_confidence(v,'wilson')
table[j+1][3] = "{0:.0f}\% ({1:.0f}\%,{2:.0f}\%)".format(interval[0]*100,
interval[1][0]*100
,interval[1][1]*100)
filepath = os.path.join(d, 'engine_maximin_pairs_'+mapname+'.tex')
label = 'engine_maximin_pairs_'+mapname
caption = 'Strategy Pairs on \\texttt{'+mapname+'}'
write_table(table,fmt,rowhdr,colhdr,label,caption,filepath,hline,colspec=colspec)
def sw_vs_strat_sim_scores(d):
"""translate game points YAML tables into LaTeX tables."""
print "sw_vs_strat_sim_score()"
# get YAML source files by running orst.stratagusai.stratsim.analysis.SwitchingPlannerSimulation
#
for m in range(len(mapnames)):
mapname = mapnames[m]
# get score averaged for playing from top and bottom of map
simdata = get_sw_vs_strat_sim_scores(d,mapname,position='both')
sw_vs_strat_sim_scores_by_map(d,simdata,mapname,position='both')
# get score for playing from top of map
simdata = get_sw_vs_strat_sim_scores(d,mapname,position='top')
sw_vs_strat_sim_scores_by_map(d,simdata,mapname,position='top')
# get scores for playing from bottom of map
simdata = get_sw_vs_strat_sim_scores(d,mapname,position='bottom')
sw_vs_strat_sim_scores_by_map(d,simdata,mapname,position='bottom')
def sw_vs_strat_sim_scores_by_map(d, simdata, mapname, position):
rowhdr = [str(i)+'. \\texttt{'+simdata['rowhdr'][i]+'}' for i in range(len(simdata['rowhdr']))]
colhdr = ['\\texttt{'+s+'}' for s in simdata['colhdr']]
sv = simdata['matrix']
means = np.mean(sv,axis=0)
mins = np.min(sv,axis=0)
sv.append(means)
sv.append(mins)
fmt = lambda x: "{0:.0f}".format(x) # formatter. show as percent.
hline = len(rowhdr) - 1
rowhdr.append("mean")
rowhdr.append("minimum")
caption = 'Switching Planner Scores in Simulation on \\texttt{' + mapname + "}"
if position == 'top':
caption += ' from North'
elif position == 'bottom':
caption += ' from South'
label = 'sw_vs_strat_sim_score_' + mapname
fn = 'sw_vs_strat_sim_score_' + mapname
if position == 'top' or position == 'bottom':
label += '_' + position
fn += '_' + position
fn += '.tex'
filepath = os.path.join(d, fn)
write_table(sv,fmt,rowhdr,colhdr,label,caption,filepath,hline)
def sw_vs_strat_scores_by_epoch(d,curs,player,opponent,mapname):
i = mapnames.index(mapname)
mappaths = engine_maps[i]
table = [[0 for epoch in epochs] for i in range(len(mappaths))]
rowhdr = []
for i in range(len(mappaths)):
mappath = mappaths[i]
p,m = os.path.split(mappath)
m = m.replace('_',' ')
rowhdr.append("\\texttt{"+player+"} on " + m)
for epoch in range(len(epochs)):
if epoch == 0:
start = 0
else:
start = epochs[epoch-1]
end = epochs[epoch]
cmd = "select avg(diff) from event where player=? and simreplan=1 " + \
" and opponent=? " + \
" and cycle >= ? and cycle < ? " + \
" and map=? "
mean = curs.execute(cmd,(player,opponent,start,end,mappath,)).fetchone()[0]
table[i][epoch] = mean
fmt = lambda x: "{0:.0f}".format(x) # formatter.
colhdr = ["{0:,}".format(s) for s in epochs]
caption = '\\texttt{{{0}}} vs.~\\texttt{{{1}}} Score by Epoch on \\texttt{{{2}}}'.format(player,opponent.replace('_',' '),mapname)
label = '{0}_v_{1}_score_by_epoch_on_{2}'.format(player,opponent,mapname)
filepath = os.path.join(d, label + '.tex')
write_table(table,fmt,rowhdr,colhdr,label,caption,filepath,None)
def sim_minus_engine_scores(d,curs,strategy_set,strategies):
"""sim score matrix - engine score matrix"""
sim_minus_engine_scores_map(d,curs,strategy_set,strategies,None,None)
for i in range(len(planner_maps)):
simmap = sim_maps[i]
mappath = planner_maps[i]
sim_minus_engine_scores_map(d,curs,strategy_set,strategies,simmap,mappath)
def sim_minus_engine_scores_map(d,curs,strategy_set,strategies,simmap, mappath):
# simulation data
if simmap:
fn = 'sim_scores_'+strategy_set+'_'+simmap+'.yaml'
else:
fn = 'sim_scores_'+strategy_set + '.yaml'
filepath = os.path.join(d, fn)
f = open(filepath,'rb')
simdata = yaml.load(f)
f.close()
sv = simdata[0]['matrix']
# engine data
hp = strat_vs_strat_avg_score_data(curs,strategies,mappath)
data = [row[:] for row in sv] # copy sim matrix
for i in range(len(hp)):
for j in range(len(hp[i])):
data[i][j] = data[i][j] - hp[i][j] # minus engine data
fmt = lambda x: "{0:.0f}".format(x) # formatter. show as percent.
rowhdr = [s.replace('_',' ') for s in strategies]
hline = None
colhdr = [str(i) + '.' for i in range(len(strategies))]
if mappath:
path, mapname = os.path.split(mappath)
mapname = mapname.replace('.smp','')
caption = 'Simulation Minus Engine Scores on ' + mapname.replace('_',' ')
label = 'sim_minus_engine_'+mapname
outpath = os.path.join(d,'sim_minus_engine_scores_'+mapname+'.tex')
else:
caption = 'Simulation Minus Engine Scores'
label = 'sim_minus_engine'
outpath = os.path.join(d,'sim_minus_engine_scores.tex')
write_table(data,fmt,rowhdr,colhdr,label,caption,outpath,hline)
def write_game_matrices(d,filename):
f = open(filename,'rb')
matrices = yaml.load(f)
f.close()
for m in matrices:
write_game_matrix(d,m,filename)
def write_game_matrix(d,data,filename):
cycle = data['cycle']
caption = data['caption'].replace("_"," ")
label = data['label']
matrix = data['matrix']
mins = np.min(matrix,axis=0)
matrix.append(mins)
matrix.append(max_star(mins)) # mark the maximin columns
fmt = lambda x: str(x) # formatter.
rowhdr = data['rowhdr']
colhdr = data['colhdr']
hline = len(rowhdr)
rowhdr.append('mins')
rowhdr.append('maximin')
filepath = os.path.join(d, filename.replace(".yaml",'') + "_" + str(cycle) + ".tex")
print filepath
write_table(matrix,fmt,rowhdr,colhdr,label,caption,filepath,hline)
def write_game_choices(d, curs, player, opponent, map):
print "write_game_choices({0},{1},{2})".format(player,opponent,map)
cmd = """select cycle,strategy from event
where player=? and opponent=? and map=? and event='plan' order by cycle"""
curs.execute(cmd,(player,opponent,map+".txt",))
label = "{0}_{1}_choices_{2}".format(player,opponent,map)
filepath = os.path.join(d,label + ".tex")
tex = open(filepath,'w')
today = datetime.date.today()
tex.write("% table written on {0} by {1}\n".format(today.strftime('%Y-%m-%d'),sys.argv[0]))
tex.write("""\\begin{table}[!ht]
\\centering
\\begin{tabular}{l | l}
cycle & strategy\\cr
\\hline
""")
for row in curs.fetchall():
tex.write("{0} & {1}\\cr\n".format(row[0],row[1].replace('_',' ')))
tex.write("""
\\end{tabular}
\\caption{""" + "{0} Choices against {1} on {2}".format(player,opponent.replace('_',' '),map.replace('_',' ')) + """}
\\label{""" + label + """}
\\end{table}
""")
tex.close()
def write_confidence_tables(d, medians):
print "write_confidence_tables()"
for mapname in mapnames:
write_confidence_table(d,medians,mapname)
write_sw_confidence_table(d,medians,mapname)
def write_confidence_table(d, medians, mapname):
"""for each fixed strategy vs. fixed strategy write confidence around mean"""
# using multirows, so can't use write_table()
rowhdr = [str(j) + "." for j in range(len(medians.strategies))]
colhdr = rowhdr
filepath = os.path.join(d, 's_v_s_confidence_' + mapname + '.tex')
today = datetime.date.today()
tex = open(filepath,'w')
tex.write("% table written on {0} by {1}\n".format(today.strftime('%Y-%m-%d'),sys.argv[0]))
tex.write("\\begin{table}[!ht]\n")
tex.write("\\centering\n")
tex.write("\\begin{tabular}{l | ")
for j in range(len(colhdr)):
tex.write(" r ") # assume numbers in cells
tex.write("}\n")
# column header
for c in colhdr:
tex.write(" & " + c)
tex.write("\\cr\n")
tex.write("\\hline\n")
interval_table = medians.s_v_s_intervals[mapname]
median_table = [[None for o in medians.strategies] for p in medians.strategies]
for i in range(len(medians.strategies)):
tex.write("\\multirow{3}{*}{"+ rowhdr[i] + "}")
# write high of confidence interval
for j in range(len(medians.strategies)):
confidence = interval_table[i][j]
tex.write("& {0:.0f}".format(confidence.interval[1]))
tex.write("\\\\")
# write median of confidence interval
for j in range(len(medians.strategies)):
confidence = interval_table[i][j]
median_table[i][j] = confidence.median
tex.write(" & {0:.0f}".format(confidence.median))
tex.write("\\\\")
# write low of confidence interval
for j in range(len(medians.strategies)):
confidence = interval_table[i][j]
tex.write(" & {0:.0f}".format(confidence.interval[0]))
tex.write("\\\\")
tex.write("\n")
tex.write("\\hline\n")
# add minimum
mins = np.min(median_table,axis=0) # column mins
tex.write("\\hline\n")
tex.write("minimums")
for m in mins:
tex.write(" & {0:.0f}".format(m))
tex.write("\\cr\n")
tex.write("maximin")
for m in max_star(mins):
tex.write(" & {0}".format(m))
tex.write("\\cr\n")
label = 's_v_s_confidence_' + mapname
caption = 'Fixed Strategy Confidence on ' + mapname
tex.write("\end{tabular}\n")
tex.write("\\caption{" + caption + "}\n")
tex.write("\\label{" + label + "}\n")
tex.write("\\end{table}\n")
tex.close()
print '\\input{' + filepath.replace('.tex','') + '}'
def write_sw_confidence_table(d, medians, mapname):
"""for each switching vs. fixed strategy write confidence around mean"""
# using multirows, so can't use write_table()
rowhdr = [str(j) + ". " + medians.strategies[j].replace('_',' ') for j in range(len(medians.strategies))]
colhdr = ["\\texttt{"+sw+"}" for sw in switching]
filepath = os.path.join(d, 'sw_v_s_confidence_' + mapname + '.tex')
today = datetime.date.today()
tex = open(filepath,'w')
tex.write("% table written on {0} by {1}\n".format(today.strftime('%Y-%m-%d'),sys.argv[0]))
tex.write("\\begin{table}[!ht]\n")
tex.write("\\centering\n")
tex.write("\\begin{tabular}{l | ")
for j in range(len(colhdr)):
tex.write(" r ") # assume numbers in cells
tex.write("}\n")
# column header
for c in colhdr:
tex.write(" & " + c)
tex.write("\\cr\n")
tex.write("\\hline\n")
interval_table = medians.sw_v_s_intervals[mapname]
median_table = [[None for sw in switching] for s in medians.strategies]
for i in range(len(medians.strategies)):
tex.write("\\multirow{3}{*}{"+ rowhdr[i] + "}")
# write high of confidence interval
for j in range(len(switching)):
confidence = interval_table[i][j]
tex.write("& {0:.0f}".format(confidence.interval[1]))
tex.write("\\\\")
# write median of confidence interval
for j in range(len(switching)):
confidence = interval_table[i][j]
median_table[i][j] = confidence.median
tex.write(" & {0:.0f}".format(confidence.median))
tex.write("\\\\")
# write low of confidence interval
for j in range(len(switching)):
confidence = interval_table[i][j]
tex.write(" & {0:.0f}".format(confidence.interval[0]))
tex.write("\\\\")
tex.write("\n")
tex.write("\\hline\n")
# add minimum
mins = np.min(median_table,axis=0) # column mins
tex.write("\\hline\n")
tex.write("minimums")
for m in mins:
tex.write(" & {0:.0f}".format(m))
tex.write("\\cr\n")
label = 'sw_v_s_confidence_' + mapname
caption = 'Switching Planner Confidence on ' + mapname
tex.write("\end{tabular}\n")
tex.write("\\caption{" + caption + "}\n")
tex.write("\\label{" + label + "}\n")
tex.write("\\end{table}\n")
tex.close()
print '\\input{' + filepath.replace('.tex','') + '}'
def get_classification_rate(scores_dict,strategies):
"""what percentage of confidence intervals fall fully positive or fully negative?"""
n = 0 # number of confidence intervals fall fully positive or fully negative
nIntervals = 0
for player in strategies:
for opponent in strategies:
for mapname in mapnames:
scores = scores_dict[(player,opponent,mapname)] # get_strat_v_strat_scores2(curs,player,opponent,mappath)
assert len(scores) == 50, str(len(scores))+" scores for "+player+" vs. "+opponent+" on " + mapname
#intervals = get_confidence_intervals(player,scores,[50])
intervals = [] # FIXME: restore the get_confidence_intervals call above; with an empty list the assert below always fails
assert len(intervals) == 1
i = intervals[0]
nIntervals += 1
if np.sign(i.interval[0]) == np.sign(i.interval[1]):
n += 1
print "percent of confidence intervals fall fully positive or fully negative is {0:.2f}.".format(n/float(nIntervals))
def get_scores(player,opponent,mapname,scores_dict,combine=True):
"""get scores on forward and switched maps"""
v = scores_dict[(player,opponent,mapname)][:] # make copy
assert v, "No games for {0} vs. {1} on {2}".format(player,opponent,mapname)
if combine and player != opponent:
v_switched = scores_dict[(opponent,player,mapname)]
assert v_switched
v.extend([-x for x in v_switched])
return v
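# Illustrative note on combine=True (values hypothetical): the sample for
# (player, opponent, map) is extended with the negated entries stored under
# (opponent, player, map), so a +30 recorded from the opponent's perspective
# contributes -30 to the pooled sample seen from the player's side.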
def get_mean(player,opponent,mapname,scores_dict,combine=True):
v = get_scores(player,opponent,mapname,scores_dict,combine)
return np.mean(v)
def get_median(player,opponent,mapname,scores_dict,combine=True):
v = get_scores(player,opponent,mapname,scores_dict,combine)
return np.median(v)
def get_rate(player,opponent,mapname,scores_dict,combine=True):
v = get_scores(player,opponent,mapname,scores_dict,combine)
return count_wins(v)/float(len(v))
def get_score_dict(curs,strategies):
"""get dictionary of scores for player vs. opponent on map """
scores = {}
cmd = "select diff from event where event='end' and player=? and opponent=? and map=?"
for mappaths in engine_maps:
path,mapname = os.path.split(mappaths[0])
mapname = mapname.replace('.smp','')
# fixed strat vs. fixed strat
# match pairs defined in configs.py
for i in range(len(strategies)):
player = strategies[i]
for j in range(i+1):
opponent = strategies[j]
# get player vs. opponent on map scores
curs.execute(cmd,(player,opponent,mappaths[0],))
pair_scores = [row[0] for row in curs.fetchall()]
scores[(player,opponent,mapname)] = pair_scores
# get player vs. opponent on switched map scores
curs.execute(cmd,(player,opponent,mappaths[1],))
if player == opponent:
pair_scores = [row[0] for row in curs.fetchall()]
scores[(opponent,player,mapname)].extend(pair_scores)
else:
pair_scores = [-row[0] for row in curs.fetchall()]
scores[(opponent,player,mapname)] = pair_scores
# switching vs. fixed strat games
for player in switching:
for opponent in strategies:
# get player vs. opponent on map scores
curs.execute(cmd,(player,opponent,mappaths[0],))
pair_scores = [row[0] for row in curs.fetchall()]
scores[(player,opponent,mapname)] = pair_scores
# get player vs. opponent on switched map scores
curs.execute(cmd,(player,opponent,mappaths[1],))
pair_scores = [-row[0] for row in curs.fetchall()]
scores[(opponent,player,mapname)] = pair_scores
# switching vs. switching
for i in range(len(switching)):
player = switching[i]
for j in range(i): # [0,...,i-1]
opponent = switching[j]
# get player vs. opponent on map scores
curs.execute(cmd,(player,opponent,mappaths[0],))
pair_scores = [row[0] for row in curs.fetchall()]
key = (player,opponent,mapname)
scores[key] = pair_scores
# get player vs. opponent on switched map scores
curs.execute(cmd,(player,opponent,mappaths[1],))
pair_scores = [-row[0] for row in curs.fetchall()]
key = (opponent,player,mapname)
scores[key] = pair_scores
# switching vs. builtin
for mappaths in script_maps:
path,mapname = os.path.split(mappaths[0])
mapname = mapname.replace('_PvC.smp','')
for player in switching:
opponent = 'built-in'
# get player vs. opponent on map scores
curs.execute(cmd,(player,opponent,mappaths[0],))
pair_scores = [row[0] for row in curs.fetchall()]
scores[(player,opponent,mapname)] = pair_scores
# get player vs. opponent on switched map scores
curs.execute(cmd,(player,opponent,mappaths[1],))
pair_scores = [-row[0] for row in curs.fetchall()]
scores[(opponent,player,mapname)] = pair_scores
return scores
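# Sketch of the dictionary shape built above (keys and values are
# hypothetical): scores[('strat_a', 'strat_b', 'map1')] might hold [12, -4, 30],
# one 'diff' per 'end' event of that pairing; games on the switched map are
# generally stored under the reversed (opponent, player, mapname) key with the
# sign of diff flipped, so entries read from the first-named player's view.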
def build_db(d):
"""open event database and return connection."""
dbpath = os.path.join(d, 'events.db')
# connect to database and create table.
if os.path.exists(dbpath):
os.remove(dbpath)
conn = sqlite3.connect(dbpath)
curs = conn.cursor()
curs.execute('''create table event
(game int,
event text,
playerId text,
player text,
strategy text,
simreplan int,
opponent text,
predicted text,
predicted_diff int,
actual text,
diff int,
cycle int,
map text)''')
csvfiles = glob.glob(d + '/*_0.csv') # non-simulation files. sim files end in *_sim.csv
if len(csvfiles) == 0:
msg = "No input files found."
raise Exception(msg)
game = 0
for filename in csvfiles:
file = open(filename, 'rb')
rd = csv.reader(file)
for row in rd:
event = row[0]
row.insert(0,game) # add game ID
curs.execute("""insert into event
values (?,?,?,?,?,?,?,?,?,?,?,?,?)""", row)
if event == 'end':
game += 1
file.close()
conn.commit()
return conn
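# Hedged note on the expected CSV layout (inferred from the insert statement
# above, not documented elsewhere in the source): each row of a *_0.csv file
# should supply the twelve fields after `game` in table order -- event,
# playerId, player, strategy, simreplan, opponent, predicted, predicted_diff,
# actual, diff, cycle, map -- and build_db() prepends the running game id,
# bumping it whenever an 'end' event is read.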
def open_db(d):
"""open event database and return connection."""
dbpath = os.path.join(d, 'events.db')
if not os.path.exists(dbpath):
msg = "Error: database file", dbpath, "does not exist."
raise Error(msg)
conn = sqlite3.connect(dbpath)
return conn
def build_score_dictionary(d,curs,strategies):
# get dictionary of score arrays indexed by (player,opponent,mappath) tuples
scores = get_score_dict(curs,strategies)
mfile = open(os.path.join(d,'score_dict.pkl'),'wb')
pickle.dump(scores,mfile)
mfile.close()
return scores
def open_score_dictionary(d,curs,strategies):
fn = os.path.join(d,'score_dict.pkl')
if not os.path.exists(fn):
return build_score_dictionary(d,curs,strategies)
else:
mfile = open(fn,'rb')
scores = pickle.load(mfile)
mfile.close()
return scores
|
apache-2.0
|
rjonaitis/opengreenhouse
|
server.py
|
2
|
6191
|
#!/usr/bin/env python3
import os
import http.server
import bottle
import json
import urllib.parse
import numpy
import pandas
from serial import Serial
from serial.serialutil import SerialException
import time
from threading import Thread
from queue import Queue, Empty
import sys
SERIAL_DEVICE = "/dev/arduino"
SERIAL_RATE = 115200
HTTP_HOST = '127.0.0.1'
HTTP_PORT = 8000
WINDOW_OPEN_POSITION = 5000
WINDOW_CLOSED_POSITION = 0
DOOR_OPEN_POSITION = -9500
DOOR_CLOSED_POSITION = 0
WIND_THRESHOLD = 200
WIND_MEAN_TIME = 10
ROOT = os.path.dirname(os.path.realpath(__file__))
WEBROOT = os.path.join(ROOT, "webroot")
class Arduino:
def __init__(self, mock):
self.mock = mock
self.pipe = None
self.thread = Thread(target=self.interact)
self.sendq = Queue()
self.recvq = Queue()
self.state = {}
self.wind_fir = []
self.window_close = False
self.window_prev = 0
self.door_prev = 0
def log_filename(self, key):
return os.path.join(ROOT, 'log', key)
def log_value(self, key, value):
with open(self.log_filename(key), "a") as f:
f.write("{} {}\n".format(time.time(), value))
def handle(self):
wind = self.state.get('wind', 0)
if not self.window_close and wind > WIND_THRESHOLD:
self.window_close = True
self.window_prev = self.state.get('window', 0)
self.door_prev = self.state.get('door', 0)
self.put('window', 0)
self.put('door', 0)
elif self.window_close and wind < WIND_THRESHOLD / 4:
self.window_close = False
self.put('window', self.window_prev)
self.put('door', self.door_prev)
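# Sketch of the hysteresis implemented in handle() (thresholds come from the
# constants above): when wind exceeds WIND_THRESHOLD (200) the window and door
# are driven to 0 and their previous positions remembered; they are restored
# only once wind falls below WIND_THRESHOLD / 4 (50), so readings hovering
# around the threshold do not toggle the actuators repeatedly.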
def interact(self):
serial = None
while True:
try:
if serial is None and not self.mock:
try:
serial = Serial(SERIAL_DEVICE, SERIAL_RATE)
print("Connecting to Arduino")
except SerialException:
print("Error opening serial. To use without arduino: ./server.py --mock")
os._exit(1)
try:
while True:
cmd = self.sendq.get(block=False)
if self.mock:
print("SERIAL WRITE:", cmd)
else:
serial.write(cmd.encode('ascii'))
serial.write(b'\n')
except Empty:
pass
if self.mock:
time.sleep(1)
line = b'temp 18\n'
else:
line = serial.readline()
try:
key, value = line.decode('ascii').strip().split(' ')
value = self.from_arduino(key, int(value))
self.log_value(key, value)
self.state[key] = value
self.handle()
self.recvq.put((key, value))
except ValueError:
print("Malformed input from arduino:", line)
continue
except UnicodeDecodeError:
print("Malformed input from arduino:", line)
continue
except SerialException:
if serial is not None:
serial.close()
serial = None
print("Disconnecting from Arduino")
def start(self):
self.thread.start()
def from_arduino(self, key, value):
if key == 'window':
return int(100 * (value - WINDOW_CLOSED_POSITION) / (WINDOW_OPEN_POSITION - WINDOW_CLOSED_POSITION))
if key == 'door':
return int(100 * (value - DOOR_CLOSED_POSITION) / (DOOR_OPEN_POSITION - DOOR_CLOSED_POSITION))
if key == 'wind':
if value < 0:
value = 0
self.wind_fir.append(int(value))
self.wind_fir = self.wind_fir[-WIND_MEAN_TIME:]
return int(numpy.mean(self.wind_fir))
return int(value)
def to_arduino(self, key, value):
if key == 'window':
return int((WINDOW_OPEN_POSITION - WINDOW_CLOSED_POSITION) * value / 100) + WINDOW_CLOSED_POSITION
if key == 'door':
return int((DOOR_OPEN_POSITION - DOOR_CLOSED_POSITION) * value / 100) + DOOR_CLOSED_POSITION
return int(value)
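# Worked example of the percent <-> step conversion using the constants at the
# top of this file: to_arduino('window', 50) -> int(5000 * 50 / 100) + 0 = 2500
# steps, and from_arduino('window', 2500) -> int(100 * 2500 / 5000) = 50
# percent; the door mapping works the same way but runs toward
# DOOR_OPEN_POSITION = -9500.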
def get(self, key):
return self.state.get(key, None)
def put(self, key, value):
self.sendq.put("{} {}".format(key, self.to_arduino(key, value)))
return value
def timeseries(self, name, start, end=None, resolution=None):
if end is None:
end = time.time()
start = float(start)
end = float(end)
log = pandas.read_csv(self.log_filename(name), sep=' ', names=('time', 'value'))
log = log.dropna()
log.time = log.time.astype(float)
log.value = log.value.astype(int)
log = log[(start <= log.time) & (log.time <= end)]
value = log.value
value.index = pandas.to_datetime(log.time, unit='s')
if len(value) and resolution is not None and resolution != 1:
value = value.resample('{}s'.format(resolution)).mean().dropna()  # assumes pandas >= 0.18, where resample() needs an explicit aggregation
return {
'time': [int(t.timestamp()) for t in value.index],
'value': [int(x) for x in value],
}
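# Illustrative return value of timeseries() (numbers made up): querying 'temp'
# over a window might yield
#   {'time': [1490000000, 1490000060], 'value': [18, 19]}
# i.e. parallel lists of unix seconds and integer readings taken from the
# append-only log/<key> file written by log_value().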
ARDUINO = Arduino(mock='--mock' in sys.argv)
def ok(value):
return {"ok": True, "value": value}
@bottle.get('/rpc/series/<key>/')
def series(key):
q = bottle.request.query
return ok(ARDUINO.timeseries(key, q.start, q.end, q.resolution))
@bottle.get('/rpc/<key>/')
def get(key):
return ok(ARDUINO.get(key))
@bottle.put('/rpc/<key>/')
def put(key):
q = bottle.request.query
value = int(q.value)
return ok(ARDUINO.put(key, value))
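# Hedged usage sketch of the HTTP API defined above (host/port come from the
# constants at the top; which keys exist depends on what the Arduino reports):
#   curl http://127.0.0.1:8000/rpc/temp/
#       -> {"ok": true, "value": 18}
#   curl -X PUT 'http://127.0.0.1:8000/rpc/window/?value=50'
#   curl 'http://127.0.0.1:8000/rpc/series/temp/?start=0&end=1700000000&resolution=60'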
def main():
print("Starting up...")
os.chdir(WEBROOT)
ARDUINO.start()
sys.argv = [sys.argv[0]]
bottle.run(host=HTTP_HOST, port=HTTP_PORT, server='cherrypy')
if __name__ == "__main__":
main()
|
gpl-3.0
|
toastedcornflakes/scikit-learn
|
sklearn/manifold/tests/test_locally_linear.py
|
27
|
5247
|
from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that the weights in each row sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
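# Background note on the assertions above: each row of the barycenter graph
# holds the weights that reconstruct a sample from its k nearest neighbours,
# those weights sum to one per row, and A.dot(X) therefore approximates X,
# which is why the relative reconstruction error is required to be small.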
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
# Test the error raised when parameter passed to lle is invalid
def test_lle_init_parameters():
X = np.random.rand(5, 3)
clf = manifold.LocallyLinearEmbedding(eigen_solver="error")
msg = "unrecognized eigen_solver 'error'"
assert_raise_message(ValueError, msg, clf.fit, X)
clf = manifold.LocallyLinearEmbedding(method="error")
msg = "unrecognized method 'error'"
assert_raise_message(ValueError, msg, clf.fit, X)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
|
bsd-3-clause
|
xuewei4d/scikit-learn
|
sklearn/utils/tests/test_sparsefuncs.py
|
8
|
31943
|
import pytest
import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from numpy.random import RandomState
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
incr_mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import (assign_rows_csr,
inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2,
csr_row_norms)
from sklearn.utils._testing import assert_allclose
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
with pytest.raises(TypeError):
mean_variance_axis(X_lil, axis=0)
X_csr = sp.csr_matrix(X_lil)
X_csc = sp.csc_matrix(X_lil)
expected_dtypes = [(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64)]
for input_dtype, output_dtype in expected_dtypes:
X_test = X.astype(input_dtype)
for X_sparse in (X_csr, X_csc):
X_sparse = X_sparse.astype(input_dtype)
X_means, X_vars = mean_variance_axis(X_sparse, axis=0)
assert X_means.dtype == output_dtype
assert X_vars.dtype == output_dtype
assert_array_almost_equal(X_means, np.mean(X_test, axis=0))
assert_array_almost_equal(X_vars, np.var(X_test, axis=0))
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
with pytest.raises(TypeError):
mean_variance_axis(X_lil, axis=1)
X_csr = sp.csr_matrix(X_lil)
X_csc = sp.csc_matrix(X_lil)
expected_dtypes = [(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64)]
for input_dtype, output_dtype in expected_dtypes:
X_test = X.astype(input_dtype)
for X_sparse in (X_csr, X_csc):
X_sparse = X_sparse.astype(input_dtype)
X_means, X_vars = mean_variance_axis(X_sparse, axis=0)
assert X_means.dtype == output_dtype
assert X_vars.dtype == output_dtype
assert_array_almost_equal(X_means, np.mean(X_test, axis=0))
assert_array_almost_equal(X_vars, np.var(X_test, axis=0))
@pytest.mark.parametrize(['Xw', 'X', 'weights'],
[
([[0, 0, 1], [0, 2, 3]],
[[0, 0, 1], [0, 2, 3]],
[1, 1, 1]),
([[0, 0, 1], [0, 1, 1]],
[[0, 0, 0, 1], [0, 1, 1, 1]],
[1, 2, 1]),
([[0, 0, 1], [0, 1, 1]],
[[0, 0, 1], [0, 1, 1]],
None),
([[0, np.nan, 2],
[0, np.nan, np.nan]],
[[0, np.nan, 2],
[0, np.nan, np.nan]],
[1., 1., 1.]),
([[0, 0],
[1, np.nan],
[2, 0],
[0, 3],
[np.nan, np.nan],
[np.nan, 2]],
[[0, 0, 0],
[1, 1, np.nan],
[2, 2, 0],
[0, 0, 3],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, 2]],
[2., 1.]),
([[1, 0, 1], [0, 3, 1]],
[[1, 0, 0, 0, 1], [0, 3, 3, 3, 1]],
np.array([1, 3, 1]))
]
)
@pytest.mark.parametrize("sparse_constructor",
[sp.csc_matrix, sp.csr_matrix])
@pytest.mark.parametrize("dtype",
[np.float32, np.float64])
def test_incr_mean_variance_axis_weighted_axis1(Xw, X, weights,
sparse_constructor,
dtype):
axis = 1
Xw_sparse = sparse_constructor(Xw).astype(dtype)
X_sparse = sparse_constructor(X).astype(dtype)
last_mean = np.zeros(np.shape(Xw)[0], dtype=dtype)
last_var = np.zeros_like(last_mean, dtype=dtype)
last_n = np.zeros_like(last_mean, dtype=np.int64)
means0, vars0, n_incr0 = incr_mean_variance_axis(
X=X_sparse, axis=axis, last_mean=last_mean, last_var=last_var,
last_n=last_n, weights=None)
means_w0, vars_w0, n_incr_w0 = incr_mean_variance_axis(
X=Xw_sparse, axis=axis, last_mean=last_mean, last_var=last_var,
last_n=last_n, weights=weights)
assert means_w0.dtype == dtype
assert vars_w0.dtype == dtype
assert n_incr_w0.dtype == dtype
means_simple, vars_simple = mean_variance_axis(X=X_sparse, axis=axis)
assert_array_almost_equal(means0, means_w0)
assert_array_almost_equal(means0, means_simple)
assert_array_almost_equal(vars0, vars_w0)
assert_array_almost_equal(vars0, vars_simple)
assert_array_almost_equal(n_incr0, n_incr_w0)
# check second round for incremental
means1, vars1, n_incr1 = incr_mean_variance_axis(
X=X_sparse, axis=axis, last_mean=means0, last_var=vars0,
last_n=n_incr0, weights=None)
means_w1, vars_w1, n_incr_w1 = incr_mean_variance_axis(
X=Xw_sparse, axis=axis, last_mean=means_w0, last_var=vars_w0,
last_n=n_incr_w0, weights=weights)
assert_array_almost_equal(means1, means_w1)
assert_array_almost_equal(vars1, vars_w1)
assert_array_almost_equal(n_incr1, n_incr_w1)
assert means_w1.dtype == dtype
assert vars_w1.dtype == dtype
assert n_incr_w1.dtype == dtype
@pytest.mark.parametrize(['Xw', 'X', 'weights'],
[
([[0, 0, 1], [0, 2, 3]],
[[0, 0, 1], [0, 2, 3]],
[1, 1]),
([[0, 0, 1], [0, 1, 1]],
[[0, 0, 1], [0, 1, 1], [0, 1, 1]],
[1, 2]),
([[0, 0, 1], [0, 1, 1]],
[[0, 0, 1], [0, 1, 1]],
None),
([[0, np.nan, 2],
[0, np.nan, np.nan]],
[[0, np.nan, 2],
[0, np.nan, np.nan]],
[1., 1.]),
([[0, 0, 1, np.nan, 2, 0],
[0, 3, np.nan, np.nan, np.nan, 2]],
[[0, 0, 1, np.nan, 2, 0],
[0, 0, 1, np.nan, 2, 0],
[0, 3, np.nan, np.nan, np.nan, 2]],
[2., 1.]),
([[1, 0, 1], [0, 0, 1]],
[[1, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1]],
np.array([1, 3]))
]
)
@pytest.mark.parametrize("sparse_constructor",
[sp.csc_matrix, sp.csr_matrix])
@pytest.mark.parametrize("dtype",
[np.float32, np.float64])
def test_incr_mean_variance_axis_weighted_axis0(Xw, X, weights,
sparse_constructor,
dtype):
axis = 0
Xw_sparse = sparse_constructor(Xw).astype(dtype)
X_sparse = sparse_constructor(X).astype(dtype)
last_mean = np.zeros(np.size(Xw, 1), dtype=dtype)
last_var = np.zeros_like(last_mean)
last_n = np.zeros_like(last_mean, dtype=np.int64)
means0, vars0, n_incr0 = incr_mean_variance_axis(
X=X_sparse, axis=axis, last_mean=last_mean, last_var=last_var,
last_n=last_n, weights=None)
means_w0, vars_w0, n_incr_w0 = incr_mean_variance_axis(
X=Xw_sparse, axis=axis, last_mean=last_mean, last_var=last_var,
last_n=last_n, weights=weights)
assert means_w0.dtype == dtype
assert vars_w0.dtype == dtype
assert n_incr_w0.dtype == dtype
means_simple, vars_simple = mean_variance_axis(X=X_sparse, axis=axis)
assert_array_almost_equal(means0, means_w0)
assert_array_almost_equal(means0, means_simple)
assert_array_almost_equal(vars0, vars_w0)
assert_array_almost_equal(vars0, vars_simple)
assert_array_almost_equal(n_incr0, n_incr_w0)
# check second round for incremental
means1, vars1, n_incr1 = incr_mean_variance_axis(
X=X_sparse, axis=axis, last_mean=means0, last_var=vars0,
last_n=n_incr0, weights=None)
means_w1, vars_w1, n_incr_w1 = incr_mean_variance_axis(
X=Xw_sparse, axis=axis, last_mean=means_w0, last_var=vars_w0,
last_n=n_incr_w0, weights=weights)
assert_array_almost_equal(means1, means_w1)
assert_array_almost_equal(vars1, vars_w1)
assert_array_almost_equal(n_incr1, n_incr_w1)
assert means_w1.dtype == dtype
assert vars_w1.dtype == dtype
assert n_incr_w1.dtype == dtype
def test_incr_mean_variance_axis():
for axis in [0, 1]:
rng = np.random.RandomState(0)
n_features = 50
n_samples = 10
if axis == 0:
data_chunks = [rng.randint(0, 2, size=n_features)
for i in range(n_samples)]
else:
data_chunks = [rng.randint(0, 2, size=n_samples)
for i in range(n_features)]
# default params for incr_mean_variance
last_mean = np.zeros(n_features) if axis == 0 else np.zeros(n_samples)
last_var = np.zeros_like(last_mean)
last_n = np.zeros_like(last_mean, dtype=np.int64)
# Test errors
X = np.array(data_chunks[0])
X = np.atleast_2d(X)
X = X.T if axis == 1 else X
X_lil = sp.lil_matrix(X)
X_csr = sp.csr_matrix(X_lil)
with pytest.raises(TypeError):
incr_mean_variance_axis(X=axis, axis=last_mean, last_mean=last_var,
last_var=last_n)
with pytest.raises(TypeError):
incr_mean_variance_axis(X_lil, axis=axis, last_mean=last_mean,
last_var=last_var, last_n=last_n)
# Test _incr_mean_and_var with a 1 row input
X_means, X_vars = mean_variance_axis(X_csr, axis)
X_means_incr, X_vars_incr, n_incr = \
incr_mean_variance_axis(X_csr, axis=axis, last_mean=last_mean,
last_var=last_var, last_n=last_n)
assert_array_almost_equal(X_means, X_means_incr)
assert_array_almost_equal(X_vars, X_vars_incr)
# X.shape[axis] picks # samples
assert_array_equal(X.shape[axis], n_incr)
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis)
assert_array_almost_equal(X_means, X_means_incr)
assert_array_almost_equal(X_vars, X_vars_incr)
assert_array_equal(X.shape[axis], n_incr)
# Test _incremental_mean_and_var with whole data
X = np.vstack(data_chunks)
X = X.T if axis == 1 else X
X_lil = sp.lil_matrix(X)
X_csr = sp.csr_matrix(X_lil)
X_csc = sp.csc_matrix(X_lil)
expected_dtypes = [(np.float32, np.float32),
(np.float64, np.float64),
(np.int32, np.float64),
(np.int64, np.float64)]
for input_dtype, output_dtype in expected_dtypes:
for X_sparse in (X_csr, X_csc):
X_sparse = X_sparse.astype(input_dtype)
last_mean = last_mean.astype(output_dtype)
last_var = last_var.astype(output_dtype)
X_means, X_vars = mean_variance_axis(X_sparse, axis)
X_means_incr, X_vars_incr, n_incr = \
incr_mean_variance_axis(X_sparse, axis=axis,
last_mean=last_mean,
last_var=last_var,
last_n=last_n)
assert X_means_incr.dtype == output_dtype
assert X_vars_incr.dtype == output_dtype
assert_array_almost_equal(X_means, X_means_incr)
assert_array_almost_equal(X_vars, X_vars_incr)
assert_array_equal(X.shape[axis], n_incr)
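# Minimal sketch of the incremental-update call pattern exercised above
# (assuming a sparse matrix split into chunks X1, X2 with d columns):
#   m, v, n = incr_mean_variance_axis(X1, axis=0, last_mean=np.zeros(d),
#                                     last_var=np.zeros(d),
#                                     last_n=np.zeros(d, dtype=np.int64))
#   m, v, n = incr_mean_variance_axis(X2, axis=0, last_mean=m, last_var=v,
#                                     last_n=n)
# after the second call m and v should match mean_variance_axis on the
# vertically stacked data.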
@pytest.mark.parametrize(
"sparse_constructor", [sp.csc_matrix, sp.csr_matrix]
)
def test_incr_mean_variance_axis_dim_mismatch(sparse_constructor):
"""Check that we raise proper error when axis=1 and the dimension mismatch.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/pull/18655
"""
n_samples, n_features = 60, 4
rng = np.random.RandomState(42)
X = sparse_constructor(rng.rand(n_samples, n_features))
last_mean = np.zeros(n_features)
last_var = np.zeros_like(last_mean)
last_n = np.zeros(last_mean.shape, dtype=np.int64)
kwargs = dict(last_mean=last_mean, last_var=last_var, last_n=last_n)
mean0, var0, _ = incr_mean_variance_axis(X, axis=0, **kwargs)
assert_allclose(np.mean(X.toarray(), axis=0), mean0)
assert_allclose(np.var(X.toarray(), axis=0), var0)
# test ValueError if axis=1 and last_mean.size == n_features
with pytest.raises(ValueError):
incr_mean_variance_axis(X, axis=1, **kwargs)
# test inconsistent shapes of last_mean, last_var, last_n
kwargs = dict(last_mean=last_mean[:-1], last_var=last_var, last_n=last_n)
with pytest.raises(ValueError):
incr_mean_variance_axis(X, axis=0, **kwargs)
@pytest.mark.parametrize(
"X1, X2",
[
(sp.random(5, 2, density=0.8, format='csr', random_state=0),
sp.random(13, 2, density=0.8, format='csr', random_state=0)),
(sp.random(5, 2, density=0.8, format='csr', random_state=0),
sp.hstack([sp.csr_matrix(np.full((13, 1), fill_value=np.nan)),
sp.random(13, 1, density=0.8, random_state=42)],
format="csr"))
]
)
def test_incr_mean_variance_axis_equivalence_mean_variance(X1, X2):
# non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/16448
# check that computing the incremental mean and variance is equivalent to
# computing the mean and variance on the stacked dataset.
axis = 0
last_mean, last_var = np.zeros(X1.shape[1]), np.zeros(X1.shape[1])
last_n = np.zeros(X1.shape[1], dtype=np.int64)
updated_mean, updated_var, updated_n = incr_mean_variance_axis(
X1, axis=axis, last_mean=last_mean, last_var=last_var, last_n=last_n
)
updated_mean, updated_var, updated_n = incr_mean_variance_axis(
X2, axis=axis, last_mean=updated_mean, last_var=updated_var,
last_n=updated_n
)
X = sp.vstack([X1, X2])
assert_allclose(updated_mean, np.nanmean(X.A, axis=axis))
assert_allclose(updated_var, np.nanvar(X.A, axis=axis))
assert_allclose(updated_n, np.count_nonzero(~np.isnan(X.A), axis=0))
def test_incr_mean_variance_no_new_n():
# check the behaviour when we update the variance with an empty matrix
axis = 0
X1 = sp.random(5, 1, density=0.8, random_state=0).tocsr()
X2 = sp.random(0, 1, density=0.8, random_state=0).tocsr()
last_mean, last_var = np.zeros(X1.shape[1]), np.zeros(X1.shape[1])
last_n = np.zeros(X1.shape[1], dtype=np.int64)
last_mean, last_var, last_n = incr_mean_variance_axis(
X1, axis=axis, last_mean=last_mean, last_var=last_var, last_n=last_n
)
# update the statistics with an empty matrix, which should be ignored
updated_mean, updated_var, updated_n = incr_mean_variance_axis(
X2, axis=axis, last_mean=last_mean, last_var=last_var, last_n=last_n
)
assert_allclose(updated_mean, last_mean)
assert_allclose(updated_var, last_var)
assert_allclose(updated_n, last_n)
def test_incr_mean_variance_n_float():
# check the behaviour when last_n is just a number
axis = 0
X = sp.random(5, 2, density=0.8, random_state=0).tocsr()
last_mean, last_var = np.zeros(X.shape[1]), np.zeros(X.shape[1])
last_n = 0
_, _, new_n = incr_mean_variance_axis(
X, axis=axis, last_mean=last_mean, last_var=last_var, last_n=last_n
)
assert_allclose(new_n, np.full(X.shape[1], X.shape[0]))
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize("sparse_constructor", [sp.csc_matrix, sp.csr_matrix])
def test_incr_mean_variance_axis_ignore_nan(axis, sparse_constructor):
old_means = np.array([535., 535., 535., 535.])
old_variances = np.array([4225., 4225., 4225., 4225.])
old_sample_count = np.array([2, 2, 2, 2], dtype=np.int64)
X = sparse_constructor(
np.array([[170, 170, 170, 170],
[430, 430, 430, 430],
[300, 300, 300, 300]]))
X_nan = sparse_constructor(
np.array([[170, np.nan, 170, 170],
[np.nan, 170, 430, 430],
[430, 430, np.nan, 300],
[300, 300, 300, np.nan]]))
# we avoid creating specific data for axis 0 and 1: transposing the data is
# enough.
if axis:
X = X.T
X_nan = X_nan.T
# take a copy of the old statistics since they are modified in place.
X_means, X_vars, X_sample_count = incr_mean_variance_axis(
X, axis=axis, last_mean=old_means.copy(),
last_var=old_variances.copy(), last_n=old_sample_count.copy())
X_nan_means, X_nan_vars, X_nan_sample_count = incr_mean_variance_axis(
X_nan, axis=axis, last_mean=old_means.copy(),
last_var=old_variances.copy(), last_n=old_sample_count.copy())
assert_allclose(X_nan_means, X_means)
assert_allclose(X_nan_vars, X_vars)
assert_allclose(X_nan_sample_count, X_sample_count)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
with pytest.raises(ValueError):
mean_variance_axis(X_csr, axis=-3)
with pytest.raises(ValueError):
mean_variance_axis(X_csr, axis=2)
with pytest.raises(ValueError):
mean_variance_axis(X_csr, axis=-1)
with pytest.raises(ValueError):
incr_mean_variance_axis(X_csr, axis=-3, last_mean=None, last_var=None,
last_n=None)
with pytest.raises(ValueError):
incr_mean_variance_axis(X_csr, axis=2, last_mean=None, last_var=None,
last_n=None)
with pytest.raises(ValueError):
incr_mean_variance_axis(X_csr, axis=-1, last_mean=None, last_var=None,
last_n=None)
def test_densify_rows():
for dtype in (np.float32, np.float64):
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=dtype)
X_rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((6, X.shape[1]), dtype=dtype)
out_rows = np.array([1, 3, 4], dtype=np.intp)
expect = np.ones_like(out)
expect[out_rows] = X[X_rows, :].toarray()
assign_rows_csr(X, X_rows, out_rows, out)
assert_array_equal(out, expect)
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
with pytest.raises(TypeError):
inplace_column_scale(X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
with pytest.raises(TypeError):
inplace_column_scale(X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
with pytest.raises(TypeError):
inplace_column_scale(X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
with pytest.raises(TypeError):
inplace_column_scale(X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
with pytest.raises(TypeError):
inplace_swap_row(X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
with pytest.raises(TypeError):
inplace_swap_row(X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
with pytest.raises(TypeError):
inplace_swap_column(X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
with pytest.raises(TypeError):
inplace_swap_column(X_csr.tolil())
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("axis", [0, 1, None])
@pytest.mark.parametrize("sparse_format", [sp.csr_matrix, sp.csc_matrix])
@pytest.mark.parametrize(
"missing_values, min_func, max_func, ignore_nan",
[(0, np.min, np.max, False),
(np.nan, np.nanmin, np.nanmax, True)]
)
@pytest.mark.parametrize("large_indices", [True, False])
def test_min_max(dtype, axis, sparse_format, missing_values, min_func,
max_func, ignore_nan, large_indices):
X = np.array([[0, 3, 0],
[2, -1, missing_values],
[0, 0, 0],
[9, missing_values, 7],
[4, 0, 5]], dtype=dtype)
X_sparse = sparse_format(X)
if large_indices:
X_sparse.indices = X_sparse.indices.astype('int64')
X_sparse.indptr = X_sparse.indptr.astype('int64')
mins_sparse, maxs_sparse = min_max_axis(X_sparse, axis=axis,
ignore_nan=ignore_nan)
assert_array_equal(mins_sparse, min_func(X, axis=axis))
assert_array_equal(maxs_sparse, max_func(X, axis=axis))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
with pytest.raises(TypeError):
min_max_axis(X_csr.tolil(), axis=0)
with pytest.raises(ValueError):
min_max_axis(X_csr, axis=2)
with pytest.raises(ValueError):
min_max_axis(X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
with pytest.raises(TypeError):
count_nonzero(X_csc)
with pytest.raises(ValueError):
count_nonzero(X_csr, axis=2)
assert (count_nonzero(X_csr, axis=0).dtype ==
count_nonzero(X_csr, axis=1).dtype)
assert (count_nonzero(X_csr, axis=0, sample_weight=sample_weight).dtype ==
count_nonzero(X_csr, axis=1, sample_weight=sample_weight).dtype)
# Check dtypes with large sparse matrices too
# XXX: test fails on 32bit (Windows/Linux)
try:
X_csr.indices = X_csr.indices.astype(np.int64)
X_csr.indptr = X_csr.indptr.astype(np.int64)
assert (count_nonzero(X_csr, axis=0).dtype ==
count_nonzero(X_csr, axis=1).dtype)
assert (count_nonzero(X_csr, axis=0,
sample_weight=sample_weight).dtype ==
count_nonzero(X_csr, axis=1,
sample_weight=sample_weight).dtype)
except TypeError as e:
assert ("according to the rule 'safe'" in e.args[0]
and np.intp().nbytes < 8), e
def test_csc_row_median():
# Test that csc_median_axis_0 actually calculates the median.
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
with pytest.raises(TypeError):
csc_median_axis_0(sp.csr_matrix(X))
def test_inplace_normalize():
ones = np.ones((10, 1))
rs = RandomState(10)
for inplace_csr_row_normalize in (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2):
for dtype in (np.float64, np.float32):
X = rs.randn(10, 5).astype(dtype)
X_csr = sp.csr_matrix(X)
for index_dtype in [np.int32, np.int64]:
# csr_matrix will use int32 indices by default,
# up-casting those to int64 when necessary
if index_dtype is np.int64:
X_csr.indptr = X_csr.indptr.astype(index_dtype)
X_csr.indices = X_csr.indices.astype(index_dtype)
assert X_csr.indices.dtype == index_dtype
assert X_csr.indptr.dtype == index_dtype
inplace_csr_row_normalize(X_csr)
assert X_csr.dtype == dtype
if inplace_csr_row_normalize is inplace_csr_row_normalize_l2:
X_csr.data **= 2
assert_array_almost_equal(np.abs(X_csr).sum(axis=1), ones)
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
def test_csr_row_norms(dtype):
# checks that csr_row_norms returns the same output as
# scipy.sparse.linalg.norm, and that the dtype is the same as X.dtype.
X = sp.random(100, 10, format='csr', dtype=dtype, random_state=42)
scipy_norms = sp.linalg.norm(X, axis=1)**2
norms = csr_row_norms(X)
assert norms.dtype == dtype
rtol = 1e-6 if dtype == np.float32 else 1e-7
assert_allclose(norms, scipy_norms, rtol=rtol)
|
bsd-3-clause
|
codrut3/tensorflow
|
tensorflow/contrib/factorization/python/ops/kmeans_test.py
|
13
|
19945
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for KMeans."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
import numpy as np
from sklearn.cluster import KMeans as SklearnKMeans
# pylint: disable=g-import-not-at-top
from tensorflow.contrib.factorization.python.ops import kmeans as kmeans_lib
from tensorflow.python.estimator import run_config
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import flags
from tensorflow.python.platform import test
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
FLAGS = flags.FLAGS
def normalize(x):
return x / np.sqrt(np.sum(x * x, axis=-1, keepdims=True))
def cosine_similarity(x, y):
return np.dot(normalize(x), np.transpose(normalize(y)))
def make_random_centers(num_centers, num_dims, center_norm=500):
return np.round(
np.random.rand(num_centers, num_dims).astype(np.float32) * center_norm)
def make_random_points(centers, num_points, max_offset=20):
num_centers, num_dims = centers.shape
assignments = np.random.choice(num_centers, num_points)
offsets = np.round(
np.random.randn(num_points, num_dims).astype(np.float32) * max_offset)
return (centers[assignments] + offsets, assignments, np.add.reduce(
offsets * offsets, 1))
class KMeansTestBase(test.TestCase):
def input_fn(self,
batch_size=None,
points=None,
randomize=None,
num_epochs=None):
"""Returns an input_fn that randomly selects batches from given points."""
batch_size = batch_size or self.batch_size
points = points if points is not None else self.points
num_points = points.shape[0]
if randomize is None:
randomize = (self.use_mini_batch and
self.mini_batch_steps_per_iteration <= 1)
def _fn():
x = constant_op.constant(points)
if batch_size == num_points:
return input_lib.limit_epochs(x, num_epochs=num_epochs), None
if randomize:
indices = random_ops.random_uniform(
constant_op.constant([batch_size]),
minval=0,
maxval=num_points - 1,
dtype=dtypes.int32,
seed=10)
else:
# We need to cycle through the indices sequentially. We create a queue
# to maintain the list of indices.
q = data_flow_ops.FIFOQueue(num_points, dtypes.int32, ())
# Conditionally initialize the Queue.
def _init_q():
with ops.control_dependencies(
[q.enqueue_many(math_ops.range(num_points))]):
return control_flow_ops.no_op()
init_q = control_flow_ops.cond(q.size() <= 0, _init_q,
control_flow_ops.no_op)
with ops.control_dependencies([init_q]):
offsets = q.dequeue_many(batch_size)
with ops.control_dependencies([q.enqueue_many(offsets)]):
indices = array_ops.identity(offsets)
batch = array_ops.gather(x, indices)
return (input_lib.limit_epochs(batch, num_epochs=num_epochs), None)
return _fn
@staticmethod
def config(tf_random_seed):
return run_config.RunConfig().replace(tf_random_seed=tf_random_seed)
@property
def initial_clusters(self):
return kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT
@property
def batch_size(self):
return self.num_points
@property
def use_mini_batch(self):
return False
@property
def mini_batch_steps_per_iteration(self):
return 1
class KMeansTest(KMeansTestBase):
def setUp(self):
np.random.seed(3)
self.num_centers = 5
self.num_dims = 2
self.num_points = 1000
self.true_centers = make_random_centers(self.num_centers, self.num_dims)
self.points, _, self.scores = make_random_points(self.true_centers,
self.num_points)
self.true_score = np.add.reduce(self.scores)
def _kmeans(self, relative_tolerance=None):
return kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
random_seed=24,
relative_tolerance=relative_tolerance)
def test_clusters(self):
kmeans = self._kmeans()
kmeans.train(input_fn=self.input_fn(), steps=1)
clusters = kmeans.cluster_centers()
self.assertAllEqual(list(clusters.shape), [self.num_centers, self.num_dims])
def test_fit(self):
kmeans = self._kmeans()
kmeans.train(input_fn=self.input_fn(), steps=1)
score1 = kmeans.score(input_fn=self.input_fn(batch_size=self.num_points))
steps = 10 * self.num_points // self.batch_size
kmeans.train(input_fn=self.input_fn(), steps=steps)
score2 = kmeans.score(input_fn=self.input_fn(batch_size=self.num_points))
self.assertTrue(score1 > score2)
self.assertNear(self.true_score, score2, self.true_score * 0.05)
def test_monitor(self):
if self.use_mini_batch:
# We don't test for use_mini_batch case since the loss value can be noisy.
return
kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(14),
random_seed=12,
relative_tolerance=1e-4)
kmeans.train(
input_fn=self.input_fn(),
# Force it to train until the relative tolerance monitor stops it.
steps=None)
score = kmeans.score(input_fn=self.input_fn(batch_size=self.num_points))
self.assertNear(self.true_score, score, self.true_score * 0.01)
def test_infer(self):
kmeans = self._kmeans()
# Make a call to fit to initialize the cluster centers.
max_steps = 1
kmeans.train(input_fn=self.input_fn(), max_steps=max_steps)
clusters = kmeans.cluster_centers()
# Make a small test set
num_points = 10
points, true_assignments, true_offsets = make_random_points(
clusters, num_points)
input_fn = self.input_fn(batch_size=num_points, points=points, num_epochs=1)
# Test predict
assignments = list(kmeans.predict_cluster_index(input_fn))
self.assertAllEqual(assignments, true_assignments)
# Test score
score = kmeans.score(input_fn=lambda: (constant_op.constant(points), None))
self.assertNear(score, np.sum(true_offsets), 0.01 * score)
# Test transform
transform = list(kmeans.transform(input_fn))
true_transform = np.maximum(
0,
np.sum(np.square(points), axis=1, keepdims=True) -
2 * np.dot(points, np.transpose(clusters)) + np.transpose(
np.sum(np.square(clusters), axis=1, keepdims=True)))
self.assertAllClose(transform, true_transform, rtol=0.05, atol=10)
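# Note on true_transform above: it expands the squared Euclidean distance
# ||x - c||^2 = ||x||^2 - 2 x.c + ||c||^2 row by row, clipped at zero to guard
# against tiny negative values from floating-point cancellation; this is what
# kmeans.transform() is expected to reproduce within the rtol/atol used.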
class KMeansTestMultiStageInit(KMeansTestBase):
def test_random(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.train(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.cluster_centers()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_just_right(self):
points = np.array([[1, 2]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
kmeans.train(
input_fn=self.input_fn(batch_size=1, points=points, randomize=False),
steps=1)
clusters = kmeans.cluster_centers()
self.assertAllEqual(points, clusters)
def test_kmeans_plus_plus_batch_too_small(self):
points = np.array(
[[1, 2], [3, 4], [5, 6], [7, 8], [9, 0]], dtype=np.float32)
kmeans = kmeans_lib.KMeansClustering(
num_clusters=points.shape[0],
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
distance_metric=kmeans_lib.KMeansClustering.SQUARED_EUCLIDEAN_DISTANCE,
use_mini_batch=True,
mini_batch_steps_per_iteration=100,
random_seed=24,
relative_tolerance=None)
with self.assertRaisesOpError(AssertionError):
kmeans.train(
input_fn=self.input_fn(batch_size=4, points=points, randomize=False),
steps=1)
class MiniBatchKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansTest(KMeansTest):
@property
def batch_size(self):
return 50
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansCosineDistanceTest(KMeansTestBase):
def setUp(self):
self.points = np.array(
[[2.5, 0.1], [2, 0.2], [3, 0.1], [4, 0.2], [0.1, 2.5], [0.2, 2],
[0.1, 3], [0.2, 4]],
dtype=np.float32)
self.num_points = self.points.shape[0]
self.true_centers = np.array(
[
normalize(
np.mean(normalize(self.points)[0:4, :], axis=0,
keepdims=True))[0],
normalize(
np.mean(normalize(self.points)[4:, :], axis=0,
keepdims=True))[0]
],
dtype=np.float32)
self.true_assignments = np.array([0] * 4 + [1] * 4)
self.true_score = len(self.points) - np.tensordot(
normalize(self.points), self.true_centers[self.true_assignments])
self.num_centers = 2
self.kmeans = kmeans_lib.KMeansClustering(
self.num_centers,
initial_clusters=kmeans_lib.KMeansClustering.RANDOM_INIT,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
def test_fit(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.train(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.cluster_centers())
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
def test_transform(self):
self.kmeans.train(input_fn=self.input_fn(), steps=10)
centers = normalize(self.kmeans.cluster_centers())
true_transform = 1 - cosine_similarity(self.points, centers)
transform = list(
self.kmeans.transform(
input_fn=self.input_fn(batch_size=self.num_points, num_epochs=1)))
self.assertAllClose(transform, true_transform, atol=1e-3)
def test_predict(self):
max_steps = 10 * self.num_points // self.batch_size
self.kmeans.train(input_fn=self.input_fn(), max_steps=max_steps)
centers = normalize(self.kmeans.cluster_centers())
assignments = list(
self.kmeans.predict_cluster_index(
input_fn=self.input_fn(num_epochs=1, batch_size=self.num_points)))
self.assertAllClose(
centers[assignments],
self.true_centers[self.true_assignments],
atol=1e-2)
centers = centers[centers[:, 0].argsort()]
true_centers = self.true_centers[self.true_centers[:, 0].argsort()]
self.assertAllClose(centers, true_centers, atol=0.04)
score = self.kmeans.score(
input_fn=self.input_fn(batch_size=self.num_points))
self.assertAllClose(score, self.true_score, atol=1e-2)
def test_predict_kmeans_plus_plus(self):
# Most points are concentrated near one center. KMeans++ is likely to find
# the less populated centers.
points = np.array(
[[2.5, 3.5], [2.5, 3.5], [-2, 3], [-2, 3], [-3, -3], [-3.1, -3.2],
[-2.8, -3.], [-2.9, -3.1], [-3., -3.1], [-3., -3.1], [-3.2, -3.],
[-3., -3.]],
dtype=np.float32)
true_centers = np.array(
[
normalize(
np.mean(normalize(points)[0:2, :], axis=0, keepdims=True))[0],
normalize(
np.mean(normalize(points)[2:4, :], axis=0, keepdims=True))[0],
normalize(np.mean(normalize(points)[4:, :], axis=0,
keepdims=True))[0]
],
dtype=np.float32)
true_assignments = [0] * 2 + [1] * 2 + [2] * 8
true_score = len(points) - np.tensordot(
normalize(points), true_centers[true_assignments])
kmeans = kmeans_lib.KMeansClustering(
3,
initial_clusters=self.initial_clusters,
distance_metric=kmeans_lib.KMeansClustering.COSINE_DISTANCE,
use_mini_batch=self.use_mini_batch,
mini_batch_steps_per_iteration=self.mini_batch_steps_per_iteration,
config=self.config(3))
kmeans.train(
input_fn=lambda: (constant_op.constant(points), None), steps=30)
centers = normalize(kmeans.cluster_centers())
self.assertAllClose(
sorted(centers.tolist()), sorted(true_centers.tolist()), atol=1e-2)
def _input_fn():
return (input_lib.limit_epochs(
constant_op.constant(points), num_epochs=1), None)
assignments = list(kmeans.predict_cluster_index(input_fn=_input_fn))
self.assertAllClose(
centers[assignments], true_centers[true_assignments], atol=1e-2)
score = kmeans.score(input_fn=lambda: (constant_op.constant(points), None))
self.assertAllClose(score, true_score, atol=1e-2)
class MiniBatchKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
class FullBatchAsyncKMeansCosineTest(KMeansCosineDistanceTest):
@property
def batch_size(self):
return 2
@property
def use_mini_batch(self):
return True
@property
def mini_batch_steps_per_iteration(self):
return self.num_points // self.batch_size
class KMeansBenchmark(benchmark.Benchmark):
"""Base class for benchmarks."""
def SetUp(self,
dimension=50,
num_clusters=50,
points_per_cluster=10000,
center_norm=500,
cluster_width=20):
np.random.seed(123456)
self.num_clusters = num_clusters
self.num_points = num_clusters * points_per_cluster
self.centers = make_random_centers(
self.num_clusters, dimension, center_norm=center_norm)
self.points, _, scores = make_random_points(
self.centers, self.num_points, max_offset=cluster_width)
self.score = float(np.sum(scores))
def _report(self, num_iters, start, end, scores):
print(scores)
self.report_benchmark(
iters=num_iters,
wall_time=(end - start) / num_iters,
extras={'true_sum_squared_distances': self.score,
'fit_scores': scores})
def _fit(self, num_iters=10):
pass
def benchmark_01_2dim_5center_500point(self):
self.SetUp(dimension=2, num_clusters=5, points_per_cluster=100)
self._fit()
def benchmark_02_20dim_20center_10kpoint(self):
self.SetUp(dimension=20, num_clusters=20, points_per_cluster=500)
self._fit()
def benchmark_03_100dim_50center_50kpoint(self):
self.SetUp(dimension=100, num_clusters=50, points_per_cluster=1000)
self._fit()
def benchmark_03_100dim_50center_50kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=50,
points_per_cluster=1000,
cluster_width=250)
self._fit()
def benchmark_04_100dim_500center_500kpoint(self):
self.SetUp(dimension=100, num_clusters=500, points_per_cluster=1000)
self._fit(num_iters=4)
def benchmark_05_100dim_500center_500kpoint_unseparated(self):
self.SetUp(
dimension=100,
num_clusters=500,
points_per_cluster=1000,
cluster_width=250)
self._fit(num_iters=4)
class TensorflowKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting tensorflow KMeans: %d' % i)
tf_kmeans = kmeans_lib.KMeansClustering(
self.num_clusters,
initial_clusters=kmeans_lib.KMeansClustering.KMEANS_PLUS_PLUS_INIT,
kmeans_plus_plus_num_retries=int(math.log(self.num_clusters) + 2),
random_seed=i * 42,
relative_tolerance=1e-6,
config=self.config(3))
tf_kmeans.train(
input_fn=lambda: (constant_op.constant(self.points), None), steps=50)
_ = tf_kmeans.cluster_centers()
scores.append(
tf_kmeans.score(
input_fn=lambda: (constant_op.constant(self.points), None)))
self._report(num_iters, start, time.time(), scores)
class SklearnKMeansBenchmark(KMeansBenchmark):
def _fit(self, num_iters=10):
scores = []
start = time.time()
for i in range(num_iters):
print('Starting sklearn KMeans: %d' % i)
sklearn_kmeans = SklearnKMeans(
n_clusters=self.num_clusters,
init='k-means++',
max_iter=50,
n_init=1,
tol=1e-4,
random_state=i * 42)
      sklearn_kmeans.fit(self.points)
scores.append(sklearn_kmeans.inertia_)
self._report(num_iters, start, time.time(), scores)
class KMeansTestQueues(test.TestCase):
def input_fn(self):
def _fn():
queue = data_flow_ops.FIFOQueue(
capacity=10, dtypes=dtypes.float32, shapes=[10, 3])
enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
queue_runner.add_queue_runner(
queue_runner.QueueRunner(queue, [enqueue_op]))
return queue.dequeue(), None
return _fn
# This test makes sure that there are no deadlocks when using a QueueRunner.
  # Note that since cluster initialization is dependent on inputs, if input
# is generated using a QueueRunner, one has to make sure that these runners
# are started before the initialization.
def test_queues(self):
kmeans = kmeans_lib.KMeansClustering(5)
kmeans.train(input_fn=self.input_fn(), steps=1)
if __name__ == '__main__':
test.main()
|
apache-2.0
|
tmthydvnprt/compfipy
|
compfipy/asset.py
|
1
|
74951
|
# pylint: disable=too-many-public-methods
"""
asset.py
Define an asset class to contain price data and various calculations, measures, and processed versions of the data.
"""
import datetime
import collections
import tabulate
import scipy.stats
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from compfipy.util import RISK_FREE_RATE, MONTHS_IN_YEAR, DAYS_IN_YEAR, DAYS_IN_TRADING_YEAR
from compfipy.util import FIBONACCI_SEQUENCE, FIBONACCI_DECIMAL, RANK_PERCENTS, RANK_DAYS_IN_TRADING_YEAR
from compfipy.util import calc_returns, calc_cagr, fmtp, fmtn, fmttn, sma, ema
# Helper Functions for Fibonacci Code
# ------------------------------------------------------------------------------------------------------------------------------
def fibonacci_retracement(price=0.0, lastprice=0.0):
"""
Fibonacci_retracement
"""
return price + FIBONACCI_DECIMAL * (lastprice - price)
def fibonacci_arc(price=0.0, lastprice=0.0, days_since_last_price=0, n_days=0):
"""
Fibonacci_arc
"""
fib_radius = FIBONACCI_DECIMAL * np.sqrt(np.power(lastprice - price, 2) + np.power(days_since_last_price, 2))
return price - np.sqrt(np.power(fib_radius, 2) - np.power(n_days, 2))
def fibonacci_time(date=datetime.date.today()):
"""
Fibonacci_time
"""
return [date + datetime.timedelta(days=d) for d in FIBONACCI_SEQUENCE]
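# A minimal usage sketch (illustrative only, not part of the original module): given a
# hypothetical swing from a low of 100.0 to a high of 150.0,
#
#   levels = fibonacci_retracement(price=100.0, lastprice=150.0)  # 100 + FIBONACCI_DECIMAL * 50
#   arc = fibonacci_arc(price=100.0, lastprice=150.0, days_since_last_price=30, n_days=10)
#   zones = fibonacci_time(datetime.date(2015, 1, 1))             # dates offset by FIBONACCI_SEQUENCE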
# General Utility Functions
# ------------------------------------------------------------------------------------------------------------------------------
def plot(x, figsize=(16, 4), title=None, logy=False, **kwargs):
"""
Plot helper, assumes a pd.Series or pd.DataFrame.
"""
title = title if title else 'Price Series'
x.plot(figsize=figsize, title=title, logy=logy, **kwargs)
def scatter_matrix(x, figsize=(16, 4), title=None, logy=False, **kwargs):
"""
Plot helper, assumes a pd.Series or pd.DataFrame.
"""
title = title if title else 'Price Scatter Matrix'
x.scatter_matrix(figsize=figsize, title=title, logy=logy, **kwargs)
def hist(x, figsize=(16, 4), title=None, logy=False, **kwargs):
"""
Plot helper, assumes a pd.Series or pd.DataFrame.
"""
title = title if title else 'Return Histogram'
x.hist(figsize=figsize, title=title, logy=logy, **kwargs)
# General Asset Class
# ------------------------------------------------------------------------------------------------------------------------------
class Asset(object):
# pylint: disable=line-too-long
"""
Asset Class for storing OCHLV price data, and calculating overlays and indicators from price data.
Overlays
--------
[x] Bollinger Bands - A chart overlay that shows the upper and lower limits of 'normal' price movements based on the Standard Deviation of prices.
[x] Chandelier Exit - A indicator that can be used to set trailing stop-losses for both long and short position.
[x] Ichimoku Clouds - A comprehensive indicator that defines support and resistance, identifies trend direction, gauges momentum and provides trading signals.
[x] Keltner Channels - A chart overlay that shows upper and lower limits for price movements based on the Average True Range of prices.
[x] Moving Averages - Simple and Exponential - Chart overlays that show the 'average' value over time. Both Simple Moving Averages (SMAs) and Exponential Moving Averages (EMAs) are explained.
[x] Moving Average Envelopes - A chart overlay consisting of a channel formed from simple moving averages.
[x] Parabolic SAR - A chart overlay that shows reversal points below prices in an uptrend and above prices in a downtrend.
    [x] Pivot Points - A chart overlay showing potential support and resistance levels calculated from the prior period's high, low and close.
[x] Price Channels - A chart overlay that shows a channel made from the highest high and lowest low for a given period of time.
[x] Volume by Price - A chart overlay with a horizontal histogram showing the amount of activity at various price levels.
[x] Volume-weighted Average Price (VWAP) - An intraday indicator based on total dollar value of all trades for the current day divided by the total trading volume for the current day.
[x] ZigZag - A chart overlay that shows filtered price movements that are greater than a given percentage.
Indicators
----------
[x] Accumulation Distribution Line - Combines price and volume to show how money may be flowing into or out of a stock.
[x] Aroon - Uses Aroon Up and Aroon Down to determine whether a stock is trending or not.
[x] Aroon Oscillator - Measures the difference between Aroon Up and Aroon Down.
[x] Average Directional Index (ADX) - Shows whether a stock is trending or oscillating.
[x] Average True Range (ATR) - Measures a stock's volatility.
[x] BandWidth - Shows the percentage difference between the upper and lower Bollinger Band.
[x] %B Indicator - Shows the relationship between price and standard deviation bands.
[x] Commodity Channel Index (CCI) - Shows a stock's variation from its 'typical' price.
[x] Coppock Curve - An oscillator that uses rate-of-change and a weighted moving average to measure momentum.
[x] Chaikin Money Flow - Combines price and volume to show how money may be flowing into or out of a stock. Alternative to Accumulation/Distribution Line.
[x] Chaikin Oscillator - Combines price and volume to show how money may be flowing into or out of a stock. Based on Accumulation/Distribution Line.
[x] Price Momentum Oscillator - An advanced momentum indicator that tracks a stock's rate of change.
[x] Detrended Price Oscillator (DPO) - A price oscillator that uses a displaced moving average to identify cycles.
[x] Ease of Movement (EMV) - An indicator that compares volume and price to identify significant moves.
[x] Force Index - A simple price-and-volume oscillator.
[x] Know Sure Thing (KST) - An indicator that measures momentum in a smooth fashion.
[x] Mass Index - An indicator that identifies reversals when the price range widens.
[x] MACD - A momentum oscillator based on the difference between two EMAs.
[x] MACD-Histogram - A momentum oscillator that shows the difference between MACD and its signal line.
    [x] Money Flow Index (MFI) - A volume-weighted version of RSI that shows shifts in buying and selling pressure.
[x] Negative Volume Index (NVI) - A cumulative volume-based indicator used to identify trend reversals.
[x] On Balance Volume (OBV) - Combines price and volume in a very simple way to show how money may be flowing into or out of a stock.
[x] Percentage Price Oscillator (PPO) - A percentage-based version of the MACD indicator.
[x] Percentage Volume Oscillator - The PPO indicator applied to volume instead of price.
[x] Rate of Change (ROC) - Shows the speed at which a stock's price is changing.
[x] Relative Strength Index (RSI) - Shows how strongly a stock is moving in its current direction.
    [x] StockCharts Tech. Ranks (SCTRs) - A relative ranking system based on a stock's technical strength.
[ ] Slope - Measures the rise-over-run for a linear regression
[x] Standard Deviation (Volatility) - A statistical measure of a stock's volatility.
[x] Stochastic Oscillator - Shows how a stock's price is doing relative to past movements. Fast, Slow and Full Stochastics are explained.
[x] StochRSI - Combines Stochastics with the RSI indicator. Helps you see RSI changes more clearly.
[x] TRIX - A triple-smoothed moving average of price movements.
[x] True Strength Index (TSI) - An indicator that measures trend direction and identifies overbought/oversold levels.
[x] Ulcer Index - An indicator designed to measure market risk or volatility.
[x] Ultimate Oscillator - Combines long-term, mid-term and short-term moving averages into one number.
[x] Vortex Indicator - An indicator designed to identify the start of a new trend and define the current trend.
[x] William %R - Uses Stochastics to determine overbought and oversold levels.
Charts
------
[x] Gaps - An area of price change in which there were no trades.
[ ] Classify Gaps - decide if a gap is [ ] common, [ ] breakaway, [ ] runaway, or [ ] exhaustion
[ ] Double Top Reversal
[ ] Double Bottom Reversal
[ ] Head and Shoulders Top (Reversal)
[ ] Head and Shoulders Bottom (Reversal)
[ ] Falling Wedge (Reversal)
[ ] Rising Wedge (Reversal)
[ ] Rounding Bottom (Reversal)
[ ] Triple Top Reversal
[ ] Triple Bottom Reversal
[ ] Bump and Run Reversal (Reversal)
[ ] Flag, Pennant (Continuation)
[ ] Symmetrical Triangle (Continuation)
[ ] Ascending Triangle (Continuation)
[ ] Descending Triangle (Continuation)
[ ] Rectangle (Continuation)
[ ] Price Channel (Continuation)
[ ] Measured Move - Bullish (Continuation)
[ ] Measured Move - Bearish (Continuation)
[ ] Cup with Handle (Continuation)
[ ] Introduction to Candlesticks - An overview of candlesticks, including history, formation, and key patterns.
[ ] Candlesticks and Support - How candlestick chart patterns can mark support levels.
[ ] Candlesticks and Resistance - How candlestick chart patterns can mark resistance levels.
[ ] Candlestick Bullish Reversal Patterns - Detailed descriptions of bullish reversal candlestick patterns
[ ] Candlestick Bearish Reversal Patterns - Detailed descriptions of common bearish reversal candlestick patterns.
[ ] Candlestick Pattern Dictionary - A comprehensive list of common candlestick patterns.
[ ] Arms CandleVolume - A price chart that merges candlesticks with EquiVolume boxes.
[ ] CandleVolume - A price chart that merges candlesticks with volume.
[ ] Elder Impulse System - A trading system that color codes the price bars to show signals.
[ ] EquiVolume - Price boxes that incorporate volume. How to use and interpret EquiVolume boxes.
    [ ] Heikin-Ashi - A candlestick method that uses price data from two periods instead of one.
[ ] Kagi Charts - How to use and interpret Kagi charts.
[ ] Point and Figure Charts - How to use and interpret Point and Figure charts.
[ ] Renko Charts - How to use and interpret Renko charts.
[ ] Three Line Break Charts - How to use and interpret Three Line Break charts.
[ ] Andrews' Pitchfork - Drawing, adjusting and interpreting this trend channel tool.
[ ] Cycles - Steps to finding cycles and using the Cycle Lines Tool.
[o] Fibonacci Retracements - Defines Fibonacci retracements and shows how to use them to identify reversal zones.
[o] Fibonacci Arcs - Shows how Fibonacci Arcs can be used to find reversals.
[ ] Fibonacci Fans - Explains what Fibonacci Fans are and how they can be used.
[o] Fibonacci Time Zones - Describes Fibonacci Time Zones and how they can be used.
    [x] Quadrant Lines - Defines Quadrant Lines and shows how they can be used to find future support/resistance zones.
[ ] Raff Regression Channel - A channel tool based on two equidistant trendlines on either side of a linear regression.
[ ] Speed Resistance Lines - Shows how Speed Resistance Lines are used on charts.
"""
# pylint: enable=line-too-long
def __init__(self, data=None, market_cap=1.0):
"""
        Create an asset from a pandas DataFrame of OHLCV price data; the symbol is taken from data.index.name.
"""
self.symbol = data.index.name
self.data = data
self.market_cap = market_cap
self.stats = {}
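    # Illustrative construction (an assumption about the expected input, not original code):
    # ``data`` should be a DataFrame with 'Open', 'High', 'Low', 'Close', 'Adj_Close' and
    # 'Volume' columns and a DatetimeIndex whose ``name`` is the ticker symbol, e.g.
    #   prices.index.name = 'SPY'
    #   asset = Asset(prices, market_cap=2.0e9)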
def __str__(self):
"""
Return string representation.
"""
return str(self.data)
# Summary stats
# --------------------------------------------------------------------------------------------------------------------------
def calc_stats(self, yearly_risk_free_return=RISK_FREE_RATE):
"""
Calculate common statistics for this asset.
"""
# pylint: disable=too-many-statements
monthly_risk_free_return = (np.power(1 + yearly_risk_free_return, 1.0 / MONTHS_IN_YEAR) - 1.0) * MONTHS_IN_YEAR
daily_risk_free_return = (np.power(1 + yearly_risk_free_return, 1.0 / DAYS_IN_TRADING_YEAR) - 1.0) * DAYS_IN_TRADING_YEAR
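        # The annual risk-free rate is de-annualized via (1 + r)**(1/periods) - 1 and then
        # re-scaled by the number of periods, so it is directly comparable with the simple
        # annualized mean returns computed below.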
# Sample prices
daily_price = self.close
monthly_price = daily_price.resample('M').last()
yearly_price = daily_price.resample('A').last()
self.stats = {
'name' : self.symbol,
'start': daily_price.index[0],
'end': daily_price.index[-1],
'market_cap' : self.market_cap,
'yearly_risk_free_return': yearly_risk_free_return,
'daily_mean': np.nan,
'daily_vol': np.nan,
'daily_sharpe': np.nan,
'best_day': np.nan,
'worst_day': np.nan,
'total_return': np.nan,
'cagr': np.nan,
'incep': np.nan,
'max_drawdown': np.nan,
'avg_drawdown': np.nan,
'avg_drawdown_days': np.nan,
'daily_skew': np.nan,
'daily_kurt': np.nan,
'monthly_mean': np.nan,
'monthly_vol': np.nan,
'monthly_sharpe': np.nan,
'best_month': np.nan,
'worst_month': np.nan,
'mtd': np.nan,
'pos_month_perc': np.nan,
'avg_up_month': np.nan,
'avg_down_month': np.nan,
'three_month': np.nan,
'monthly_skew': np.nan,
'monthly_kurt': np.nan,
'six_month': np.nan,
'ytd': np.nan,
'one_year': np.nan,
'yearly_mean': np.nan,
'yearly_vol': np.nan,
'yearly_sharpe': np.nan,
'best_year': np.nan,
'worst_year': np.nan,
'three_year': np.nan,
'win_year_perc': np.nan,
'twelve_month_win_perc': np.nan,
'yearly_skew': np.nan,
'yearly_kurt': np.nan,
'five_year': np.nan,
'ten_year': np.nan,
'return_table': {}
}
        if len(daily_price) == 1:
return self
# Stats with daily prices
r = calc_returns(daily_price)
if len(r) < 4:
return self
self.stats['daily_mean'] = DAYS_IN_TRADING_YEAR * r.mean()
self.stats['daily_vol'] = np.sqrt(DAYS_IN_TRADING_YEAR) * r.std()
self.stats['daily_sharpe'] = (self.stats['daily_mean'] - daily_risk_free_return) / self.stats['daily_vol']
self.stats['best_day'] = r.ix[r.idxmax():r.idxmax()]
self.stats['worst_day'] = r.ix[r.idxmin():r.idxmin()]
self.stats['total_return'] = (daily_price[-1] / daily_price[0]) - 1.0
self.stats['ytd'] = self.stats['total_return']
self.stats['cagr'] = calc_cagr(daily_price)
self.stats['incep'] = self.stats['cagr']
drawdown_info = self.drawdown_info()
self.stats['max_drawdown'] = drawdown_info['drawdown'].min()
self.stats['avg_drawdown'] = drawdown_info['drawdown'].mean()
self.stats['avg_drawdown_days'] = drawdown_info['days'].mean()
self.stats['daily_skew'] = r.skew()
self.stats['daily_kurt'] = r.kurt() if len(r[(~np.isnan(r)) & (r != 0)]) > 0 else np.nan
# Stats with monthly prices
mr = calc_returns(monthly_price)
if len(mr) < 2:
return self
self.stats['monthly_mean'] = MONTHS_IN_YEAR * mr.mean()
self.stats['monthly_vol'] = np.sqrt(MONTHS_IN_YEAR) * mr.std()
self.stats['monthly_sharpe'] = (self.stats['monthly_mean'] - monthly_risk_free_return) / self.stats['monthly_vol']
self.stats['best_month'] = mr.ix[mr.idxmax():mr.idxmax()]
self.stats['worst_month'] = mr.ix[mr.idxmin():mr.idxmin()]
        self.stats['mtd'] = (daily_price[-1] / monthly_price[-2]) - 1.0 # -2 because monthly[-1] == daily[-1]
self.stats['pos_month_perc'] = len(mr[mr > 0]) / float(len(mr) - 1.0) # -1 to ignore first NaN
self.stats['avg_up_month'] = mr[mr > 0].mean()
self.stats['avg_down_month'] = mr[mr <= 0].mean()
# Table for lookback periods
self.stats['return_table'] = collections.defaultdict(dict)
for mi in mr.index:
self.stats['return_table'][mi.year][mi.month] = mr[mi]
fidx = mr.index[0]
try:
self.stats['return_table'][fidx.year][fidx.month] = (float(monthly_price[0]) / daily_price[0]) - 1
except ZeroDivisionError:
self.stats['return_table'][fidx.year][fidx.month] = 0.0
# Calculate YTD
for year, months in self.stats['return_table'].items():
self.stats['return_table'][year][13] = np.prod(np.array(months.values()) + 1) - 1.0
if len(mr) < 3:
return self
denominator = daily_price[:daily_price.index[-1] - pd.DateOffset(months=3)]
self.stats['three_month'] = (daily_price[-1] / denominator[-1]) - 1 if len(denominator) > 0 else np.nan
if len(mr) < 4:
return self
self.stats['monthly_skew'] = mr.skew()
self.stats['monthly_kurt'] = mr.kurt() if len(mr[(~np.isnan(mr)) & (mr != 0)]) > 0 else np.nan
denominator = daily_price[:daily_price.index[-1] - pd.DateOffset(months=6)]
self.stats['six_month'] = (daily_price[-1] / denominator[-1]) - 1 if len(denominator) > 0 else np.nan
# Stats with yearly prices
yr = calc_returns(yearly_price)
if len(yr) < 2:
return self
self.stats['ytd'] = (daily_price[-1] / yearly_price[-2]) - 1.0
denominator = daily_price[:daily_price.index[-1] - pd.DateOffset(years=1)]
self.stats['one_year'] = (daily_price[-1] / denominator[-1]) - 1 if len(denominator) > 0 else np.nan
self.stats['yearly_mean'] = yr.mean()
self.stats['yearly_vol'] = yr.std()
self.stats['yearly_sharpe'] = (self.stats['yearly_mean'] - yearly_risk_free_return) / self.stats['yearly_vol']
self.stats['best_year'] = yr.ix[yr.idxmax():yr.idxmax()]
self.stats['worst_year'] = yr.ix[yr.idxmin():yr.idxmin()]
# Annualize stat for over 1 year
self.stats['three_year'] = calc_cagr(daily_price[daily_price.index[-1] - pd.DateOffset(years=3):])
self.stats['win_year_perc'] = len(yr[yr > 0]) / float(len(yr) - 1.0)
self.stats['twelve_month_win_perc'] = (monthly_price.pct_change(11) > 0).sum() / float(len(monthly_price) - (MONTHS_IN_YEAR - 1.0))
if len(yr) < 4:
return self
self.stats['yearly_skew'] = yr.skew()
self.stats['yearly_kurt'] = yr.kurt() if len(yr[(~np.isnan(yr)) & (yr != 0)]) > 0 else np.nan
self.stats['five_year'] = calc_cagr(daily_price[daily_price.index[-1] - pd.DateOffset(years=5):])
self.stats['ten_year'] = calc_cagr(daily_price[daily_price.index[-1] - pd.DateOffset(years=10):])
return self
# pylint: enable=too-many-statements
def display_stats(self):
"""
        Display table of stats.
"""
stats = [
('start', 'Start', 'dt'),
('end', 'End', 'dt'),
('yearly_risk_free_return', 'Risk-free rate', 'p'),
(None, None, None),
('total_return', 'Total Return', 'p'),
('daily_sharpe', 'Daily Sharpe', 'n'),
('cagr', 'CAGR', 'p'),
('max_drawdown', 'Max Drawdown', 'p'),
('market_cap', 'Market Cap', 't'),
(None, None, None),
('mtd', 'MTD', 'p'),
('three_month', '3m', 'p'),
('six_month', '6m', 'p'),
('ytd', 'YTD', 'p'),
('one_year', '1Y', 'p'),
('three_year', '3Y (ann.)', 'p'),
('five_year', '5Y (ann.)', 'p'),
('ten_year', '10Y (ann.)', 'p'),
('incep', 'Since Incep. (ann.)', 'p'),
(None, None, None),
('daily_sharpe', 'Daily Sharpe', 'n'),
('daily_mean', 'Daily Mean (ann.)', 'p'),
('daily_vol', 'Daily Vol (ann.)', 'p'),
('daily_skew', 'Daily Skew', 'n'),
('daily_kurt', 'Daily Kurt', 'n'),
('best_day', 'Best Day', 'pp'),
('worst_day', 'Worst Day', 'pp'),
(None, None, None),
('monthly_sharpe', 'Monthly Sharpe', 'n'),
('monthly_mean', 'Monthly Mean (ann.)', 'p'),
('monthly_vol', 'Monthly Vol (ann.)', 'p'),
('monthly_skew', 'Monthly Skew', 'n'),
('monthly_kurt', 'Monthly Kurt', 'n'),
('best_month', 'Best Month', 'pp'),
('worst_month', 'Worst Month', 'pp'),
(None, None, None),
('yearly_sharpe', 'Yearly Sharpe', 'n'),
('yearly_mean', 'Yearly Mean', 'p'),
('yearly_vol', 'Yearly Vol', 'p'),
('yearly_skew', 'Yearly Skew', 'n'),
('yearly_kurt', 'Yearly Kurt', 'n'),
('best_year', 'Best Year', 'pp'),
('worst_year', 'Worst Year', 'pp'),
(None, None, None),
('avg_drawdown', 'Avg. Drawdown', 'p'),
('avg_drawdown_days', 'Avg. Drawdown Days', 'n'),
('avg_up_month', 'Avg. Up Month', 'p'),
('avg_down_month', 'Avg. Down Month', 'p'),
('win_year_perc', 'Win Year %', 'p'),
('twelve_month_win_perc', 'Win 12m %', 'p')
]
data = []
first_row = ['Stat']
first_row.extend([self.stats['name']])
data.append(first_row)
for k, n, f in stats:
# Blank row
if k is None:
row = [''] * len(data[0])
data.append(row)
continue
row = [n]
raw = self.stats[k]
if f is None:
row.append(raw)
elif f == 'p':
row.append(fmtp(raw))
elif f == 'n':
row.append(fmtn(raw))
elif f == 't':
row.append(fmttn(raw))
elif f == 'pp':
row.append(fmtp(raw[0]))
elif f == 'dt':
row.append(raw.strftime('%Y-%m-%d'))
else:
print 'bad'
data.append(row)
print tabulate.tabulate(data, headers='firstrow')
return self
def summary(self):
"""
Displays summary of Asset.
"""
print 'Summary of %s from %s to %s' % (self.stats['name'], self.stats['start'], self.stats['end'])
print 'Annual risk-free rate considered: %s' %(fmtp(self.stats['yearly_risk_free_return']))
print '\nSummary:'
data = [[fmtp(self.stats['total_return']), fmtn(self.stats['daily_sharpe']),
fmtp(self.stats['cagr']), fmtp(self.stats['max_drawdown']), fmttn(self.stats['market_cap'])]]
print tabulate.tabulate(data, headers=['Total Return', 'Sharpe', 'CAGR', 'Max Drawdown', 'Market Cap'])
print '\nAnnualized Returns:'
data = [[fmtp(self.stats['mtd']), fmtp(self.stats['three_month']), fmtp(self.stats['six_month']),
fmtp(self.stats['ytd']), fmtp(self.stats['one_year']), fmtp(self.stats['three_year']),
fmtp(self.stats['five_year']), fmtp(self.stats['ten_year']),
fmtp(self.stats['incep'])]]
print tabulate.tabulate(data, headers=['MTD', '3M', '6M', 'YTD', '1Y', '3Y', '5Y', '10Y', 'Incep.'])
print '\nPeriodic Returns:'
data = [
['sharpe', fmtn(self.stats['daily_sharpe']), fmtn(self.stats['monthly_sharpe']), fmtn(self.stats['yearly_sharpe'])],
['mean', fmtp(self.stats['daily_mean']), fmtp(self.stats['monthly_mean']), fmtp(self.stats['yearly_mean'])],
['vol', fmtp(self.stats['daily_vol']), fmtp(self.stats['monthly_vol']), fmtp(self.stats['yearly_vol'])],
['skew', fmtn(self.stats['daily_skew']), fmtn(self.stats['monthly_skew']), fmtn(self.stats['yearly_skew'])],
['kurt', fmtn(self.stats['daily_kurt']), fmtn(self.stats['monthly_kurt']), fmtn(self.stats['yearly_kurt'])],
['best price', fmtp(self.stats['best_day'][0]), fmtp(self.stats['best_month'][0]), fmtp(self.stats['best_year'][0])],
['best time', self.stats['best_day'].index[0].strftime('%Y-%m-%d'), self.stats['best_month'].index[0].strftime('%Y-%m-%d'), \
self.stats['best_year'].index[0].strftime('%Y-%m-%d')],
['worst price', fmtp(self.stats['worst_day'][0]), fmtp(self.stats['worst_month'][0]), fmtp(self.stats['worst_year'][0])],
['worst time', self.stats['worst_day'].index[0].strftime('%Y-%m-%d'), self.stats['worst_month'].index[0].strftime('%Y-%m-%d'), \
self.stats['worst_year'].index[0].strftime('%Y-%m-%d')]
]
print tabulate.tabulate(data, headers=['daily', 'monthly', 'yearly'])
print '\nDrawdowns:'
data = [
[fmtp(self.stats['max_drawdown']), fmtp(self.stats['avg_drawdown']),
fmtn(self.stats['avg_drawdown_days'])]]
print tabulate.tabulate(data, headers=['max', 'avg', '# days'])
print '\nMisc:'
data = [['avg. up month', fmtp(self.stats['avg_up_month'])],
['avg. down month', fmtp(self.stats['avg_down_month'])],
['up year %', fmtp(self.stats['win_year_perc'])],
['12m up %', fmtp(self.stats['twelve_month_win_perc'])]]
print tabulate.tabulate(data)
return self
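    # Typical reporting flow (a sketch, not prescribed by the original docs):
    #   Asset(prices).calc_stats().display_stats()  # full stats table
    #   Asset(prices).calc_stats().summary()        # condensed summary tables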
# Class Helper Functions
# --------------------------------------------------------------------------------------------------------------------------
def describe(self):
"""
Wrapper for pandas describe().
"""
self.data.describe()
def time_range(self, start=None, end=datetime.date.today(), freq='B'):
"""
Return a specific time range of the Asset.
"""
if isinstance(start, datetime.date) and isinstance(end, datetime.date):
date_range = pd.date_range(start, end, freq=freq)
else:
date_range = pd.date_range(end - datetime.timedelta(days=start), periods=start, freq=freq)
        return Asset(self.data.loc[date_range], self.market_cap)
def plot(self):
"""
Wrapper for pandas plot().
"""
plt.figure()
self.data[['Open', 'Close', 'High', 'Low']].plot(figsize=(16, 4), title='{} OCHL Price'.format(self.symbol.upper()))
plt.figure()
self.data[['Volume']].plot(figsize=(16, 4), title='{} Volume'.format(self.symbol.upper()))
plt.figure()
ax3 = (100.0 * self.returns()).hist(figsize=(16, 4), bins=100, normed=1)
(100.0 * self.returns()).plot(kind='kde', ax=ax3)
ax3.set_title('{} Daily Return Distribution'.format(self.symbol.upper()))
plt.figure()
ax4 = (100.0 * self.returns(freq='M')).hist(figsize=(16, 4), bins=100, normed=1)
(100.0 * self.returns(freq='M')).plot(kind='kde', ax=ax4)
ax4.set_title('{} Monthly Return Distribution'.format(self.symbol.upper()))
# Bring underlying data to class properties
# --------------------------------------------------------------------------------------------------------------------------
@property
def number_of_days(self):
"""
Return total number of days in price data.
"""
return len(self.close)
@property
def close(self):
"""
Return closing price of asset.
"""
return self.data['Close']
@property
def c(self):
"""
Return closing price of asset.
"""
return self.close
@property
def adj_close(self):
"""
Return adjusted closing price of asset.
"""
return self.data['Adj_Close']
@property
def ac(self):
"""
Return adjusted closing price of asset.
"""
return self.adj_close
@property
def open(self):
"""
Return opening price of asset.
"""
return self.data['Open']
@property
def o(self):
"""
Return opening price of asset.
"""
return self.open
@property
def high(self):
"""
Return high price of asset.
"""
return self.data['High']
@property
def h(self):
"""
Return high price of asset.
"""
return self.high
@property
def low(self):
"""
Return low price of asset.
"""
return self.data['Low']
@property
def l(self):
"""
Return low price of asset.
"""
return self.low
@property
def volume(self):
"""
Return volume of asset.
"""
return self.data['Volume']
@property
def v(self):
"""
Return volume of asset.
"""
return self.volume
# Common Price Transformations
# --------------------------------------------------------------------------------------------------------------------------
def money_flow(self):
"""
Calculate money flow.
(close - low) - (high - close)
money flow = ------------------------------
(high - low)
"""
return ((self.close - self.low) - (self.high - self.close)) / (self.high - self.low)
def money_flow_volume(self):
"""
Calculate money flow volume.
money flow volume = money flow * volume
"""
return self.money_flow() * self.volume
def typical_price(self):
"""
Calculate typical price.
(high + low + close)
typical price = --------------------
3
"""
return (self.high + self.low + self.close) / 3.0
def close_to_open_range(self):
"""
Calculate close to open range.
close to open range = open - last close
"""
return self.open - self.close.shift(1)
def quadrant_range(self):
"""
        Calculate quadrant range.
        l_i = low + (i - 1) * (high - low) / 4, for i = [1, 5]
"""
size = self.high_low_spread() / 4.0
l1 = self.low
l2 = l1 + size
l3 = l2 + size
l4 = l3 + size
l5 = l4 + size
return pd.DataFrame({'1': l1, '2': l2, '3': l3, '4': l4, '5': l5})
def true_range(self):
"""
Calculate true range.
true range = high - last low
"""
return self.high - self.low.shift(1)
def high_low_spread(self):
"""
Calculate high low spread.
high low spread = high - low
"""
return self.high - self.low
def rate_of_change(self, n=20):
"""
Calculate rate of change.
close - last close
rate of change = 100 * ------------------
last close
"""
return 100.0 * (self.close - self.close.shift(n)) / self.close.shift(n)
def roc(self, n=20):
"""
Calculate rate of change.
close - last close
rate of change = 100 * ------------------
last close
"""
return self.rate_of_change(n)
def drawdown(self):
"""
        Calculate the drawdown from the highest high.
"""
# Don't change original data
draw_down = self.close.copy()
# Fill missing data
draw_down = draw_down.ffill()
# Ignore initial NaNs
draw_down[np.isnan(draw_down)] = -np.Inf
# Get highest high
highest_high = draw_down.expanding().max()
draw_down = (draw_down / highest_high) - 1.0
return draw_down
def drawdown_info(self):
"""
Return table of drawdown data.
"""
drawdown = self.drawdown()
is_zero = drawdown == 0
# Find start and end time
start = ~is_zero & is_zero.shift(1)
start = list(start[start].index)
end = is_zero & (~is_zero).shift(1)
end = list(end[end].index)
# Handle no ending
        if len(end) == 0:
end.append(drawdown.index[-1])
        # Handle starting in drawdown
if start[0] > end[0]:
start.insert(0, drawdown.index[0])
# Handle finishing with drawdown
if start[-1] > end[-1]:
end.append(drawdown.index[-1])
info = pd.DataFrame({
'start': start,
'end' : end,
'days' : [(e - s).days for s, e in zip(start, end)],
'drawdown':[drawdown[s:e].min() for s, e in zip(start, end)]
})
return info
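    # Example of the drawdown helpers (hypothetical prices): for closes 100, 110, 99, 104, 110,
    # drawdown() bottoms out at 99 / 110 - 1 = -10%, and drawdown_info() reports a single
    # drawdown starting at the 99 close and ending when the price recovers to 110.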
# Overlays
# --------------------------------------------------------------------------------------------------------------------------
def bollinger_bands(self, n=20, k=2):
"""
Calculate Bollinger Bands.
"""
ma = pd.rolling_mean(self.close, n)
ub = ma + k * pd.rolling_std(self.close, n)
lb = ma - k * pd.rolling_std(self.close, n)
return pd.DataFrame({'ub': ub, 'mb': ma, 'lb': lb})
def chandelier_exit(self, n=22, k=3):
"""
Calculate Chandelier Exit.
"""
atr = self.atr(n)
n_day_high = pd.rolling_max(self.high, n)
n_day_low = pd.rolling_min(self.low, n)
chdlr_exit_long = n_day_high - k * atr
        chdlr_exit_short = n_day_low + k * atr
return pd.DataFrame({'long': chdlr_exit_long, 'short': chdlr_exit_short})
def ichimoku_clouds(self, n1=9, n2=26, n3=52):
"""
Calculate Ichimoku Clouds.
"""
high = self.high
low = self.low
conversion = (pd.rolling_max(high, n1) + pd.rolling_min(low, n1)) / 2.0
base = (pd.rolling_max(high, n2) + pd.rolling_min(low, n2)) / 2.0
leading_a = (conversion + base) / 2.0
leading_b = (pd.rolling_max(high, n3) + pd.rolling_min(low, n3)) / 2.0
lagging = self.close.shift(-n2)
return pd.DataFrame({'conversion' : conversion, 'base': base, 'leadA': leading_a, 'leadB': leading_b, 'lag': lagging})
def keltner_channels(self, n=20, natr=10):
"""
Calculate Keltner Channels.
"""
atr = self.atr(natr)
ml = ema(self.close, n)
ul = ml + 2.0 * atr
ll = ml - 2.0 * atr
return pd.DataFrame({'ul': ul, 'ml': ml, 'll': ll})
def moving_average_envelopes(self, n=20, k=0.025):
"""
Calculate Moving Average Envelopes.
"""
close = self.close
ma = sma(close, n)
uma = ma + (k * ma)
lma = ma - (k * ma)
return pd.DataFrame({'uma': uma, 'ma': ma, 'lma': lma})
def parabolic_sar(self, step_r=0.02, step_f=0.02, max_af_r=0.2, max_af_f=0.2):
"""
Calculate Parabolic SAR.
"""
high = self.high
low = self.low
r_sar = pd.TimeSeries(np.zeros(len(high)), index=high.index)
f_sar = pd.TimeSeries(np.zeros(len(high)), index=high.index)
ep = high[0]
af = step_r
sar = low[0]
up = True
for i in range(1, len(high)):
if up:
# Rising SAR
ep = np.max([ep, high[i]])
af = np.min([af + step_r if (ep == high[i]) else af, max_af_r])
sar = sar + af * (ep - sar)
r_sar[i] = sar
else:
# Falling SAR
ep = np.min([ep, low[i]])
af = np.min([af + step_f if (ep == low[i]) else af, max_af_f])
sar = sar + af * (ep - sar)
f_sar[i] = sar
# Trend switch
if up and (sar > low[i] or sar > high[i]):
up = False
sar = ep
af = step_f
elif not up and (sar < low[i] or sar < high[i]):
up = True
sar = ep
af = step_r
return pd.DataFrame({'rising' : r_sar, 'falling': f_sar})
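    # The loop above follows the usual SAR recursion (a description of this code, not a
    # reference implementation): track the extreme point (EP) of the current trend, grow the
    # acceleration factor by ``step`` whenever a new EP is made (capped at ``max_af``), update
    # SAR += AF * (EP - SAR), and flip the trend when the SAR penetrates the current bar.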
def pivot_point(self):
"""
Calculate pivot point
"""
p = self.typical_price()
hl = self.high_low_spread()
s1 = (2.0 * p) - self.high
s2 = p - hl
r1 = (2.0 * p) - self.low
r2 = p + hl
return pd.DataFrame({'p': p, 's1': s1, 's2': s2, 'r1': r1, 'r2': r2})
def fibonacci_pivot_point(self):
"""
Calculate Fibonacci Pivot Point.
"""
p = self.typical_price()
hl = self.high_low_spread()
s1 = p - 0.382 * hl
s2 = p - 0.618 * hl
s3 = p - 1.0 * hl
r1 = p + 0.382 * hl
r2 = p + 0.618 * hl
r3 = p + 1.0 * hl
return pd.DataFrame({'p': p, 's1': s1, 's2': s2, 's3': s3, 'r1': r1, 'r2': r2, 'r3': r3})
def demark_pivot_point(self):
"""
Calculate Demark Pivot Point.
"""
h_l_c = self.close < self.open
h_lc = self.close > self.open
hl_c = self.close == self.open
p = np.zeros(len(self.close))
p[h_l_c] = self.high[h_l_c] + 2.0 * self.low[h_l_c] + self.close[h_l_c]
p[h_lc] = 2.0 * self.high[h_lc] + self.low[h_lc] + self.close[h_lc]
p[hl_c] = self.high[hl_c] + self.low[hl_c] + 2.0 * self.close[hl_c]
s1 = p / 2.0 - self.high
r1 = p / 2.0 - self.low
p = p / 4.0
return pd.DataFrame({'p': p, 's1': s1, 'r1': r1})
def price_channel(self, n=20):
"""
Calculate Price Channel.
"""
n_day_high = pd.rolling_max(self.high, n)
n_day_low = pd.rolling_min(self.low, n)
center = (n_day_high + n_day_low) / 2.0
return pd.DataFrame({'high': n_day_high, 'low': n_day_low, 'center': center})
def volume_by_price(self, n=14, block_num=12):
"""
Calculate Volume by Price.
"""
close = self.close
volume = self.volume
nday_closing_high = pd.rolling_max(close, n).bfill()
nday_closing_low = pd.rolling_min(close, n).bfill()
# Compute price blocks: rolling high low range in block number steps
        price_blocks = pd.DataFrame()
for low, high, in zip(nday_closing_low, nday_closing_high):
price_blocks = price_blocks.append(pd.DataFrame(np.linspace(low, high, block_num)).T)
price_blocks = price_blocks.set_index(close.index)
# Find correct block for each price, then tally that days volume
volume_by_price = pd.DataFrame(np.zeros((close.shape[0], block_num)))
for j in range(n-1, close.shape[0]):
for i, c in enumerate(close[j-(n-1):j+1]):
block = (price_blocks.iloc[i, :] <= c).sum() - 1.0
block = 0 if block < 0 else block
volume_by_price.iloc[j, block] = volume[i] + volume_by_price.iloc[j, block]
volume_by_price = volume_by_price.set_index(close.index)
return volume_by_price
def volume_weighted_average_price(self):
"""
        Calculate Volume Weighted Average Price (VWAP).
        """
tp = self.typical_price()
return (tp * self.volume).cumsum() / self.volume.cumsum()
def vwap(self):
"""Alias for volume_weighted_average_price()."""
return self.volume_weighted_average_price()
def zigzag(self, percent=7.0):
"""
Calculate Zigzag.
"""
x = self.close
zigzag = pd.TimeSeries(np.zeros(self.number_of_days), index=x.index)
lastzig = x[0]
zigzag[0] = x[0]
for i in range(1, self.number_of_days):
if np.abs((lastzig - x[i]) / x[i]) > percent / 100.0:
zigzag[i] = x[i]
lastzig = x[i]
else:
zigzag[i] = None
return pd.Series.interpolate(zigzag)
# Indicators
# --------------------------------------------------------------------------------------------------------------------------
def accumulation_distribution_line(self):
"""
        Calculate Accumulation Distribution Line (ADL).
"""
return self.money_flow_volume().cumsum()
def adl(self):
"""
Alias for accumulation_distribution_line().
"""
return self.accumulation_distribution_line()
def aroon(self, n=25):
"""
Calculate aroon.
"""
high = self.high
n_day_high = pd.rolling_max(high, n, 0)
highs = high[high == n_day_high]
time_since_last_max = (highs.index.values[1:] - highs.index.values[0:-1]).astype('timedelta64[D]').astype(int)
day_b4_high = (high == n_day_high).shift(-1).fillna(False)
days_since_high = pd.TimeSeries(np.nan + np.ones(len(high)), index=high.index)
days_since_high[day_b4_high] = time_since_last_max
days_since_high[high == n_day_high] = 0.0
days_since_high = days_since_high.interpolate('time').astype(int).clip_upper(n)
low = self.low
n_day_low = pd.rolling_min(low, n, 0)
lows = low[low == n_day_low]
time_since_last_min = (lows.index.values[1:] - lows.index.values[0:-1]).astype('timedelta64[D]').astype(int)
day_b4_low = (low == n_day_low).shift(-1).fillna(False)
days_since_low = pd.TimeSeries(np.nan + np.ones(len(low)), index=low.index)
days_since_low[day_b4_low] = time_since_last_min
days_since_low[low == n_day_low] = 0.0
days_since_low = days_since_low.interpolate('time').astype(int).clip_upper(n)
aroon_up = 100.0 * ((n - days_since_high) / n)
aroon_dn = 100.0 * ((n - days_since_low) / n)
aroon_osc = aroon_up - aroon_dn
return pd.DataFrame({'up': aroon_up, 'down': aroon_dn, 'oscillator': aroon_osc})
def average_directional_index(self, n=14):
"""
Calculate Average Directional Index (ADX).
"""
tr = self.true_range()
pdm = pd.TimeSeries(np.zeros(len(tr)), index=tr.index)
ndm = pd.TimeSeries(np.zeros(len(tr)), index=tr.index)
pdm[(self.high - self.high.shift(1)) > (self.low.shift(1) - self.low)] = (self.high - self.high.shift(1))
ndm[(self.low.shift(1) - self.low) > (self.high - self.high.shift(1))] = (self.low.shift(1) - self.low)
trn = ema(tr, n)
pdmn = ema(pdm, n)
ndmn = ema(ndm, n)
pdin = pdmn / trn
ndin = ndmn / trn
dx = ((pdin - ndin) / (pdin + ndin)).abs()
adx = ((n-1) * dx.shift(1) + dx) / n
return adx
def adx(self, n=14):
"""
Alias for average_directional_index().
"""
return self.average_directional_index(n)
def average_true_range(self, n=14):
"""
Calculate Average True Range.
!!!!!this is not a 100% correct - redo!!!!!
"""
tr = self.true_range()
return ((n-1) * tr.shift(1) + tr) / n
def atr(self, n=14):
"""
        Alias for average_true_range().
!!!!!this is not a 100% correct - redo!!!!!
"""
return self.average_true_range(n)
def bandwidth(self, n=20, k=2):
"""
Calculate Bandwidth.
"""
bb = self.bollinger_bands(n, k)
return (bb['ub'] - bb['lb']) / bb['mb']
def percent_b(self, n=20, k=2):
"""
Calculate Percent B.
"""
bb = self.bollinger_bands(n, k)
return (self.close.shift(1) - bb['lb']) / (bb['ub'] - bb['lb'])
def commodity_channel_index(self, n=20):
"""
Calculate Commodity Channel Index (CCI).
"""
tp = self.typical_price()
return (tp - pd.rolling_mean(tp, n)) / (0.015 * pd.rolling_std(tp, n))
def cci(self, n=20):
"""
Alias for commodity_channel_index().
"""
return self.commodity_channel_index(n)
def coppock_curve(self, n1=10, n2=14, n3=11):
"""
Calculate Coppock Curve.
!!!!!fix!!!!!
"""
window = range(n1)
return pd.rolling_window(self.roc(n2), window) + self.roc(n3)
def chaikin_money_flow(self, n=20):
"""
Calculate Chaikin Money Flow.
"""
return pd.rolling_sum((self.money_flow_volume()), n) / pd.rolling_sum(self.volume, n)
def cmf(self, n=20):
"""Alias for chaikin_money_flow()."""
return self.chaikin_money_flow(n)
def chaikin_oscillator(self, n1=3, n2=10):
"""
Calculate Chaikin Oscillator.
"""
return ema(self.adl(), n1) - ema(self.adl(), n2)
def price_momentum_oscillator(self, n1=20, n2=35, n3=10):
"""
Calculate Price Momentum Oscillator (PMO).
"""
pmo = ema(10 * ema((100 * (self.close / self.close.shift(1))) - 100.0, n2), n1)
signal = ema(pmo, n3)
return pd.DataFrame({'pmo': pmo, 'signal': signal})
def pmo(self, n1=20, n2=35, n3=10):
"""
Alias for price_momentum_oscillator().
"""
return self.price_momentum_oscillator(n1, n2, n3)
def detrended_price_oscillator(self, n=20):
"""
Calculate Detrended Price Oscillator (DPO).
"""
return self.close.shift(int(n / 2.0 + 1.0)) - sma(self.close, n)
def dpo(self, n=20):
"""
Alias for detrended_price_oscillator().
"""
return self.detrended_price_oscillator(n)
def ease_of_movement(self, n=14):
"""
Calculate Ease Of Movement.
"""
high_low_avg = (self.high + self.low) / 2.0
distance_moved = high_low_avg - high_low_avg.shift(1)
box_ratio = (self.volume / 100000000.0) / (self.high - self.low)
emv = distance_moved / box_ratio
return sma(emv, n)
def force_index(self, n=13):
"""
Calculate Force Index.
"""
        force_index = (self.close - self.close.shift(1)) * self.volume
return ema(force_index, n)
def know_sure_thing(self, n_sig=9):
"""
Calculate Know Sure Thing.
"""
rcma1 = sma(self.roc(10), 10)
rcma2 = sma(self.roc(15), 10)
rcma3 = sma(self.roc(20), 10)
rcma4 = sma(self.roc(30), 15)
kst = rcma1 + 2.0 * rcma2 + 3.0 * rcma3 + 4.0 * rcma4
kst_signal = sma(kst, n_sig)
return pd.DataFrame({'kst': kst, 'signal': kst_signal})
def kst(self, n_sig=9):
"""
Alias for know_sure_thing().
"""
return self.know_sure_thing(n_sig)
def mass_index(self, n1=9, n2=25):
"""
Calculate Mass Index.
"""
ema1 = ema(self.high_low_spread(), n1)
ema2 = ema(ema1, n1)
ema_ratio = ema1 / ema2
return pd.rolling_sum(ema_ratio, n2)
def moving_avg_converge_diverge(self, sn=26, fn=12, n_sig=9):
"""
        Calculate moving average convergence divergence (MACD).
"""
macd = ema(self.close, fn) - ema(self.close, sn)
macd_signal = ema(macd, n_sig)
macd_hist = macd - macd_signal
return pd.DataFrame({'macd': macd, 'signal': macd_signal, 'hist': macd_hist})
def macd(self, sn=26, fn=12, n_sig=9):
"""
Alias for moving_avg_converge_diverge().
"""
return self.moving_avg_converge_diverge(sn, fn, n_sig)
def money_flow_index(self, n=14):
"""
Calculate Money Flow Index.
"""
tp = self.typical_price()
rmf = tp * self.volume
pmf = rmf.copy()
nmf = rmf.copy()
pmf[pmf < 0] = 0.0
nmf[nmf > 0] = 0.0
mfr = pd.rolling_sum(pmf, n) / pd.rolling_sum(nmf, n)
return 100.0 - (100.0 / (1.0 + mfr))
def negative_volume_index(self, n=255):
"""
Calculate Negative Volume Index.
"""
pct_change = self.returns().cumsum()
# forward fill when volumes increase with last percent change of a volume decrease day
pct_change[self.volume > self.volume.shift(1)] = None
pct_change = pct_change.ffill()
nvi = 1000.0 + pct_change
nvi_signal = ema(nvi, n)
return pd.DataFrame({'nvi': nvi, 'signal': nvi_signal})
def nvi(self, n=255):
"""
Alias for negative_volume_index().
"""
return self.negative_volume_index(n)
def on_balance_volume(self):
"""
Calculate On Balance Volume.
"""
p_obv = self.volume.astype(float)
n_obv = (-1.0 * p_obv.copy())
p_obv[self.close < self.close.shift(1)] = 0.0
n_obv[self.close > self.close.shift(1)] = 0.0
p_obv[self.close == self.close.shift(1)] = None
n_obv[self.close == self.close.shift(1)] = None
obv = p_obv + n_obv
return obv.ffill().cumsum()
def obv(self):
"""
Alias for on_balance_volume().
"""
        return self.on_balance_volume()
def percentage_price_oscillator(self, n1=12, n2=26, n3=9):
"""
Calculate Percentage Price Oscillator.
"""
ppo = 100.0 * (ema(self.close, n1) - ema(self.close, n2)) / ema(self.close, n2)
ppo_signal = ema(ppo, n3)
ppo_hist = ppo - ppo_signal
return pd.DataFrame({'ppo': ppo, 'signal': ppo_signal, 'hist': ppo_hist})
def ppo(self, n1=12, n2=26, n3=9):
"""
Alias for percentage_price_oscillator().
"""
return self.percentage_price_oscillator(n1, n2, n3)
def percentage_volume_oscillator(self, n1=12, n2=26, n3=9):
"""
Calculate Percentage Volume Oscillator.
"""
pvo = 100.0 * (ema(self.volume, n1) - ema(self.volume, n2)) / ema(self.volume, n2)
pvo_signal = ema(pvo, n3)
pvo_hist = pvo - pvo_signal
return pd.DataFrame({'pvo': pvo, 'signal': pvo_signal, 'hist': pvo_hist})
def pvo(self, n1=12, n2=26, n3=9):
"""
Alias percentage_volume_oscillator().
"""
return self.percentage_volume_oscillator(n1, n2, n3)
def relative_strength_index(self, n=14):
"""
Calculate Relative Strength Index.
"""
change = self.close - self.close.shift(1)
gain = change.copy()
loss = change.copy()
gain[gain < 0] = 0.0
loss[loss > 0] = 0.0
loss = -1.0 * loss
avg_gain = pd.TimeSeries(np.zeros(len(gain)), index=change.index)
avg_loss = pd.TimeSeries(np.zeros(len(loss)), index=change.index)
avg_gain[n] = gain[0:n].sum() / n
avg_loss[n] = loss[0:n].sum() / n
for i in range(n+1, len(gain)):
avg_gain[i] = (n-1) * (avg_gain[i-1] / n) + (gain[i] / n)
avg_loss[i] = (n-1) * (avg_loss[i-1] / n) + (loss[i] / n)
rs = avg_gain / avg_loss
return 100.0 - (100.0 / (1.0 + rs))
def rsi(self, n=14):
"""
Alias for relative_strength_index().
"""
return self.relative_strength_index(n)
def stock_charts_tech_ranks(self, n=None, w=None):
"""
        Calculate Stock Charts Tech Ranks (SCTR).
"""
n = n if n else RANK_DAYS_IN_TRADING_YEAR
w = w if w else RANK_PERCENTS
close = self.close
long_ma = 100.0 * (1 - close / ema(close, n[0]))
long_roc = self.roc(n[1])
medium_ma = 100.0 * (1.0 - close / ema(close, n[2]))
medium_roc = self.roc(n[3])
ppo = self.ppo()
short_ppo_m = 100.0 * ((ppo['hist'] - ppo['hist'].shift(n[4])) / n[4]) / 2.0
short_rsi = self.rsi(n[5])
return w[0] * long_ma + w[1] * long_roc + w[2] * medium_ma + w[3] * medium_roc + w[4] * short_ppo_m + w[5] * short_rsi
def sctr(self, n=None, w=None):
"""
Alias for stock_charts_tech_ranks().
"""
return self.stock_charts_tech_ranks(n, w)
def slope(self):
"""
        Calculate slope (placeholder, not yet implemented; currently returns zeros).
"""
close = self.close
return pd.TimeSeries(np.zeros(len(close)), index=close.index)
def volatility(self, n=20):
"""
Calculate volatility.
"""
return pd.rolling_std(self.close, n)
def stochastic_oscillator(self, n=20, n1=3):
"""
Calculate Stochastic Oscillator.
"""
n_day_high = pd.rolling_max(self.high, n)
n_day_low = pd.rolling_min(self.low, n)
percent_k = 100.0 * (self.close - n_day_low) / (n_day_high - n_day_low)
percent_d = sma(percent_k, n1)
return pd.DataFrame({'k': percent_k, 'd': percent_d})
def stochastic_rsi(self, n=20):
"""
Calculate Stochastic RSI.
"""
rsi = self.rsi(n)
high_rsi = pd.rolling_max(rsi, n)
low_rsi = pd.rolling_min(rsi, n)
return (rsi - low_rsi) / (high_rsi - low_rsi)
def trix(self, n=15):
"""
Calculate TRIX.
"""
ema1 = ema(self.close, n)
ema2 = ema(ema1, n)
ema3 = ema(ema2, n)
return ema3.pct_change()
def true_strength_index(self, n1=25, n2=13):
"""
Calculate True Strength Index.
"""
pc = self.close - self.close.shift(1)
ema1 = ema(pc, n1)
ema2 = ema(ema1, n2)
abs_pc = (self.close - self.close.shift(1)).abs()
abs_ema1 = ema(abs_pc, n1)
abs_ema2 = ema(abs_ema1, n2)
return 100.0 * ema2 / abs_ema2
def tsi(self, n1=25, n2=13):
"""
Alias for true_strength_index().
"""
return self.true_strength_index(n1, n2)
def ulcer_index(self, n=14):
"""
Calculate Ulcer Index.
"""
percent_draw_down = 100.0 * (self.close - pd.rolling_max(self.close, n)) / pd.rolling_max(self.close, n)
return np.sqrt(pd.rolling_sum(percent_draw_down * percent_draw_down, n) / n)
def ultimate_oscillator(self, n1=7, n2=14, n3=28):
"""
Calculate Ultimate Oscillator.
"""
bp = self.close - pd.DataFrame([self.low, self.close.shift(1)]).min()
hc_max = pd.DataFrame({'a': self.high, 'b': self.close.shift(1)}, index=bp.index).max(1)
lc_min = pd.DataFrame({'a': self.low, 'b': self.close.shift(1)}, index=bp.index).min(1)
tr = hc_max - lc_min
a1 = pd.rolling_sum(bp, n1) / pd.rolling_sum(tr, n1)
a2 = pd.rolling_sum(bp, n2) / pd.rolling_sum(tr, n2)
a3 = pd.rolling_sum(bp, n3) / pd.rolling_sum(tr, n3)
return 100.0 * (4.0 * a1 + 2.0 * a2 + a3) / (4.0 + 2.0 + 1.0)
def vortex(self, n=14):
"""
Calculate Vortex.
"""
pvm = self.high - self.low.shift(1)
nvm = self.low - self.high.shift(1)
pvm14 = pd.rolling_sum(pvm, n)
nvm14 = pd.rolling_sum(nvm, n)
hc_abs = (self.high - self.close.shift(1)).abs()
lc_abs = (self.low - self.close.shift(1)).abs()
tr = pd.DataFrame({'a': self.high_low_spread(), 'b': hc_abs, 'c': lc_abs}, index=pvm.index).max(1)
tr14 = pd.rolling_sum(tr, n)
pvi14 = pvm14 / tr14
nvi14 = nvm14 / tr14
return pd.DataFrame({'+': pvi14, '-': nvi14})
def william_percent_r(self, n=14):
"""
Calculate William Percent R.
"""
high_max = pd.rolling_max(self.high, n)
low_min = pd.rolling_min(self.low, n)
return -100.0 * (high_max - self.close) / (high_max - low_min)
# Charting
# --------------------------------------------------------------------------------------------------------------------------
def gaps(self):
"""
Calculate gaps.
"""
o = self.open
c = self.close
c2o = self.close_to_open_range()
gap = pd.TimeSeries(np.zeros(len(c)), index=c.index)
gap[o > c.shift()] = c2o
gap[o < c.shift()] = c2o
return gap
def speedlines(self, n=20):
"""
Calculate Speedlines.
"""
high = self.high
n_day_high = pd.rolling_max(high, n, 0)
highs = high[high == n_day_high]
time_since_last_max = (highs.index.values[1:] - highs.index.values[0:-1]).astype('timedelta64[D]').astype(int)
day_b4_high = (high == n_day_high).shift(-1).fillna(False)
days_since_high = pd.TimeSeries(np.nan + np.ones(len(high)), index=high.index)
days_since_high[day_b4_high] = time_since_last_max
days_since_high[high == n_day_high] = 0.0
days_since_high = days_since_high.interpolate('time').astype(int).clip_upper(n)
low = self.low
n_day_low = pd.rolling_min(low, n, 0)
lows = low[low == n_day_low]
time_since_last_min = (lows.index.values[1:] - lows.index.values[0:-1]).astype('timedelta64[D]').astype(int)
day_b4_low = (low == n_day_low).shift(-1).fillna(False)
days_since_low = pd.TimeSeries(np.nan + np.ones(len(low)), index=low.index)
days_since_low[day_b4_low] = time_since_last_min
days_since_low[low == n_day_low] = 0.0
days_since_low = days_since_low.interpolate('time').astype(int).clip_upper(n)
trend_length = (days_since_high - days_since_low)
trend = trend_length
days_behind = pd.TimeSeries(np.zeros(len(low)), index=low.index)
days_behind[trend > 0] = days_since_low
days_behind[trend < 0] = days_since_high
p = pd.TimeSeries(np.nan + np.zeros(len(low)), index=low.index)
p2_3 = pd.TimeSeries(np.nan + np.zeros(len(low)), index=low.index)
p1_3 = pd.TimeSeries(np.nan + np.zeros(len(low)), index=low.index)
base = pd.TimeSeries(np.nan + np.zeros(len(low)), index=low.index)
p[trend > 0] = n_day_low
p[trend < 0] = n_day_high
base[trend > 0] = n_day_high
base[trend < 0] = n_day_low
p2_3[trend > 0] = n_day_high - ((2.0 / 3.0) * (n_day_high - n_day_low))
p2_3[trend < 0] = n_day_low + ((2.0 / 3.0) * (n_day_high - n_day_low))
p1_3[trend > 0] = n_day_high - ((1.0 / 3.0) * (n_day_high - n_day_low))
p1_3[trend < 0] = n_day_low + ((1.0 / 3.0) * (n_day_high - n_day_low))
p = p.ffill()
base = base.ffill()
p2_3 = p2_3.ffill()
p1_3 = p1_3.ffill()
p_slope = pd.TimeSeries(np.nan + np.zeros(len(low)), index=low.index)
p2_3_slope = pd.TimeSeries(np.nan + np.zeros(len(low)), index=low.index)
p1_3_slope = pd.TimeSeries(np.nan + np.zeros(len(low)), index=low.index)
p_slope[trend > 0] = ((base - p) / (n + trend_length))
p_slope[trend < 0] = ((base - p) / (n - trend_length))
p2_3_slope[trend > 0] = ((base - p2_3) / (n + trend_length))
p2_3_slope[trend < 0] = ((base - p2_3) / (n - trend_length))
p1_3_slope[trend > 0] = ((base - p1_3) / (n + trend_length))
p1_3_slope[trend < 0] = ((base - p1_3) / (n - trend_length))
p_slope = p_slope.ffill()
p2_3_slope = p2_3_slope.ffill()
p1_3_slope = p1_3_slope.ffill()
p_now = p + (p_slope * days_behind)
# p2_3_now = p2_3 + (p2_3_slope * days_behind)
# p1_3_now = p1_3 + (p1_3_slope * days_behind)
return pd.DataFrame({'p': p_now, 'p2/3': p2_3, 'p1/3': p1_3})
# Return Asset Performance
# --------------------------------------------------------------------------------------------------------------------------
def returns(self, periods=1, freq=None):
"""
Calculate returns of asset over interval period and frequency offset freq string:
B business day frequency
C custom business day frequency (experimental)
D calendar day frequency
W weekly frequency
M month end frequency
BM business month end frequency
MS month start frequency
BMS business month start frequency
Q quarter end frequency
        BQ business quarter end frequency
QS quarter start frequency
BQS business quarter start frequency
A year end frequency
BA business year end frequency
AS year start frequency
BAS business year start frequency
H hourly frequency
T minutely frequency
S secondly frequency
        L milliseconds
U microseconds
"""
return self.close.pct_change(periods=periods, freq=freq)
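    # For instance (illustrative only): returns() gives daily percent changes,
    # returns(periods=5) 5-period changes, and returns(freq='M') roughly month-over-month
    # changes, all delegated to pandas pct_change().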
def price_returns(self, periods=1):
"""
Calculate price change over period.
"""
return (self.close - self.close.shift(periods)).fillna(0)
def arithmetic_return(self, periods=1, freq=None):
"""
Calculate arithmetic return.
"""
returns = self.returns(periods=periods, freq=freq).fillna(0)
return 100.0 * np.mean(returns)
def geometric_return(self, periods=1, freq=None):
"""
Calculate geometric return.
"""
returns = self.returns(periods=periods, freq=freq).fillna(0)
return 100.0 * (scipy.stats.gmean(1.0 + returns) - 1.0)
def rate_of_return(self, periods=DAYS_IN_TRADING_YEAR, freq=None):
"""
Calculate rate of return over time period freq, default to yearly (DAYS_IN_TRADING_YEAR days).
"""
returns = self.returns(periods=periods, freq=freq).fillna(0)
return returns / periods
def price_delta(self, start=None, end=None):
"""
        Calculate absolute price change between dates, defaults to change over the full history.
"""
end = end if end else -1
start = start if start else 0
return self.close[end] - self.close[start]
def total_return(self, start=None, end=None):
"""
Calculate returns between dates, defaults to total return.
"""
start = start if start else 0
return 100.0 * self.price_delta(start=start, end=end) / self.close[start]
def return_on_investment(self, periods=DAYS_IN_TRADING_YEAR, freq=None):
"""
Calculate Return on Investment (ROI).
"""
pass
def roi(self, periods=DAYS_IN_TRADING_YEAR, freq=None):
"""
Alias for return_on_investment().
"""
return self.return_on_investment(periods=periods, freq=freq)
def compound_annual_growth_rate(self, start=None, end=None):
"""
Calculate Compound Annual Growth Rate (CAGR).
"""
end = end if end else -1
start = start if start else 0
enddate = self.close.index[end]
startdate = self.close.index[start]
years = (enddate - startdate).days / DAYS_IN_YEAR
return np.power((self.close[end] / self.close[start]), (1.0 / years)) - 1.0
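    # Worked example (hypothetical numbers): a close that doubles from 100 to 200 over five
    # years gives roughly (200 / 100) ** (1 / 5.0) - 1 ~= 0.149, i.e. a CAGR of about 14.9%.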
def cagr(self, start=None, end=None):
"""
Alias for compound_annual_growth_rate().
"""
return self.compound_annual_growth_rate(start=start, end=end)
# Risk Performance
# --------------------------------------------------------------------------------------------------------------------------
def deviation_risk(self):
"""
Calculate Deviation Risk.
"""
return self.returns().std()
# Risk Adjusted Performance
# --------------------------------------------------------------------------------------------------------------------------
def risk_return_ratio(self):
"""
Calculate Sharpe Ratio w/o Risk-Free Rate.
"""
daily_ret = self.returns()
return np.sqrt(DAYS_IN_TRADING_YEAR) * daily_ret.mean() / daily_ret.std()
def information_ratio(self, benchmark):
"""
        Calculate the information ratio relative to a benchmark.
"""
return_delta = self.returns() - benchmark.returns()
return return_delta.mean() / return_delta.std()
# Market Comparisons
# --------------------------------------------------------------------------------------------------------------------------
def sharpe_ratio(self, market):
"""
        Calculate Sharpe Ratio against benchmark.
"""
return_delta = self.returns() - market.returns()
return return_delta.mean() / return_delta.std()
def annualized_sharpe_ratio(self, market, N=DAYS_IN_TRADING_YEAR):
"""
Calculate Annualized Sharpe Ratio against benchmark.
"""
return np.sqrt(N) * self.sharpe_ratio(market)
def equity_sharpe(self, market, risk_free_rate=RISK_FREE_RATE, N=DAYS_IN_TRADING_YEAR):
"""
Calculate the Equity sharpe against a benchmark and Risk-Free Rate.
"""
excess_returns = self.returns() - risk_free_rate / N
return_delta = excess_returns - market.returns()
return np.sqrt(N) * return_delta.mean() / return_delta.std()
def beta(self, market):
"""
        Calculate the Beta to a benchmark.
"""
        cov = np.cov(self.returns(), market.returns())
return cov[0, 1] / cov[1, 1]
def alpha(self, market, risk_free_rate=RISK_FREE_RATE):
"""
Calculate the Alpha to a benchmark.
"""
        return self.returns().mean() - risk_free_rate - self.beta(market) * (market.returns().mean() - risk_free_rate)
def r_squared(self, market, risk_free_rate=RISK_FREE_RATE):
"""
Calculate R-squared.
"""
eps_i = 0.0
r_i = self.alpha(market) + self.beta(market) * (market.close.returns() - risk_free_rate) + eps_i + risk_free_rate
cov = np.cov(self.close.returns(), market.close.returns())
ss_res = np.sum(np.power(r_i - self.close.returns(), 2.0))  # sum of squared residuals
ss_tot = cov[0, 0] * (len(self.close.returns()) - 1.0)
return 1.0 - (ss_res / ss_tot)
# Package everything up; used mainly as a smoke test that all the indicators compute without errors
# --------------------------------------------------------------------------------------------------------------------------
def all_indicators(self):
"""
Calculate all indicators for the asset.
"""
# Indicators that return multiple series; their components are split out below
quadrant_range = self.quadrant_range()
bollinger_bands = self.bollinger_bands()
chandelier_exit = self.chandelier_exit()
ichimoku_clouds = self.ichimoku_clouds()
keltner_channels = self.keltner_channels()
moving_average_envelopes = self.moving_average_envelopes()
parabolic_sar = self.parabolic_sar()
pivot_point = self.pivot_point()
fibonacci_pivot_point = self.fibonacci_pivot_point()
demark_pivot_point = self.demark_pivot_point()
price_channel = self.price_channel()
aroon = self.aroon()
price_momentum_oscillator = self.price_momentum_oscillator()
know_sure_thing = self.know_sure_thing()
macd = self.macd()
negative_volume_index = self.negative_volume_index()
percentage_price_oscillator = self.percentage_price_oscillator()
percentage_volume_oscillator = self.percentage_volume_oscillator()
stochastic_oscillator = self.stochastic_oscillator()
vortex = self.vortex()
# Return all indicators
return pd.DataFrame({
'return' : self.returns(),
'money_flow' : self.money_flow(),
'money_flow_volume' : self.money_flow_volume(),
'typical_price' : self.typical_price(),
'close_to_open_range' : self.close_to_open_range(),
'l1_quadrant_range' : quadrant_range['1'],
'l2_quadrant_range' : quadrant_range['2'],
'l3_quadrant_range' : quadrant_range['3'],
'l4_quadrant_range' : quadrant_range['4'],
'l5_quadrant_range' : quadrant_range['5'],
'true_range' : self.true_range(),
'high_low_spread' : self.high_low_spread(),
'roc' : self.rate_of_change(),
'upper_bollinger_band' : bollinger_bands['ub'],
'center_bollinger_band' : bollinger_bands['mb'],
'lower_bollinger_band' : bollinger_bands['lb'],
'long_chandelier_exit' : chandelier_exit['long'],
'short_chandelier_exit' : chandelier_exit['short'],
'conversion_ichimoku_cloud': ichimoku_clouds['conversion'],
'base_line_ichimoku_cloud' : ichimoku_clouds['base'],
'leadingA_ichimoku_cloud' : ichimoku_clouds['leadA'],
'leadingB_ichimoku_cloud' : ichimoku_clouds['leadB'],
'lagging_ichimoku_cloud' : ichimoku_clouds['lag'],
'upper_keltner_channel' : keltner_channels['ul'],
'center_keltner_channel' : keltner_channels['ml'],
'lower_keltner_channel' : keltner_channels['ll'],
'upper_ma_envelope' : moving_average_envelopes['uma'],
'center_ma_envelope' : moving_average_envelopes['ma'],
'lower_ma_envelope' : moving_average_envelopes['lma'],
'rising_parabolic_sar' : parabolic_sar['rising'],
'falling_parabolic_sar' : parabolic_sar['falling'],
'p_pivot_point' : pivot_point['p'],
's1_pivot_point' : pivot_point['s1'],
's2_pivot_point' : pivot_point['s2'],
'r1_pivot_point' : pivot_point['r1'],
'r2_pivot_point' : pivot_point['r2'],
'p_fibonacci_pivot_point' : fibonacci_pivot_point['p'],
's1_fibonacci_pivot_point' : fibonacci_pivot_point['s1'],
's2_fibonacci_pivot_point' : fibonacci_pivot_point['s2'],
's3_fibonacci_pivot_point' : fibonacci_pivot_point['s3'],
'r1_fibonacci_pivot_point' : fibonacci_pivot_point['r1'],
'r2_fibonacci_pivot_point' : fibonacci_pivot_point['r2'],
'r3_fibonacci_pivot_point' : fibonacci_pivot_point['r3'],
'p_demark_pivot_point' : demark_pivot_point['p'],
's1_demark_pivot_point' : demark_pivot_point['s1'],
'r1_demark_pivot_point' : demark_pivot_point['r1'],
'high_price_channel' : price_channel['high'],
'low_price_channel' : price_channel['low'],
'center_price_channel' : price_channel['center'],
'volume_by_price' : self.volume_by_price(),
'vwap' : self.volume_weighted_average_price(),
'zigzag' : self.zigzag(),
'adl' : self.accumulation_distribution_line(),
'aroon_up' : aroon['up'],
'aroon_down' : aroon['down'],
'aroon_oscillator' : aroon['oscillator'],
'adx' : self.average_directional_index(),
'atr' : self.average_true_range(),
'bandwidth' : self.bandwidth(),
'%b' : self.percent_b(),
'cci' : self.commodity_channel_index(),
'coppock_curve' : self.coppock_curve(),
'chaikin_money_flow' : self.chaikin_money_flow(),
'chaikin_oscillator' : self.chaikin_oscillator(),
'pmo' : price_momentum_oscillator['pmo'],
'pmo_signal' : price_momentum_oscillator['signal'],
'dpo' : self.detrended_price_oscillator(),
'ease_of_movement' : self.ease_of_movement(),
'force_index' : self.force_index(),
'kst' : know_sure_thing['kst'],
'kst_signal' : know_sure_thing['signal'],
'mass_index' : self.mass_index(),
'macd' : macd['macd'],
'macd_signal' : macd['signal'],
'macd_hist' : macd['hist'],
'money_flow_index' : self.money_flow_index(),
'nvi' : negative_volume_index['nvi'],
'nvi_signal' : negative_volume_index['signal'],
'obv' : self.on_balance_volume(),
'ppo' : percentage_price_oscillator['ppo'],
'ppo_signal' : percentage_price_oscillator['signal'],
'ppo_hist' : percentage_price_oscillator['hist'],
'pvo' : percentage_volume_oscillator['pvo'],
'pvo_signal' : percentage_volume_oscillator['signal'],
'pvo_hist' : percentage_volume_oscillator['hist'],
'rsi' : self.relative_strength_index(),
'sctr' : self.stock_charts_tech_ranks(),
's' : self.slope(),
'volatility' : self.volatility(),
'%k_stochastic_oscillator' : stochastic_oscillator['k'],
'%d_stochastic_oscillator' : stochastic_oscillator['d'],
'stochastic_rsi' : self.stochastic_rsi(),
'trix' : self.trix(),
'tsi' : self.true_strength_index(),
'ulcer_index' : self.ulcer_index(),
'ultimate_oscillator' : self.ultimate_oscillator(),
'+vortex' : vortex['+'],
'-vortex' : vortex['-'],
'william_percent_r' : self.william_percent_r()
})
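# Standalone sketch of the market-comparison formulas used above (annualized
# Sharpe ratio, beta as cov/var, and CAPM alpha), evaluated on synthetic daily
# returns. Illustrative only: the asset/market objects and their returns()
# methods are not reproduced here, so plain numpy arrays and hard-coded
# stand-ins (252 trading days, a 2% annual risk-free rate) are assumed.
import numpy as np

rng = np.random.default_rng(0)
asset_returns = rng.normal(0.0005, 0.010, 252)   # one year of fake daily returns
market_returns = rng.normal(0.0004, 0.008, 252)
daily_risk_free = 0.02 / 252                     # assumed daily risk-free rate

# Annualized Sharpe ratio of the asset's excess return over the market,
# mirroring annualized_sharpe_ratio() above.
delta = asset_returns - market_returns
sharpe = np.sqrt(252) * delta.mean() / delta.std()

# Beta as cov(asset, market) / var(market), as in beta() above.
cov = np.cov(asset_returns, market_returns)
beta = cov[0, 1] / cov[1, 1]

# CAPM alpha, as in alpha() above.
alpha = (asset_returns.mean() - daily_risk_free
         - beta * (market_returns.mean() - daily_risk_free))

print("sharpe", sharpe, "beta", beta, "alpha", alpha)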
|
mit
|
cbertinato/pandas
|
pandas/tests/io/formats/test_style.py
|
1
|
54948
|
import copy
import re
import textwrap
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame
import pandas.util.testing as tm
jinja2 = pytest.importorskip('jinja2')
from pandas.io.formats.style import Styler, _get_level_lengths # noqa # isort:skip
class TestStyler:
def setup_method(self, method):
np.random.seed(24)
self.s = DataFrame({'A': np.random.permutation(range(6))})
self.df = DataFrame({'A': [0, 1], 'B': np.random.randn(2)})
self.f = lambda x: x
self.g = lambda x: x
def h(x, foo='bar'):
return pd.Series(
'color: {foo}'.format(foo=foo), index=x.index, name=x.name)
self.h = h
self.styler = Styler(self.df)
self.attrs = pd.DataFrame({'A': ['color: red', 'color: blue']})
self.dataframes = [
self.df,
pd.DataFrame({'f': [1., 2.], 'o': ['a', 'b'],
'c': pd.Categorical(['a', 'b'])})
]
def test_init_non_pandas(self):
with pytest.raises(TypeError):
Styler([1, 2, 3])
def test_init_series(self):
result = Styler(pd.Series([1, 2]))
assert result.data.ndim == 2
def test_repr_html_ok(self):
self.styler._repr_html_()
def test_repr_html_mathjax(self):
# gh-19824
assert 'tex2jax_ignore' not in self.styler._repr_html_()
with pd.option_context('display.html.use_mathjax', False):
assert 'tex2jax_ignore' in self.styler._repr_html_()
def test_update_ctx(self):
self.styler._update_ctx(self.attrs)
expected = {(0, 0): ['color: red'],
(1, 0): ['color: blue']}
assert self.styler.ctx == expected
def test_update_ctx_flatten_multi(self):
attrs = DataFrame({"A": ['color: red; foo: bar',
'color: blue; foo: baz']})
self.styler._update_ctx(attrs)
expected = {(0, 0): ['color: red', ' foo: bar'],
(1, 0): ['color: blue', ' foo: baz']}
assert self.styler.ctx == expected
def test_update_ctx_flatten_multi_trailing_semi(self):
attrs = DataFrame({"A": ['color: red; foo: bar;',
'color: blue; foo: baz;']})
self.styler._update_ctx(attrs)
expected = {(0, 0): ['color: red', ' foo: bar'],
(1, 0): ['color: blue', ' foo: baz']}
assert self.styler.ctx == expected
def test_copy(self):
s2 = copy.copy(self.styler)
assert self.styler is not s2
assert self.styler.ctx is s2.ctx # shallow
assert self.styler._todo is s2._todo
self.styler._update_ctx(self.attrs)
self.styler.highlight_max()
assert self.styler.ctx == s2.ctx
assert self.styler._todo == s2._todo
def test_deepcopy(self):
s2 = copy.deepcopy(self.styler)
assert self.styler is not s2
assert self.styler.ctx is not s2.ctx
assert self.styler._todo is not s2._todo
self.styler._update_ctx(self.attrs)
self.styler.highlight_max()
assert self.styler.ctx != s2.ctx
assert s2._todo == []
assert self.styler._todo != s2._todo
def test_clear(self):
s = self.df.style.highlight_max()._compute()
assert len(s.ctx) > 0
assert len(s._todo) > 0
s.clear()
assert len(s.ctx) == 0
assert len(s._todo) == 0
def test_render(self):
df = pd.DataFrame({"A": [0, 1]})
style = lambda x: pd.Series(["color: red", "color: blue"], name=x.name)
s = Styler(df, uuid='AB').apply(style)
s.render()
# it worked?
def test_render_empty_dfs(self):
empty_df = DataFrame()
es = Styler(empty_df)
es.render()
# An index but no columns
DataFrame(columns=['a']).style.render()
# A column but no index
DataFrame(index=['a']).style.render()
# No IndexError raised?
def test_render_double(self):
df = pd.DataFrame({"A": [0, 1]})
style = lambda x: pd.Series(["color: red; border: 1px",
"color: blue; border: 2px"], name=x.name)
s = Styler(df, uuid='AB').apply(style)
s.render()
# it worked?
def test_set_properties(self):
df = pd.DataFrame({"A": [0, 1]})
result = df.style.set_properties(color='white',
size='10px')._compute().ctx
# order is deterministic
v = ["color: white", "size: 10px"]
expected = {(0, 0): v, (1, 0): v}
assert result.keys() == expected.keys()
for v1, v2 in zip(result.values(), expected.values()):
assert sorted(v1) == sorted(v2)
def test_set_properties_subset(self):
df = pd.DataFrame({'A': [0, 1]})
result = df.style.set_properties(subset=pd.IndexSlice[0, 'A'],
color='white')._compute().ctx
expected = {(0, 0): ['color: white']}
assert result == expected
def test_empty_index_name_doesnt_display(self):
# https://github.com/pandas-dev/pandas/pull/12090#issuecomment-180695902
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
result = df.style._translate()
expected = [[{'class': 'blank level0', 'type': 'th', 'value': '',
'is_visible': True, 'display_value': ''},
{'class': 'col_heading level0 col0',
'display_value': 'A',
'type': 'th',
'value': 'A',
'is_visible': True,
},
{'class': 'col_heading level0 col1',
'display_value': 'B',
'type': 'th',
'value': 'B',
'is_visible': True,
},
{'class': 'col_heading level0 col2',
'display_value': 'C',
'type': 'th',
'value': 'C',
'is_visible': True,
}]]
assert result['head'] == expected
def test_index_name(self):
# https://github.com/pandas-dev/pandas/issues/11655
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
result = df.set_index('A').style._translate()
expected = [[{'class': 'blank level0', 'type': 'th', 'value': '',
'display_value': '', 'is_visible': True},
{'class': 'col_heading level0 col0', 'type': 'th',
'value': 'B', 'display_value': 'B', 'is_visible': True},
{'class': 'col_heading level0 col1', 'type': 'th',
'value': 'C', 'display_value': 'C', 'is_visible': True}],
[{'class': 'index_name level0', 'type': 'th',
'value': 'A'},
{'class': 'blank', 'type': 'th', 'value': ''},
{'class': 'blank', 'type': 'th', 'value': ''}]]
assert result['head'] == expected
def test_multiindex_name(self):
# https://github.com/pandas-dev/pandas/issues/11655
df = pd.DataFrame({'A': [1, 2], 'B': [3, 4], 'C': [5, 6]})
result = df.set_index(['A', 'B']).style._translate()
expected = [[
{'class': 'blank', 'type': 'th', 'value': '',
'display_value': '', 'is_visible': True},
{'class': 'blank level0', 'type': 'th', 'value': '',
'display_value': '', 'is_visible': True},
{'class': 'col_heading level0 col0', 'type': 'th',
'value': 'C', 'display_value': 'C', 'is_visible': True}],
[{'class': 'index_name level0', 'type': 'th',
'value': 'A'},
{'class': 'index_name level1', 'type': 'th',
'value': 'B'},
{'class': 'blank', 'type': 'th', 'value': ''}]]
assert result['head'] == expected
def test_numeric_columns(self):
# https://github.com/pandas-dev/pandas/issues/12125
# smoke test for _translate
df = pd.DataFrame({0: [1, 2, 3]})
df.style._translate()
def test_apply_axis(self):
df = pd.DataFrame({'A': [0, 0], 'B': [1, 1]})
f = lambda x: ['val: {max}'.format(max=x.max()) for v in x]
result = df.style.apply(f, axis=1)
assert len(result._todo) == 1
assert len(result.ctx) == 0
result._compute()
expected = {(0, 0): ['val: 1'], (0, 1): ['val: 1'],
(1, 0): ['val: 1'], (1, 1): ['val: 1']}
assert result.ctx == expected
result = df.style.apply(f, axis=0)
expected = {(0, 0): ['val: 0'], (0, 1): ['val: 1'],
(1, 0): ['val: 0'], (1, 1): ['val: 1']}
result._compute()
assert result.ctx == expected
result = df.style.apply(f) # default
result._compute()
assert result.ctx == expected
def test_apply_subset(self):
axes = [0, 1]
slices = [pd.IndexSlice[:], pd.IndexSlice[:, ['A']],
pd.IndexSlice[[1], :], pd.IndexSlice[[1], ['A']],
pd.IndexSlice[:2, ['A', 'B']]]
for ax in axes:
for slice_ in slices:
result = self.df.style.apply(self.h, axis=ax, subset=slice_,
foo='baz')._compute().ctx
expected = {(r, c): ['color: baz']
for r, row in enumerate(self.df.index)
for c, col in enumerate(self.df.columns)
if row in self.df.loc[slice_].index and
col in self.df.loc[slice_].columns}
assert result == expected
def test_applymap_subset(self):
def f(x):
return 'foo: bar'
slices = [pd.IndexSlice[:], pd.IndexSlice[:, ['A']],
pd.IndexSlice[[1], :], pd.IndexSlice[[1], ['A']],
pd.IndexSlice[:2, ['A', 'B']]]
for slice_ in slices:
result = self.df.style.applymap(f, subset=slice_)._compute().ctx
expected = {(r, c): ['foo: bar']
for r, row in enumerate(self.df.index)
for c, col in enumerate(self.df.columns)
if row in self.df.loc[slice_].index and
col in self.df.loc[slice_].columns}
assert result == expected
def test_applymap_subset_multiindex(self):
# GH 19861
# Smoke test for applymap
def color_negative_red(val):
"""
Takes a scalar and returns a string with
the css property `'color: red'` for negative
values, black otherwise.
"""
color = 'red' if val < 0 else 'black'
return 'color: %s' % color
dic = {
('a', 'd'): [-1.12, 2.11],
('a', 'c'): [2.78, -2.88],
('b', 'c'): [-3.99, 3.77],
('b', 'd'): [4.21, -1.22],
}
idx = pd.IndexSlice
df = pd.DataFrame(dic, index=[0, 1])
(df.style
.applymap(color_negative_red, subset=idx[:, idx['b', 'd']])
.render())
def test_where_with_one_style(self):
# GH 17474
def f(x):
return x > 0.5
style1 = 'foo: bar'
result = self.df.style.where(f, style1)._compute().ctx
expected = {(r, c): [style1 if f(self.df.loc[row, col]) else '']
for r, row in enumerate(self.df.index)
for c, col in enumerate(self.df.columns)}
assert result == expected
def test_where_subset(self):
# GH 17474
def f(x):
return x > 0.5
style1 = 'foo: bar'
style2 = 'baz: foo'
slices = [pd.IndexSlice[:], pd.IndexSlice[:, ['A']],
pd.IndexSlice[[1], :], pd.IndexSlice[[1], ['A']],
pd.IndexSlice[:2, ['A', 'B']]]
for slice_ in slices:
result = self.df.style.where(f, style1, style2,
subset=slice_)._compute().ctx
expected = {(r, c):
[style1 if f(self.df.loc[row, col]) else style2]
for r, row in enumerate(self.df.index)
for c, col in enumerate(self.df.columns)
if row in self.df.loc[slice_].index and
col in self.df.loc[slice_].columns}
assert result == expected
def test_where_subset_compare_with_applymap(self):
# GH 17474
def f(x):
return x > 0.5
style1 = 'foo: bar'
style2 = 'baz: foo'
def g(x):
return style1 if f(x) else style2
slices = [pd.IndexSlice[:], pd.IndexSlice[:, ['A']],
pd.IndexSlice[[1], :], pd.IndexSlice[[1], ['A']],
pd.IndexSlice[:2, ['A', 'B']]]
for slice_ in slices:
result = self.df.style.where(f, style1, style2,
subset=slice_)._compute().ctx
expected = self.df.style.applymap(g, subset=slice_)._compute().ctx
assert result == expected
def test_empty(self):
df = pd.DataFrame({'A': [1, 0]})
s = df.style
s.ctx = {(0, 0): ['color: red'],
(1, 0): ['']}
result = s._translate()['cellstyle']
expected = [{'props': [['color', ' red']], 'selector': 'row0_col0'},
{'props': [['', '']], 'selector': 'row1_col0'}]
assert result == expected
def test_bar_align_left(self):
df = pd.DataFrame({'A': [0, 1, 2]})
result = df.style.bar()._compute().ctx
expected = {
(0, 0): ['width: 10em', ' height: 80%'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient('
'90deg,#d65f5f 50.0%, transparent 50.0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient('
'90deg,#d65f5f 100.0%, transparent 100.0%)']
}
assert result == expected
result = df.style.bar(color='red', width=50)._compute().ctx
expected = {
(0, 0): ['width: 10em', ' height: 80%'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient('
'90deg,red 25.0%, transparent 25.0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient('
'90deg,red 50.0%, transparent 50.0%)']
}
assert result == expected
df['C'] = ['a'] * len(df)
result = df.style.bar(color='red', width=50)._compute().ctx
assert result == expected
df['C'] = df['C'].astype('category')
result = df.style.bar(color='red', width=50)._compute().ctx
assert result == expected
def test_bar_align_left_0points(self):
df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
result = df.style.bar()._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%'],
(0, 1): ['width: 10em', ' height: 80%'],
(0, 2): ['width: 10em', ' height: 80%'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 50.0%,'
' transparent 50.0%)'],
(1, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 50.0%,'
' transparent 50.0%)'],
(1, 2): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 50.0%,'
' transparent 50.0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 100.0%'
', transparent 100.0%)'],
(2, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 100.0%'
', transparent 100.0%)'],
(2, 2): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 100.0%'
', transparent 100.0%)']}
assert result == expected
result = df.style.bar(axis=1)._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%'],
(0, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 50.0%,'
' transparent 50.0%)'],
(0, 2): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 100.0%'
', transparent 100.0%)'],
(1, 0): ['width: 10em', ' height: 80%'],
(1, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 50.0%'
', transparent 50.0%)'],
(1, 2): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 100.0%'
', transparent 100.0%)'],
(2, 0): ['width: 10em', ' height: 80%'],
(2, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 50.0%'
', transparent 50.0%)'],
(2, 2): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,#d65f5f 100.0%'
', transparent 100.0%)']}
assert result == expected
def test_bar_align_mid_pos_and_neg(self):
df = pd.DataFrame({'A': [-10, 0, 20, 90]})
result = df.style.bar(align='mid', color=[
'#d65f5f', '#5fba7d'])._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,'
'#d65f5f 10.0%, transparent 10.0%)'],
(1, 0): ['width: 10em', ' height: 80%', ],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 10.0%, #5fba7d 10.0%'
', #5fba7d 30.0%, transparent 30.0%)'],
(3, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 10.0%, '
'#5fba7d 10.0%, #5fba7d 100.0%, '
'transparent 100.0%)']}
assert result == expected
def test_bar_align_mid_all_pos(self):
df = pd.DataFrame({'A': [10, 20, 50, 100]})
result = df.style.bar(align='mid', color=[
'#d65f5f', '#5fba7d'])._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,'
'#5fba7d 10.0%, transparent 10.0%)'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,'
'#5fba7d 20.0%, transparent 20.0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,'
'#5fba7d 50.0%, transparent 50.0%)'],
(3, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,'
'#5fba7d 100.0%, transparent 100.0%)']}
assert result == expected
def test_bar_align_mid_all_neg(self):
df = pd.DataFrame({'A': [-100, -60, -30, -20]})
result = df.style.bar(align='mid', color=[
'#d65f5f', '#5fba7d'])._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,'
'#d65f5f 100.0%, transparent 100.0%)'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 40.0%, '
'#d65f5f 40.0%, #d65f5f 100.0%, '
'transparent 100.0%)'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 70.0%, '
'#d65f5f 70.0%, #d65f5f 100.0%, '
'transparent 100.0%)'],
(3, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 80.0%, '
'#d65f5f 80.0%, #d65f5f 100.0%, '
'transparent 100.0%)']}
assert result == expected
def test_bar_align_zero_pos_and_neg(self):
# See https://github.com/pandas-dev/pandas/pull/14757
df = pd.DataFrame({'A': [-10, 0, 20, 90]})
result = df.style.bar(align='zero', color=[
'#d65f5f', '#5fba7d'], width=90)._compute().ctx
expected = {(0, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 40.0%, #d65f5f 40.0%, '
'#d65f5f 45.0%, transparent 45.0%)'],
(1, 0): ['width: 10em', ' height: 80%'],
(2, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 45.0%, #5fba7d 45.0%, '
'#5fba7d 55.0%, transparent 55.0%)'],
(3, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 45.0%, #5fba7d 45.0%, '
'#5fba7d 90.0%, transparent 90.0%)']}
assert result == expected
def test_bar_align_left_axis_none(self):
df = pd.DataFrame({'A': [0, 1], 'B': [2, 4]})
result = df.style.bar(axis=None)._compute().ctx
expected = {
(0, 0): ['width: 10em', ' height: 80%'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,'
'#d65f5f 25.0%, transparent 25.0%)'],
(0, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,'
'#d65f5f 50.0%, transparent 50.0%)'],
(1, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,'
'#d65f5f 100.0%, transparent 100.0%)']
}
assert result == expected
def test_bar_align_zero_axis_none(self):
df = pd.DataFrame({'A': [0, 1], 'B': [-2, 4]})
result = df.style.bar(align='zero', axis=None)._compute().ctx
expected = {
(0, 0): ['width: 10em', ' height: 80%'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 50.0%, #d65f5f 50.0%, '
'#d65f5f 62.5%, transparent 62.5%)'],
(0, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 25.0%, #d65f5f 25.0%, '
'#d65f5f 50.0%, transparent 50.0%)'],
(1, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 50.0%, #d65f5f 50.0%, '
'#d65f5f 100.0%, transparent 100.0%)']
}
assert result == expected
def test_bar_align_mid_axis_none(self):
df = pd.DataFrame({'A': [0, 1], 'B': [-2, 4]})
result = df.style.bar(align='mid', axis=None)._compute().ctx
expected = {
(0, 0): ['width: 10em', ' height: 80%'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 33.3%, #d65f5f 33.3%, '
'#d65f5f 50.0%, transparent 50.0%)'],
(0, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,'
'#d65f5f 33.3%, transparent 33.3%)'],
(1, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 33.3%, #d65f5f 33.3%, '
'#d65f5f 100.0%, transparent 100.0%)']
}
assert result == expected
def test_bar_align_mid_vmin(self):
df = pd.DataFrame({'A': [0, 1], 'B': [-2, 4]})
result = df.style.bar(align='mid', axis=None, vmin=-6)._compute().ctx
expected = {
(0, 0): ['width: 10em', ' height: 80%'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 60.0%, #d65f5f 60.0%, '
'#d65f5f 70.0%, transparent 70.0%)'],
(0, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 40.0%, #d65f5f 40.0%, '
'#d65f5f 60.0%, transparent 60.0%)'],
(1, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 60.0%, #d65f5f 60.0%, '
'#d65f5f 100.0%, transparent 100.0%)']
}
assert result == expected
def test_bar_align_mid_vmax(self):
df = pd.DataFrame({'A': [0, 1], 'B': [-2, 4]})
result = df.style.bar(align='mid', axis=None, vmax=8)._compute().ctx
expected = {
(0, 0): ['width: 10em', ' height: 80%'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 20.0%, #d65f5f 20.0%, '
'#d65f5f 30.0%, transparent 30.0%)'],
(0, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,'
'#d65f5f 20.0%, transparent 20.0%)'],
(1, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 20.0%, #d65f5f 20.0%, '
'#d65f5f 60.0%, transparent 60.0%)']
}
assert result == expected
def test_bar_align_mid_vmin_vmax_wide(self):
df = pd.DataFrame({'A': [0, 1], 'B': [-2, 4]})
result = df.style.bar(align='mid', axis=None,
vmin=-3, vmax=7)._compute().ctx
expected = {
(0, 0): ['width: 10em', ' height: 80%'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 30.0%, #d65f5f 30.0%, '
'#d65f5f 40.0%, transparent 40.0%)'],
(0, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 10.0%, #d65f5f 10.0%, '
'#d65f5f 30.0%, transparent 30.0%)'],
(1, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 30.0%, #d65f5f 30.0%, '
'#d65f5f 70.0%, transparent 70.0%)']
}
assert result == expected
def test_bar_align_mid_vmin_vmax_clipping(self):
df = pd.DataFrame({'A': [0, 1], 'B': [-2, 4]})
result = df.style.bar(align='mid', axis=None,
vmin=-1, vmax=3)._compute().ctx
expected = {
(0, 0): ['width: 10em', ' height: 80%'],
(1, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 25.0%, #d65f5f 25.0%, '
'#d65f5f 50.0%, transparent 50.0%)'],
(0, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,'
'#d65f5f 25.0%, transparent 25.0%)'],
(1, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 25.0%, #d65f5f 25.0%, '
'#d65f5f 100.0%, transparent 100.0%)']
}
assert result == expected
def test_bar_align_mid_nans(self):
df = pd.DataFrame({'A': [1, None], 'B': [-1, 3]})
result = df.style.bar(align='mid', axis=None)._compute().ctx
expected = {
(0, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 25.0%, #d65f5f 25.0%, '
'#d65f5f 50.0%, transparent 50.0%)'],
(1, 0): [''],
(0, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg,'
'#d65f5f 25.0%, transparent 25.0%)'],
(1, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 25.0%, #d65f5f 25.0%, '
'#d65f5f 100.0%, transparent 100.0%)']
}
assert result == expected
def test_bar_align_zero_nans(self):
df = pd.DataFrame({'A': [1, None], 'B': [-1, 2]})
result = df.style.bar(align='zero', axis=None)._compute().ctx
expected = {
(0, 0): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 50.0%, #d65f5f 50.0%, '
'#d65f5f 75.0%, transparent 75.0%)'],
(1, 0): [''],
(0, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 25.0%, #d65f5f 25.0%, '
'#d65f5f 50.0%, transparent 50.0%)'],
(1, 1): ['width: 10em', ' height: 80%',
'background: linear-gradient(90deg, '
'transparent 50.0%, #d65f5f 50.0%, '
'#d65f5f 100.0%, transparent 100.0%)']
}
assert result == expected
def test_bar_bad_align_raises(self):
df = pd.DataFrame({'A': [-100, -60, -30, -20]})
with pytest.raises(ValueError):
df.style.bar(align='poorly', color=['#d65f5f', '#5fba7d'])
def test_highlight_null(self, null_color='red'):
df = pd.DataFrame({'A': [0, np.nan]})
result = df.style.highlight_null()._compute().ctx
expected = {(0, 0): [''],
(1, 0): ['background-color: red']}
assert result == expected
def test_nonunique_raises(self):
df = pd.DataFrame([[1, 2]], columns=['A', 'A'])
with pytest.raises(ValueError):
df.style
with pytest.raises(ValueError):
Styler(df)
def test_caption(self):
styler = Styler(self.df, caption='foo')
result = styler.render()
assert all(['caption' in result, 'foo' in result])
styler = self.df.style
result = styler.set_caption('baz')
assert styler is result
assert styler.caption == 'baz'
def test_uuid(self):
styler = Styler(self.df, uuid='abc123')
result = styler.render()
assert 'abc123' in result
styler = self.df.style
result = styler.set_uuid('aaa')
assert result is styler
assert result.uuid == 'aaa'
def test_unique_id(self):
# See https://github.com/pandas-dev/pandas/issues/16780
df = pd.DataFrame({'a': [1, 3, 5, 6], 'b': [2, 4, 12, 21]})
result = df.style.render(uuid='test')
assert 'test' in result
ids = re.findall('id="(.*?)"', result)
assert np.unique(ids).size == len(ids)
def test_table_styles(self):
style = [{'selector': 'th', 'props': [('foo', 'bar')]}]
styler = Styler(self.df, table_styles=style)
result = ' '.join(styler.render().split())
assert 'th { foo: bar; }' in result
styler = self.df.style
result = styler.set_table_styles(style)
assert styler is result
assert styler.table_styles == style
def test_table_attributes(self):
attributes = 'class="foo" data-bar'
styler = Styler(self.df, table_attributes=attributes)
result = styler.render()
assert 'class="foo" data-bar' in result
result = self.df.style.set_table_attributes(attributes).render()
assert 'class="foo" data-bar' in result
def test_precision(self):
with pd.option_context('display.precision', 10):
s = Styler(self.df)
assert s.precision == 10
s = Styler(self.df, precision=2)
assert s.precision == 2
s2 = s.set_precision(4)
assert s is s2
assert s.precision == 4
def test_apply_none(self):
def f(x):
return pd.DataFrame(np.where(x == x.max(), 'color: red', ''),
index=x.index, columns=x.columns)
result = (pd.DataFrame([[1, 2], [3, 4]])
.style.apply(f, axis=None)._compute().ctx)
assert result[(1, 1)] == ['color: red']
def test_trim(self):
result = self.df.style.render() # trim=True
assert result.count('#') == 0
result = self.df.style.highlight_max().render()
assert result.count('#') == len(self.df.columns)
def test_highlight_max(self):
df = pd.DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])
# max(df) = min(-df)
for max_ in [True, False]:
if max_:
attr = 'highlight_max'
else:
df = -df
attr = 'highlight_min'
result = getattr(df.style, attr)()._compute().ctx
assert result[(1, 1)] == ['background-color: yellow']
result = getattr(df.style, attr)(color='green')._compute().ctx
assert result[(1, 1)] == ['background-color: green']
result = getattr(df.style, attr)(subset='A')._compute().ctx
assert result[(1, 0)] == ['background-color: yellow']
result = getattr(df.style, attr)(axis=0)._compute().ctx
expected = {(1, 0): ['background-color: yellow'],
(1, 1): ['background-color: yellow'],
(0, 1): [''], (0, 0): ['']}
assert result == expected
result = getattr(df.style, attr)(axis=1)._compute().ctx
expected = {(0, 1): ['background-color: yellow'],
(1, 1): ['background-color: yellow'],
(0, 0): [''], (1, 0): ['']}
assert result == expected
# separate since we can't negate the strs
df['C'] = ['a', 'b']
result = df.style.highlight_max()._compute().ctx
expected = {(1, 1): ['background-color: yellow']}
result = df.style.highlight_min()._compute().ctx
expected = {(0, 0): ['background-color: yellow']}
def test_export(self):
f = lambda x: 'color: red' if x > 0 else 'color: blue'
g = lambda x, y, z: 'color: {z}'.format(z=z) \
if x > 0 else 'color: {z}'.format(z=z)
style1 = self.styler
style1.applymap(f)\
.applymap(g, y='a', z='b')\
.highlight_max()
result = style1.export()
style2 = self.df.style
style2.use(result)
assert style1._todo == style2._todo
style2.render()
def test_display_format(self):
df = pd.DataFrame(np.random.random(size=(2, 2)))
ctx = df.style.format("{:0.1f}")._translate()
assert all(['display_value' in c for c in row]
for row in ctx['body'])
assert all([len(c['display_value']) <= 3 for c in row[1:]]
for row in ctx['body'])
assert len(ctx['body'][0][1]['display_value'].lstrip('-')) <= 3
def test_display_format_raises(self):
df = pd.DataFrame(np.random.randn(2, 2))
with pytest.raises(TypeError):
df.style.format(5)
with pytest.raises(TypeError):
df.style.format(True)
def test_display_subset(self):
df = pd.DataFrame([[.1234, .1234], [1.1234, 1.1234]],
columns=['a', 'b'])
ctx = df.style.format({"a": "{:0.1f}", "b": "{0:.2%}"},
subset=pd.IndexSlice[0, :])._translate()
expected = '0.1'
assert ctx['body'][0][1]['display_value'] == expected
assert ctx['body'][1][1]['display_value'] == '1.1234'
assert ctx['body'][0][2]['display_value'] == '12.34%'
raw_11 = '1.1234'
ctx = df.style.format("{:0.1f}",
subset=pd.IndexSlice[0, :])._translate()
assert ctx['body'][0][1]['display_value'] == expected
assert ctx['body'][1][1]['display_value'] == raw_11
ctx = df.style.format("{:0.1f}",
subset=pd.IndexSlice[0, :])._translate()
assert ctx['body'][0][1]['display_value'] == expected
assert ctx['body'][1][1]['display_value'] == raw_11
ctx = df.style.format("{:0.1f}",
subset=pd.IndexSlice['a'])._translate()
assert ctx['body'][0][1]['display_value'] == expected
assert ctx['body'][0][2]['display_value'] == '0.1234'
ctx = df.style.format("{:0.1f}",
subset=pd.IndexSlice[0, 'a'])._translate()
assert ctx['body'][0][1]['display_value'] == expected
assert ctx['body'][1][1]['display_value'] == raw_11
ctx = df.style.format("{:0.1f}",
subset=pd.IndexSlice[[0, 1], ['a']])._translate()
assert ctx['body'][0][1]['display_value'] == expected
assert ctx['body'][1][1]['display_value'] == '1.1'
assert ctx['body'][0][2]['display_value'] == '0.1234'
assert ctx['body'][1][2]['display_value'] == '1.1234'
def test_display_dict(self):
df = pd.DataFrame([[.1234, .1234], [1.1234, 1.1234]],
columns=['a', 'b'])
ctx = df.style.format({"a": "{:0.1f}", "b": "{0:.2%}"})._translate()
assert ctx['body'][0][1]['display_value'] == '0.1'
assert ctx['body'][0][2]['display_value'] == '12.34%'
df['c'] = ['aaa', 'bbb']
ctx = df.style.format({"a": "{:0.1f}", "c": str.upper})._translate()
assert ctx['body'][0][1]['display_value'] == '0.1'
assert ctx['body'][0][3]['display_value'] == 'AAA'
def test_bad_apply_shape(self):
df = pd.DataFrame([[1, 2], [3, 4]])
with pytest.raises(ValueError):
df.style._apply(lambda x: 'x', subset=pd.IndexSlice[[0, 1], :])
with pytest.raises(ValueError):
df.style._apply(lambda x: [''], subset=pd.IndexSlice[[0, 1], :])
with pytest.raises(ValueError):
df.style._apply(lambda x: ['', '', '', ''])
with pytest.raises(ValueError):
df.style._apply(lambda x: ['', '', ''], subset=1)
with pytest.raises(ValueError):
df.style._apply(lambda x: ['', '', ''], axis=1)
def test_apply_bad_return(self):
def f(x):
return ''
df = pd.DataFrame([[1, 2], [3, 4]])
with pytest.raises(TypeError):
df.style._apply(f, axis=None)
def test_apply_bad_labels(self):
def f(x):
return pd.DataFrame(index=[1, 2], columns=['a', 'b'])
df = pd.DataFrame([[1, 2], [3, 4]])
with pytest.raises(ValueError):
df.style._apply(f, axis=None)
def test_get_level_lengths(self):
index = pd.MultiIndex.from_product([['a', 'b'], [0, 1, 2]])
expected = {(0, 0): 3, (0, 3): 3, (1, 0): 1, (1, 1): 1, (1, 2): 1,
(1, 3): 1, (1, 4): 1, (1, 5): 1}
result = _get_level_lengths(index)
tm.assert_dict_equal(result, expected)
def test_get_level_lengths_un_sorted(self):
index = pd.MultiIndex.from_arrays([
[1, 1, 2, 1],
['a', 'b', 'b', 'd']
])
expected = {(0, 0): 2, (0, 2): 1, (0, 3): 1,
(1, 0): 1, (1, 1): 1, (1, 2): 1, (1, 3): 1}
result = _get_level_lengths(index)
tm.assert_dict_equal(result, expected)
def test_mi_sparse(self):
df = pd.DataFrame({'A': [1, 2]},
index=pd.MultiIndex.from_arrays([['a', 'a'],
[0, 1]]))
result = df.style._translate()
body_0 = result['body'][0][0]
expected_0 = {
"value": "a", "display_value": "a", "is_visible": True,
"type": "th", "attributes": ["rowspan=2"],
"class": "row_heading level0 row0", "id": "level0_row0"
}
tm.assert_dict_equal(body_0, expected_0)
body_1 = result['body'][0][1]
expected_1 = {
"value": 0, "display_value": 0, "is_visible": True,
"type": "th", "class": "row_heading level1 row0",
"id": "level1_row0"
}
tm.assert_dict_equal(body_1, expected_1)
body_10 = result['body'][1][0]
expected_10 = {
"value": 'a', "display_value": 'a', "is_visible": False,
"type": "th", "class": "row_heading level0 row1",
"id": "level0_row1"
}
tm.assert_dict_equal(body_10, expected_10)
head = result['head'][0]
expected = [
{'type': 'th', 'class': 'blank', 'value': '',
'is_visible': True, "display_value": ''},
{'type': 'th', 'class': 'blank level0', 'value': '',
'is_visible': True, 'display_value': ''},
{'type': 'th', 'class': 'col_heading level0 col0', 'value': 'A',
'is_visible': True, 'display_value': 'A'}]
assert head == expected
def test_mi_sparse_disabled(self):
with pd.option_context('display.multi_sparse', False):
df = pd.DataFrame({'A': [1, 2]},
index=pd.MultiIndex.from_arrays([['a', 'a'],
[0, 1]]))
result = df.style._translate()
body = result['body']
for row in body:
assert 'attributes' not in row[0]
def test_mi_sparse_index_names(self):
df = pd.DataFrame({'A': [1, 2]}, index=pd.MultiIndex.from_arrays(
[['a', 'a'], [0, 1]],
names=['idx_level_0', 'idx_level_1'])
)
result = df.style._translate()
head = result['head'][1]
expected = [{
'class': 'index_name level0', 'value': 'idx_level_0',
'type': 'th'},
{'class': 'index_name level1', 'value': 'idx_level_1',
'type': 'th'},
{'class': 'blank', 'value': '', 'type': 'th'}]
assert head == expected
def test_mi_sparse_column_names(self):
df = pd.DataFrame(
np.arange(16).reshape(4, 4),
index=pd.MultiIndex.from_arrays(
[['a', 'a', 'b', 'a'], [0, 1, 1, 2]],
names=['idx_level_0', 'idx_level_1']),
columns=pd.MultiIndex.from_arrays(
[['C1', 'C1', 'C2', 'C2'], [1, 0, 1, 0]],
names=['col_0', 'col_1']
)
)
result = df.style._translate()
head = result['head'][1]
expected = [
{'class': 'blank', 'value': '', 'display_value': '',
'type': 'th', 'is_visible': True},
{'class': 'index_name level1', 'value': 'col_1',
'display_value': 'col_1', 'is_visible': True, 'type': 'th'},
{'class': 'col_heading level1 col0',
'display_value': 1,
'is_visible': True,
'type': 'th',
'value': 1},
{'class': 'col_heading level1 col1',
'display_value': 0,
'is_visible': True,
'type': 'th',
'value': 0},
{'class': 'col_heading level1 col2',
'display_value': 1,
'is_visible': True,
'type': 'th',
'value': 1},
{'class': 'col_heading level1 col3',
'display_value': 0,
'is_visible': True,
'type': 'th',
'value': 0},
]
assert head == expected
def test_hide_single_index(self):
# GH 14194
# single unnamed index
ctx = self.df.style._translate()
assert ctx['body'][0][0]['is_visible']
assert ctx['head'][0][0]['is_visible']
ctx2 = self.df.style.hide_index()._translate()
assert not ctx2['body'][0][0]['is_visible']
assert not ctx2['head'][0][0]['is_visible']
# single named index
ctx3 = self.df.set_index('A').style._translate()
assert ctx3['body'][0][0]['is_visible']
assert len(ctx3['head']) == 2 # 2 header levels
assert ctx3['head'][0][0]['is_visible']
ctx4 = self.df.set_index('A').style.hide_index()._translate()
assert not ctx4['body'][0][0]['is_visible']
assert len(ctx4['head']) == 1 # only 1 header level
assert not ctx4['head'][0][0]['is_visible']
def test_hide_multiindex(self):
# GH 14194
df = pd.DataFrame({'A': [1, 2]}, index=pd.MultiIndex.from_arrays(
[['a', 'a'], [0, 1]],
names=['idx_level_0', 'idx_level_1'])
)
ctx1 = df.style._translate()
# tests for 'a' and '0'
assert ctx1['body'][0][0]['is_visible']
assert ctx1['body'][0][1]['is_visible']
# check for blank header rows
assert ctx1['head'][0][0]['is_visible']
assert ctx1['head'][0][1]['is_visible']
ctx2 = df.style.hide_index()._translate()
# tests for 'a' and '0'
assert not ctx2['body'][0][0]['is_visible']
assert not ctx2['body'][0][1]['is_visible']
# check for blank header rows
assert not ctx2['head'][0][0]['is_visible']
assert not ctx2['head'][0][1]['is_visible']
def test_hide_columns_single_level(self):
# GH 14194
# test hiding single column
ctx = self.df.style._translate()
assert ctx['head'][0][1]['is_visible']
assert ctx['head'][0][1]['display_value'] == 'A'
assert ctx['head'][0][2]['is_visible']
assert ctx['head'][0][2]['display_value'] == 'B'
assert ctx['body'][0][1]['is_visible'] # col A, row 1
assert ctx['body'][1][2]['is_visible'] # col B, row 1
ctx = self.df.style.hide_columns('A')._translate()
assert not ctx['head'][0][1]['is_visible']
assert not ctx['body'][0][1]['is_visible'] # col A, row 1
assert ctx['body'][1][2]['is_visible'] # col B, row 1
# test hiding multiple columns
ctx = self.df.style.hide_columns(['A', 'B'])._translate()
assert not ctx['head'][0][1]['is_visible']
assert not ctx['head'][0][2]['is_visible']
assert not ctx['body'][0][1]['is_visible'] # col A, row 1
assert not ctx['body'][1][2]['is_visible'] # col B, row 1
def test_hide_columns_mult_levels(self):
# GH 14194
# setup dataframe with multiple column levels and indices
i1 = pd.MultiIndex.from_arrays([['a', 'a'], [0, 1]],
names=['idx_level_0',
'idx_level_1'])
i2 = pd.MultiIndex.from_arrays([['b', 'b'], [0, 1]],
names=['col_level_0',
'col_level_1'])
df = pd.DataFrame([[1, 2], [3, 4]], index=i1, columns=i2)
ctx = df.style._translate()
# column headers
assert ctx['head'][0][2]['is_visible']
assert ctx['head'][1][2]['is_visible']
assert ctx['head'][1][3]['display_value'] == 1
# indices
assert ctx['body'][0][0]['is_visible']
# data
assert ctx['body'][1][2]['is_visible']
assert ctx['body'][1][2]['display_value'] == 3
assert ctx['body'][1][3]['is_visible']
assert ctx['body'][1][3]['display_value'] == 4
# hide top column level, which hides both columns
ctx = df.style.hide_columns('b')._translate()
assert not ctx['head'][0][2]['is_visible'] # b
assert not ctx['head'][1][2]['is_visible'] # 0
assert not ctx['body'][1][2]['is_visible'] # 3
assert ctx['body'][0][0]['is_visible'] # index
# hide first column only
ctx = df.style.hide_columns([('b', 0)])._translate()
assert ctx['head'][0][2]['is_visible'] # b
assert not ctx['head'][1][2]['is_visible'] # 0
assert not ctx['body'][1][2]['is_visible'] # 3
assert ctx['body'][1][3]['is_visible']
assert ctx['body'][1][3]['display_value'] == 4
# hide second column and index
ctx = df.style.hide_columns([('b', 1)]).hide_index()._translate()
assert not ctx['body'][0][0]['is_visible'] # index
assert ctx['head'][0][2]['is_visible'] # b
assert ctx['head'][1][2]['is_visible'] # 0
assert not ctx['head'][1][3]['is_visible'] # 1
assert not ctx['body'][1][3]['is_visible'] # 4
assert ctx['body'][1][2]['is_visible']
assert ctx['body'][1][2]['display_value'] == 3
def test_pipe(self):
def set_caption_from_template(styler, a, b):
return styler.set_caption(
'Dataframe with a = {a} and b = {b}'.format(a=a, b=b))
styler = self.df.style.pipe(set_caption_from_template, 'A', b='B')
assert 'Dataframe with a = A and b = B' in styler.render()
# Test with an argument that is a (callable, keyword_name) pair.
def f(a, b, styler):
return (a, b, styler)
styler = self.df.style
result = styler.pipe((f, 'styler'), a=1, b=2)
assert result == (1, 2, styler)
@td.skip_if_no_mpl
class TestStylerMatplotlibDep:
def test_background_gradient(self):
df = pd.DataFrame([[1, 2], [2, 4]], columns=['A', 'B'])
for c_map in [None, 'YlOrRd']:
result = df.style.background_gradient(cmap=c_map)._compute().ctx
assert all("#" in x[0] for x in result.values())
assert result[(0, 0)] == result[(0, 1)]
assert result[(1, 0)] == result[(1, 1)]
result = df.style.background_gradient(
subset=pd.IndexSlice[1, 'A'])._compute().ctx
assert result[(1, 0)] == ['background-color: #fff7fb',
'color: #000000']
@pytest.mark.parametrize(
'c_map,expected', [
(None, {
(0, 0): ['background-color: #440154', 'color: #f1f1f1'],
(1, 0): ['background-color: #fde725', 'color: #000000']}),
('YlOrRd', {
(0, 0): ['background-color: #ffffcc', 'color: #000000'],
(1, 0): ['background-color: #800026', 'color: #f1f1f1']})])
def test_text_color_threshold(self, c_map, expected):
df = pd.DataFrame([1, 2], columns=['A'])
result = df.style.background_gradient(cmap=c_map)._compute().ctx
assert result == expected
@pytest.mark.parametrize("text_color_threshold", [1.1, '1', -1, [2, 2]])
def test_text_color_threshold_raises(self, text_color_threshold):
df = pd.DataFrame([[1, 2], [2, 4]], columns=['A', 'B'])
msg = "`text_color_threshold` must be a value from 0 to 1."
with pytest.raises(ValueError, match=msg):
df.style.background_gradient(
text_color_threshold=text_color_threshold)._compute()
@td.skip_if_no_mpl
def test_background_gradient_axis(self):
df = pd.DataFrame([[1, 2], [2, 4]], columns=['A', 'B'])
low = ['background-color: #f7fbff', 'color: #000000']
high = ['background-color: #08306b', 'color: #f1f1f1']
mid = ['background-color: #abd0e6', 'color: #000000']
result = df.style.background_gradient(cmap='Blues',
axis=0)._compute().ctx
assert result[(0, 0)] == low
assert result[(0, 1)] == low
assert result[(1, 0)] == high
assert result[(1, 1)] == high
result = df.style.background_gradient(cmap='Blues',
axis=1)._compute().ctx
assert result[(0, 0)] == low
assert result[(0, 1)] == high
assert result[(1, 0)] == low
assert result[(1, 1)] == high
result = df.style.background_gradient(cmap='Blues',
axis=None)._compute().ctx
assert result[(0, 0)] == low
assert result[(0, 1)] == mid
assert result[(1, 0)] == mid
assert result[(1, 1)] == high
def test_block_names():
# catch accidental removal of a block
expected = {
'before_style', 'style', 'table_styles', 'before_cellstyle',
'cellstyle', 'before_table', 'table', 'caption', 'thead', 'tbody',
'after_table', 'before_head_rows', 'head_tr', 'after_head_rows',
'before_rows', 'tr', 'after_rows',
}
result = set(Styler.template.blocks)
assert result == expected
def test_from_custom_template(tmpdir):
p = tmpdir.mkdir("templates").join("myhtml.tpl")
p.write(textwrap.dedent("""\
{% extends "html.tpl" %}
{% block table %}
<h1>{{ table_title|default("My Table") }}</h1>
{{ super() }}
{% endblock table %}"""))
result = Styler.from_custom_template(str(tmpdir.join('templates')),
'myhtml.tpl')
assert issubclass(result, Styler)
assert result.env is not Styler.env
assert result.template is not Styler.template
styler = result(pd.DataFrame({"A": [1, 2]}))
assert styler.render()
|
bsd-3-clause
|
lin-credible/scikit-learn
|
benchmarks/bench_sgd_regression.py
|
283
|
5569
|
"""
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
|
bsd-3-clause
|
dakcarto/QGIS
|
python/plugins/processing/algs/qgis/VectorLayerScatterplot.py
|
15
|
3160
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
VectorLayerScatterplot.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterTableField
from processing.core.outputs import OutputHTML
from processing.tools import vector
from processing.tools import dataobjects
class VectorLayerScatterplot(GeoAlgorithm):
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
XFIELD = 'XFIELD'
YFIELD = 'YFIELD'
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('Vector layer scatterplot')
self.group, self.i18n_group = self.trAlgorithm('Graphics')
self.addParameter(ParameterVector(self.INPUT,
self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_ANY]))
self.addParameter(ParameterTableField(self.XFIELD,
self.tr('X attribute'), self.INPUT,
ParameterTableField.DATA_TYPE_NUMBER))
self.addParameter(ParameterTableField(self.YFIELD,
self.tr('Y attribute'), self.INPUT,
ParameterTableField.DATA_TYPE_NUMBER))
self.addOutput(OutputHTML(self.OUTPUT, self.tr('Scatterplot')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
xfieldname = self.getParameterValue(self.XFIELD)
yfieldname = self.getParameterValue(self.YFIELD)
output = self.getOutputValue(self.OUTPUT)
values = vector.values(layer, xfieldname, yfieldname)
plt.close()
plt.scatter(values[xfieldname], values[yfieldname])
plt.ylabel(yfieldname)
plt.xlabel(xfieldname)
plotFilename = output + '.png'
lab.savefig(plotFilename)
with open(output, 'w') as f:
    f.write('<html><img src="' + plotFilename + '"/></html>')
|
gpl-2.0
|
khkaminska/scikit-learn
|
examples/svm/plot_oneclass.py
|
249
|
2302
|
"""
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
plt.show()
|
bsd-3-clause
|
IshankGulati/scikit-learn
|
examples/cluster/plot_segmentation_toy.py
|
91
|
3522
|
"""
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut to the volume of the region.
As the algorithm tries to balance the volume (i.e. balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
# We use a mask that limits to the foreground: the problem that we are
# interested in here is not separating the objects from the background,
# but separating them one from the other.
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: by making the weights only
# weakly dependent on the gradient, the segmentation is close to a Voronoi
# partition.
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
|
bsd-3-clause
|
jmmease/pandas
|
pandas/tests/io/parser/dtypes.py
|
2
|
15223
|
# -*- coding: utf-8 -*-
"""
Tests dtype specification during parsing
for all of the parsers defined in parsers.py
"""
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series, Index, MultiIndex, Categorical
from pandas.compat import StringIO
from pandas.core.dtypes.dtypes import CategoricalDtype
from pandas.errors import ParserWarning
class DtypeTests(object):
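    # These tests go through self.read_csv / self.read_table rather than
    # pd.read_csv directly, so the suite can be reused by the engine-specific
    # parser test classes.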
def test_passing_dtype(self):
# see gh-6607
df = DataFrame(np.random.rand(5, 2).round(4), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# see gh-3795: passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
expected = df.astype(str)
tm.assert_frame_equal(result, expected)
# for parsing, interpret object as str
result = self.read_csv(path, dtype=object, index_col=0)
tm.assert_frame_equal(result, expected)
# we expect all object columns, so need to
# convert to test for equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
pytest.raises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# see gh-12048: empty frame
actual = self.read_csv(StringIO('A,B'), dtype=str)
expected = DataFrame({'A': [], 'B': []}, index=[], dtype=str)
tm.assert_frame_equal(actual, expected)
def test_pass_dtype(self):
data = """\
one,two
1,2.5
2,3.5
3,4.5
4,5.5"""
result = self.read_csv(StringIO(data), dtype={'one': 'u1', 1: 'S1'})
assert result['one'].dtype == 'u1'
assert result['two'].dtype == 'object'
def test_categorical_dtype(self):
# GH 10153
data = """a,b,c
1,a,3.4
1,a,3.4
2,b,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['a', 'a', 'b']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype=CategoricalDtype())
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={'a': 'category',
'b': 'category',
'c': CategoricalDtype()})
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={'b': 'category'})
expected = pd.DataFrame({'a': [1, 1, 2],
'b': Categorical(['a', 'a', 'b']),
'c': [3.4, 3.4, 4.5]})
tm.assert_frame_equal(actual, expected)
actual = self.read_csv(StringIO(data), dtype={1: 'category'})
tm.assert_frame_equal(actual, expected)
# unsorted
data = """a,b,c
1,b,3.4
1,b,3.4
2,a,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['b', 'b', 'a']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
# missing
data = """a,b,c
1,b,3.4
1,nan,3.4
2,a,4.5"""
expected = pd.DataFrame({'a': Categorical(['1', '1', '2']),
'b': Categorical(['b', np.nan, 'a']),
'c': Categorical(['3.4', '3.4', '4.5'])})
actual = self.read_csv(StringIO(data), dtype='category')
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_encoding(self):
# GH 10153
pth = tm.get_data_path('unicode_series.csv')
encoding = 'latin-1'
expected = self.read_csv(pth, header=None, encoding=encoding)
expected[1] = Categorical(expected[1])
actual = self.read_csv(pth, header=None, encoding=encoding,
dtype={1: 'category'})
tm.assert_frame_equal(actual, expected)
pth = tm.get_data_path('utf16_ex.txt')
encoding = 'utf-16'
expected = self.read_table(pth, encoding=encoding)
expected = expected.apply(Categorical)
actual = self.read_table(pth, encoding=encoding, dtype='category')
tm.assert_frame_equal(actual, expected)
def test_categorical_dtype_chunksize(self):
# GH 10153
data = """a,b
1,a
1,b
1,b
2,c"""
expecteds = [pd.DataFrame({'a': [1, 1],
'b': Categorical(['a', 'b'])}),
pd.DataFrame({'a': [1, 2],
'b': Categorical(['b', 'c'])},
index=[2, 3])]
actuals = self.read_csv(StringIO(data), dtype={'b': 'category'},
chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
@pytest.mark.parametrize('ordered', [False, True])
@pytest.mark.parametrize('categories', [
['a', 'b', 'c'],
['a', 'c', 'b'],
['a', 'b', 'c', 'd'],
['c', 'b', 'a'],
])
def test_categorical_categoricaldtype(self, categories, ordered):
data = """a,b
1,a
1,b
1,b
2,c"""
expected = pd.DataFrame({
"a": [1, 1, 1, 2],
"b": Categorical(['a', 'b', 'b', 'c'],
categories=categories,
ordered=ordered)
})
dtype = {"b": CategoricalDtype(categories=categories,
ordered=ordered)}
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_categoricaldtype_unsorted(self):
data = """a,b
1,a
1,b
1,b
2,c"""
dtype = CategoricalDtype(['c', 'b', 'a'])
expected = pd.DataFrame({
'a': [1, 1, 1, 2],
'b': Categorical(['a', 'b', 'b', 'c'], categories=['c', 'b', 'a'])
})
result = self.read_csv(StringIO(data), dtype={'b': dtype})
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_coerces_numeric(self):
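        # Passing a CategoricalDtype with numeric categories should coerce the
        # parsed values to those categories' type (ints here) instead of
        # leaving them as strings.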
dtype = {'b': CategoricalDtype([1, 2, 3])}
data = "b\n1\n1\n2\n3"
expected = pd.DataFrame({'b': Categorical([1, 1, 2, 3])})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_coerces_datetime(self):
dtype = {
'b': CategoricalDtype(pd.date_range('2017', '2019', freq='AS'))
}
data = "b\n2017-01-01\n2018-01-01\n2019-01-01"
expected = pd.DataFrame({'b': Categorical(dtype['b'].categories)})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
dtype = {
'b': CategoricalDtype([pd.Timestamp("2014")])
}
data = "b\n2014-01-01\n2014-01-01T00:00:00"
expected = pd.DataFrame({'b': Categorical([pd.Timestamp('2014')] * 2)})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_coerces_timedelta(self):
dtype = {'b': CategoricalDtype(pd.to_timedelta(['1H', '2H', '3H']))}
data = "b\n1H\n2H\n3H"
expected = pd.DataFrame({'b': Categorical(dtype['b'].categories)})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categoricaldtype_unexpected_categories(self):
dtype = {'b': CategoricalDtype(['a', 'b', 'd', 'e'])}
data = "b\nd\na\nc\nd" # Unexpected c
expected = pd.DataFrame({"b": Categorical(list('dacd'),
dtype=dtype['b'])})
result = self.read_csv(StringIO(data), dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_categorical_categoricaldtype_chunksize(self):
# GH 10153
data = """a,b
1,a
1,b
1,b
2,c"""
cats = ['a', 'b', 'c']
expecteds = [pd.DataFrame({'a': [1, 1],
'b': Categorical(['a', 'b'],
categories=cats)}),
pd.DataFrame({'a': [1, 2],
'b': Categorical(['b', 'c'],
categories=cats)},
index=[2, 3])]
dtype = CategoricalDtype(cats)
actuals = self.read_csv(StringIO(data), dtype={'b': dtype},
chunksize=2)
for actual, expected in zip(actuals, expecteds):
tm.assert_frame_equal(actual, expected)
def test_empty_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), dtype={'one': 'u1'})
expected = DataFrame({'one': np.empty(0, dtype='u1'),
'two': np.empty(0, dtype=np.object)})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_index_pass_dtype(self):
data = 'one,two'
result = self.read_csv(StringIO(data), index_col=['one'],
dtype={'one': 'u1', 1: 'f'})
expected = DataFrame({'two': np.empty(0, dtype='f')},
index=Index([], dtype='u1', name='one'))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_multiindex_pass_dtype(self):
data = 'one,two,three'
result = self.read_csv(StringIO(data), index_col=['one', 'two'],
dtype={'one': 'u1', 1: 'f8'})
exp_idx = MultiIndex.from_arrays([np.empty(0, dtype='u1'),
np.empty(0, dtype='O')],
names=['one', 'two'])
expected = DataFrame(
{'three': np.empty(0, dtype=np.object)}, index=exp_idx)
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_mangled_column_pass_dtype_by_names(self):
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={
'one': 'u1', 'one.1': 'f'})
expected = DataFrame(
{'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_mangled_column_pass_dtype_by_indexes(self):
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})
expected = DataFrame(
{'one': np.empty(0, dtype='u1'), 'one.1': np.empty(0, dtype='f')})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_dup_column_pass_dtype_by_indexes(self):
# see gh-9424
expected = pd.concat([Series([], name='one', dtype='u1'),
Series([], name='one.1', dtype='f')], axis=1)
data = 'one,one'
result = self.read_csv(StringIO(data), dtype={0: 'u1', 1: 'f'})
tm.assert_frame_equal(result, expected, check_index_type=False)
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
data = ''
result = self.read_csv(StringIO(data), names=['one', 'one'],
dtype={0: 'u1', 1: 'f'})
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_raise_on_passed_int_dtype_with_nas(self):
# see gh-2631
data = """YEAR, DOY, a
2001,106380451,10
2001,,11
2001,106380451,67"""
pytest.raises(ValueError, self.read_csv, StringIO(data),
sep=",", skipinitialspace=True,
dtype={'DOY': np.int64})
def test_dtype_with_converter(self):
data = """a,b
1.1,2.2
1.2,2.3"""
        # the dtype spec is ignored if a converter is specified
with tm.assert_produces_warning(ParserWarning):
result = self.read_csv(StringIO(data), dtype={'a': 'i8'},
converters={'a': lambda x: str(x)})
expected = DataFrame({'a': ['1.1', '1.2'], 'b': [2.2, 2.3]})
tm.assert_frame_equal(result, expected)
def test_empty_dtype(self):
# see gh-14712
data = 'a,b'
expected = pd.DataFrame(columns=['a', 'b'], dtype=np.float64)
result = self.read_csv(StringIO(data), header=0, dtype=np.float64)
tm.assert_frame_equal(result, expected)
expected = pd.DataFrame({'a': pd.Categorical([]),
'b': pd.Categorical([])},
index=[])
result = self.read_csv(StringIO(data), header=0,
dtype='category')
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), header=0,
dtype={'a': 'category', 'b': 'category'})
tm.assert_frame_equal(result, expected)
expected = pd.DataFrame(columns=['a', 'b'], dtype='datetime64[ns]')
result = self.read_csv(StringIO(data), header=0,
dtype='datetime64[ns]')
tm.assert_frame_equal(result, expected)
expected = pd.DataFrame({'a': pd.Series([], dtype='timedelta64[ns]'),
'b': pd.Series([], dtype='timedelta64[ns]')},
index=[])
result = self.read_csv(StringIO(data), header=0,
dtype='timedelta64[ns]')
tm.assert_frame_equal(result, expected)
expected = pd.DataFrame(columns=['a', 'b'])
expected['a'] = expected['a'].astype(np.float64)
result = self.read_csv(StringIO(data), header=0,
dtype={'a': np.float64})
tm.assert_frame_equal(result, expected)
expected = pd.DataFrame(columns=['a', 'b'])
expected['a'] = expected['a'].astype(np.float64)
result = self.read_csv(StringIO(data), header=0,
dtype={0: np.float64})
tm.assert_frame_equal(result, expected)
expected = pd.DataFrame(columns=['a', 'b'])
expected['a'] = expected['a'].astype(np.int32)
expected['b'] = expected['b'].astype(np.float64)
result = self.read_csv(StringIO(data), header=0,
dtype={'a': np.int32, 1: np.float64})
tm.assert_frame_equal(result, expected)
def test_numeric_dtype(self):
data = '0\n1'
for dt in np.typecodes['AllInteger'] + np.typecodes['Float']:
expected = pd.DataFrame([0, 1], dtype=dt)
result = self.read_csv(StringIO(data), header=None, dtype=dt)
tm.assert_frame_equal(expected, result)
|
bsd-3-clause
|
Clyde-fare/scikit-learn
|
examples/datasets/plot_iris_dataset.py
|
283
|
1928
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray.
The rows are the samples and the columns are:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
|
bsd-3-clause
|
IndraVikas/scikit-learn
|
examples/applications/plot_outlier_detection_housing.py
|
243
|
5577
|
"""
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high dimension, as will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding a biased
estimation of the data structure that is nevertheless accurate to some extent.
The One-Class SVM algorithm does not assume any parametric form of the data
distribution and can therefore model its complex shape much better.
First example
-------------
The first example illustrates how robust covariance estimation can help
to concentrate on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Nevertheless, we
can get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
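# For each estimator the zero level set of decision_function() is the learned
# frontier between inliers and outliers, so contouring Z at level 0 draws
# that frontier.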
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list(legend1.values())
legend1_keys_list = list(legend1.keys())
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")
legend2_values_list = list(legend2.values())
legend2_keys_list = list(legend2.keys())
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
           (legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
|
bsd-3-clause
|
plissonf/scikit-learn
|
benchmarks/bench_random_projections.py
|
397
|
8900
|
"""
===========================
Random projection benchmark
===========================
Benchmarks for random projections.
"""
from __future__ import division
from __future__ import print_function
import gc
import sys
import optparse
from datetime import datetime
import collections
import numpy as np
import scipy.sparse as sp
from sklearn import clone
from sklearn.externals.six.moves import xrange
from sklearn.random_projection import (SparseRandomProjection,
GaussianRandomProjection,
johnson_lindenstrauss_min_dim)
def type_auto_or_float(val):
if val == "auto":
return "auto"
else:
return float(val)
def type_auto_or_int(val):
if val == "auto":
return "auto"
else:
return int(val)
def compute_time(t_start, delta):
mu_second = 0.0 + 10 ** 6 # number of microseconds in a second
return delta.seconds + delta.microseconds / mu_second
def bench_scikit_transformer(X, transformer):
    gc.collect()
    clf = clone(transformer)
# start time
t_start = datetime.now()
clf.fit(X)
delta = (datetime.now() - t_start)
# stop time
time_to_fit = compute_time(t_start, delta)
# start time
t_start = datetime.now()
clf.transform(X)
delta = (datetime.now() - t_start)
# stop time
time_to_transform = compute_time(t_start, delta)
return time_to_fit, time_to_transform
# Make some random data with uniformly located non zero entries with
# Gaussian distributed values
def make_sparse_random_data(n_samples, n_features, n_nonzeros,
random_state=None):
rng = np.random.RandomState(random_state)
data_coo = sp.coo_matrix(
(rng.randn(n_nonzeros),
(rng.randint(n_samples, size=n_nonzeros),
rng.randint(n_features, size=n_nonzeros))),
shape=(n_samples, n_features))
return data_coo.toarray(), data_coo.tocsr()
def print_row(clf_type, time_fit, time_transform):
print("%s | %s | %s" % (clf_type.ljust(30),
("%.4fs" % time_fit).center(12),
("%.4fs" % time_transform).center(12)))
if __name__ == "__main__":
###########################################################################
# Option parser
###########################################################################
op = optparse.OptionParser()
op.add_option("--n-times",
dest="n_times", default=5, type=int,
help="Benchmark results are average over n_times experiments")
op.add_option("--n-features",
dest="n_features", default=10 ** 4, type=int,
help="Number of features in the benchmarks")
op.add_option("--n-components",
dest="n_components", default="auto",
help="Size of the random subspace."
" ('auto' or int > 0)")
op.add_option("--ratio-nonzeros",
dest="ratio_nonzeros", default=10 ** -3, type=float,
help="Number of features in the benchmarks")
op.add_option("--n-samples",
dest="n_samples", default=500, type=int,
help="Number of samples in the benchmarks")
op.add_option("--random-seed",
dest="random_seed", default=13, type=int,
help="Seed used by the random number generators.")
op.add_option("--density",
dest="density", default=1 / 3,
help="Density used by the sparse random projection."
" ('auto' or float (0.0, 1.0]")
op.add_option("--eps",
dest="eps", default=0.5, type=float,
help="See the documentation of the underlying transformers.")
op.add_option("--transformers",
dest="selected_transformers",
default='GaussianRandomProjection,SparseRandomProjection',
type=str,
help="Comma-separated list of transformer to benchmark. "
"Default: %default. Available: "
"GaussianRandomProjection,SparseRandomProjection")
op.add_option("--dense",
dest="dense",
default=False,
action="store_true",
help="Set input space as a dense matrix.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
opts.n_components = type_auto_or_int(opts.n_components)
opts.density = type_auto_or_float(opts.density)
selected_transformers = opts.selected_transformers.split(',')
###########################################################################
# Generate dataset
###########################################################################
n_nonzeros = int(opts.ratio_nonzeros * opts.n_features)
    print('Dataset statistics')
print("===========================")
print('n_samples \t= %s' % opts.n_samples)
print('n_features \t= %s' % opts.n_features)
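    # With "auto", the subspace size is the Johnson-Lindenstrauss minimum
    # dimension required to embed n_samples points with distortion at most eps.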
if opts.n_components == "auto":
print('n_components \t= %s (auto)' %
johnson_lindenstrauss_min_dim(n_samples=opts.n_samples,
eps=opts.eps))
else:
print('n_components \t= %s' % opts.n_components)
print('n_elements \t= %s' % (opts.n_features * opts.n_samples))
    print('n_nonzeros \t= %s' % n_nonzeros)
print('ratio_nonzeros \t= %s' % opts.ratio_nonzeros)
print('')
###########################################################################
# Set transformer input
###########################################################################
transformers = {}
###########################################################################
# Set GaussianRandomProjection input
gaussian_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed
}
transformers["GaussianRandomProjection"] = \
GaussianRandomProjection(**gaussian_matrix_params)
###########################################################################
# Set SparseRandomProjection input
sparse_matrix_params = {
"n_components": opts.n_components,
"random_state": opts.random_seed,
"density": opts.density,
"eps": opts.eps,
}
transformers["SparseRandomProjection"] = \
SparseRandomProjection(**sparse_matrix_params)
###########################################################################
# Perform benchmark
###########################################################################
time_fit = collections.defaultdict(list)
time_transform = collections.defaultdict(list)
print('Benchmarks')
print("===========================")
print("Generate dataset benchmarks... ", end="")
X_dense, X_sparse = make_sparse_random_data(opts.n_samples,
opts.n_features,
n_nonzeros,
random_state=opts.random_seed)
X = X_dense if opts.dense else X_sparse
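    # The benchmark runs on either the dense or the sparse representation of
    # the same data, depending on the --dense flag.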
print("done")
for name in selected_transformers:
print("Perform benchmarks for %s..." % name)
for iteration in xrange(opts.n_times):
print("\titer %s..." % iteration, end="")
            time_to_fit, time_to_transform = bench_scikit_transformer(
                X, transformers[name])
time_fit[name].append(time_to_fit)
time_transform[name].append(time_to_transform)
print("done")
print("")
###########################################################################
# Print results
###########################################################################
print("Script arguments")
print("===========================")
arguments = vars(opts)
print("%s \t | %s " % ("Arguments".ljust(16),
"Value".center(12),))
print(25 * "-" + ("|" + "-" * 14) * 1)
for key, value in arguments.items():
print("%s \t | %s " % (str(key).ljust(16),
str(value).strip().center(12)))
print("")
print("Transformer performance:")
print("===========================")
print("Results are averaged over %s repetition(s)." % opts.n_times)
print("")
print("%s | %s | %s" % ("Transformer".ljust(30),
"fit".center(12),
"transform".center(12)))
print(31 * "-" + ("|" + "-" * 14) * 2)
for name in sorted(selected_transformers):
print_row(name,
np.mean(time_fit[name]),
np.mean(time_transform[name]))
print("")
print("")
|
bsd-3-clause
|
blankenberg/tools-iuc
|
tools/table_compute/scripts/safety.py
|
17
|
9977
|
import re
class Safety():
"""
Class to safely evaluate mathematical expression on single
or table data
"""
__allowed_tokens = (
'(', ')', 'if', 'else', 'or', 'and', 'not', 'in',
'+', '-', '*', '/', '%', ',', '!=', '==', '>', '>=', '<', '<=',
'min', 'max', 'sum',
)
__allowed_ref_types = {
'pd.DataFrame': {
'abs', 'add', 'agg', 'aggregate', 'align', 'all', 'any', 'append',
'apply', 'applymap', 'as_matrix', 'asfreq', 'at', 'axes', 'bool',
'clip', 'clip_lower', 'clip_upper', 'columns', 'combine',
'compound', 'corr', 'count', 'cov', 'cummax', 'cummin', 'cumprod',
'cumsum', 'describe', 'div', 'divide', 'dot', 'drop',
'drop_duplicates', 'droplevel', 'dropna', 'duplicated', 'empty',
'eq', 'equals', 'expanding', 'ffill', 'fillna', 'filter', 'first',
'first_valid_index', 'floordiv', 'ge', 'groupby', 'gt', 'head',
'iat', 'iloc', 'index', 'insert', 'interpolate', 'isin', 'isna',
'isnull', 'items', 'iteritems', 'iterrows', 'itertuples', 'ix',
'join', 'keys', 'kurt', 'kurtosis', 'last', 'last_valid_index',
'le', 'loc', 'lookup', 'lt', 'mad', 'mask', 'max', 'mean',
'median', 'melt', 'merge', 'min', 'mod', 'mode', 'mul', 'multiply',
'ndim', 'ne', 'nlargest', 'notna', 'notnull', 'nsmallest',
'nunique', 'pct_change', 'pivot', 'pivot_table', 'pop', 'pow',
'prod', 'product', 'quantile', 'radd', 'rank', 'rdiv', 'replace',
'resample', 'rfloordiv', 'rmod', 'rmul', 'rolling', 'round',
'rpow', 'rsub', 'rtruediv', 'sample', 'select',
'sem', 'shape', 'shift', 'size', 'skew', 'slice_shift',
'squeeze', 'stack', 'std', 'sub', 'subtract', 'sum', 'swapaxes',
'swaplevel', 'T', 'tail', 'take', 'transform', 'transpose',
'truediv', 'truncate', 'tshift', 'unstack', 'var', 'where',
},
'pd.Series': {
'abs', 'add', 'agg', 'aggregate', 'align', 'all', 'any', 'append',
'apply', 'argsort', 'as_matrix', 'asfreq', 'asof', 'astype', 'at',
'at_time', 'autocorr', 'axes', 'between', 'between_time', 'bfill',
'bool', 'cat', 'clip', 'clip_lower', 'clip_upper', 'combine',
'combine_first', 'compound', 'corr', 'count', 'cov', 'cummax',
'cummin', 'cumprod', 'cumsum', 'describe', 'diff', 'div', 'divide',
'divmod', 'dot', 'drop', 'drop_duplicates', 'droplevel', 'dropna',
'dt', 'dtype', 'dtypes', 'duplicated', 'empty', 'eq', 'equals',
'ewm', 'expanding', 'factorize', 'ffill', 'fillna', 'filter',
'first', 'first_valid_index', 'flags', 'floordiv', 'ge', 'groupby',
'gt', 'hasnans', 'head', 'iat', 'idxmax', 'idxmin', 'iloc', 'imag',
'index', 'interpolate', 'is_monotonic', 'is_monotonic_decreasing',
'is_monotonic_increasing', 'is_unique', 'isin', 'isna', 'isnull',
'item', 'items', 'iteritems', 'ix', 'keys', 'kurt', 'kurtosis',
'last', 'last_valid_index', 'le', 'loc', 'lt', 'mad', 'map',
'mask', 'max', 'mean', 'median', 'min', 'mod', 'mode', 'mul',
'multiply', 'name', 'ndim', 'ne', 'nlargest', 'nonzero', 'notna',
'notnull', 'nsmallest', 'nunique', 'pct_change', 'pop', 'pow',
'prod', 'product', 'ptp', 'quantile', 'radd', 'rank', 'rdiv',
'rdivmod', 'real', 'repeat', 'replace', 'resample', 'rfloordiv',
'rmod', 'rmul', 'rolling', 'round', 'rpow', 'rsub', 'rtruediv',
'sample', 'searchsorted', 'select', 'sem', 'shape', 'shift',
'size', 'skew', 'slice_shift', 'sort_index', 'sort_values',
'squeeze', 'std', 'sub', 'subtract', 'sum', 'swapaxes',
'swaplevel', 'T', 'tail', 'take', 'transform', 'transpose',
'truediv', 'truncate', 'tshift', 'unique', 'unstack',
'value_counts', 'var', 'where', 'xs',
},
}
__allowed_qualified = {
# allowed numpy functionality
'np': {
'abs', 'add', 'all', 'any', 'append', 'array', 'bool', 'ceil',
'complex', 'cos', 'cosh', 'cov', 'cumprod', 'cumsum', 'degrees',
'divide', 'divmod', 'dot', 'e', 'empty', 'exp', 'float', 'floor',
'hypot', 'inf', 'int', 'isfinite', 'isin', 'isinf', 'isnan', 'log',
'log10', 'log2', 'max', 'mean', 'median', 'min', 'mod', 'multiply',
'nan', 'ndim', 'pi', 'product', 'quantile', 'radians', 'rank',
'remainder', 'round', 'sin', 'sinh', 'size', 'sqrt', 'squeeze',
'stack', 'std', 'str', 'subtract', 'sum', 'swapaxes', 'take',
'tan', 'tanh', 'transpose', 'unique', 'var', 'where',
},
# allowed math functionality
'math': {
'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'ceil',
'copysign', 'cos', 'cosh', 'degrees', 'e', 'erf', 'erfc', 'exp',
'expm1', 'fabs', 'factorial', 'floor', 'fmod', 'frexp', 'fsum',
'gamma', 'gcd', 'hypot', 'inf', 'isclose', 'isfinite', 'isinf',
'isnan', 'ldexp', 'lgamma', 'log', 'log10', 'log1p', 'log2',
'modf', 'nan', 'pi', 'pow', 'radians', 'remainder', 'sin', 'sinh',
'sqrt', 'tan', 'tanh', 'tau', 'trunc',
},
# allowed pd functionality
'pd': {
'DataFrame', 'array', 'concat', 'cut', 'date_range', 'factorize',
'interval_range', 'isna', 'isnull', 'melt', 'merge', 'notna',
'notnull', 'period_range', 'pivot', 'pivot_table', 'unique',
'value_counts', 'wide_to_long',
},
}
def __init__(self, expression,
ref_whitelist=None, ref_type=None,
custom_qualified=None):
self.allowed_qualified = self.__allowed_qualified.copy()
if ref_whitelist is None:
self.these = []
else:
self.these = ref_whitelist
if ref_type is None or ref_type not in self.__allowed_ref_types:
self.allowed_qualified['_this'] = set()
else:
self.allowed_qualified[
'_this'
] = self.__allowed_ref_types[ref_type]
if custom_qualified is not None:
self.allowed_qualified.update(custom_qualified)
self.expr = expression
self.__assertSafe()
def generateFunction(self):
"Generates a function to be evaluated outside the class"
cust_fun = "def fun(%s):\n\treturn(%s)" % (self.these[0], self.expr)
return cust_fun
def __assertSafe(self):
indeed, problematic_token = self.__isSafeStatement()
if not indeed:
self.detailedExcuse(problematic_token)
raise ValueError("Custom Expression is not safe.")
@staticmethod
def detailedExcuse(word):
"Gives a verbose statement for why users should not use some specific operators."
mess = None
if word == "for":
mess = "for loops and comprehensions are not allowed. Use numpy or pandas table operations instead."
elif word == ":":
mess = "Colons are not allowed. Use inline Python if/else statements."
elif word == "=":
mess = "Variable assignment is not allowed. Use object methods to substitute values."
elif word in ("[", "]"):
mess = "Direct indexing of arrays is not allowed. Use numpy or pandas functions/methods to address specific parts of tables."
else:
mess = "Not an allowed token in this operation"
print("( '%s' ) %s" % (word, mess))
def __isSafeStatement(self):
"""
Determines if a user-expression is safe to evaluate.
To be considered safe an expression may contain only:
- standard Python operators and numbers
- inline conditional expressions
- select functions and objects
by default, these come from the math, numpy and pandas
libraries, and must be qualified with the modules' conventional
names math, np, pd; can be overridden at the instance level
- references to a whitelist of objects (pd.DataFrames by default)
and their methods
"""
safe = True
# examples of user-expressions
# '-math.log(1 - elem/4096) * 4096 if elem != 1 else elem - 0.5'
# 'vec.median() + vec.sum()'
# 1. Break expressions into tokens
# e.g.,
# [
# '-', 'math.log', '(', '1', '-', 'elem', '/', '4096', ')', '*',
# '4096', 'if', 'elem', '!=', '1', 'else', 'elem', '-', '0.5'
# ]
# or
# ['vec.median', '(', ')', '+', 'vec.sum', '(', ')']
tokens = [
e for e in re.split(
r'([a-zA-Z0-9_.]+|[^a-zA-Z0-9_.() ]+|[()])', self.expr
) if e.strip()
]
# 2. Subtract allowed standard tokens
rem = [e for e in tokens if e not in self.__allowed_tokens]
# 3. Subtract allowed qualified objects from allowed modules
# and whitelisted references and their attributes
rem2 = []
for e in rem:
parts = e.split('.')
if len(parts) == 1:
if parts[0] in self.these:
continue
if len(parts) == 2:
if parts[0] in self.these:
parts[0] = '_this'
if parts[0] in self.allowed_qualified:
if parts[1] in self.allowed_qualified[parts[0]]:
continue
rem2.append(e)
# 4. Assert that rest are real numbers or strings
e = ''
for e in rem2:
try:
_ = float(e)
except ValueError:
safe = False
break
return safe, e
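# A minimal usage sketch (illustrative only, not part of the original tool);
# the reference name "vec" and the expression below are made up. The generated
# function source is exec'd into a scratch namespace and then called.
if __name__ == "__main__":
    import pandas as pd
    vec = pd.Series([1.0, 2.0, 3.0, 4.0])
    checker = Safety("vec.median() + vec.sum()",
                     ref_whitelist=["vec"], ref_type="pd.Series")
    namespace = {}
    exec(checker.generateFunction(), namespace)  # defines fun(vec)
    print(namespace["fun"](vec))                 # 2.5 + 10.0 -> 12.5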
|
mit
|
lorenzo-desantis/mne-python
|
mne/decoding/csp.py
|
6
|
21527
|
# Authors: Romain Trachel <trachelr@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Alexandre Barachant <alexandre.barachant@gmail.com>
#
# License: BSD (3-clause)
import copy as cp
import warnings
import numpy as np
from scipy import linalg
from .mixin import TransformerMixin
from ..cov import _regularized_covariance
class CSP(TransformerMixin):
"""M/EEG signal decomposition using the Common Spatial Patterns (CSP).
This object can be used as a supervised decomposition to estimate
spatial filters for feature extraction in a 2 class decoding problem.
See [1].
Parameters
----------
n_components : int (default 4)
The number of components to decompose M/EEG signals.
This number should be set by cross-validation.
reg : float | str | None (default None)
if not None, allow regularization for covariance estimation
if float, shrinkage covariance is used (0 <= shrinkage <= 1).
if str, optimal shrinkage using Ledoit-Wolf Shrinkage ('ledoit_wolf')
or Oracle Approximating Shrinkage ('oas').
log : bool (default True)
If true, apply log to standardize the features.
If false, features are just z-scored.
Attributes
----------
filters_ : ndarray, shape (n_channels, n_channels)
If fit, the CSP components used to decompose the data, else None.
patterns_ : ndarray, shape (n_channels, n_channels)
If fit, the CSP patterns used to restore M/EEG signals, else None.
mean_ : ndarray, shape (n_channels,)
If fit, the mean squared power for each component.
std_ : ndarray, shape (n_channels,)
If fit, the std squared power for each component.
References
----------
[1] Zoltan J. Koles. The quantitative extraction and topographic mapping
of the abnormal components in the clinical EEG. Electroencephalography
and Clinical Neurophysiology, 79(6):440--447, December 1991.
"""
def __init__(self, n_components=4, reg=None, log=True):
"""Init of CSP."""
self.n_components = n_components
if reg == 'lws':
warnings.warn('`lws` has been deprecated for the `reg`'
' argument. It will be removed in 0.11.'
' Use `ledoit_wolf` instead.', DeprecationWarning)
reg = 'ledoit_wolf'
self.reg = reg
self.log = log
self.filters_ = None
self.patterns_ = None
self.mean_ = None
self.std_ = None
def fit(self, epochs_data, y):
"""Estimate the CSP decomposition on epochs.
Parameters
----------
epochs_data : ndarray, shape (n_epochs, n_channels, n_times)
The data to estimate the CSP on.
y : array, shape (n_epochs,)
The class for each epoch.
Returns
-------
self : instance of CSP
Returns the modified instance.
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
epochs_data = np.atleast_3d(epochs_data)
# check number of epochs
if epochs_data.shape[0] != len(y):
raise ValueError("n_epochs must be the same for epochs_data and y")
classes = np.unique(y)
if len(classes) != 2:
raise ValueError("More than two different classes in the data.")
# concatenate epochs
class_1 = np.transpose(epochs_data[y == classes[0]],
[1, 0, 2]).reshape(epochs_data.shape[1], -1)
class_2 = np.transpose(epochs_data[y == classes[1]],
[1, 0, 2]).reshape(epochs_data.shape[1], -1)
cov_1 = _regularized_covariance(class_1, reg=self.reg)
cov_2 = _regularized_covariance(class_2, reg=self.reg)
# then fit on covariance
self._fit(cov_1, cov_2)
pick_filters = self.filters_[:self.n_components]
X = np.asarray([np.dot(pick_filters, e) for e in epochs_data])
# compute features (mean band power)
X = (X ** 2).mean(axis=-1)
# To standardize features
self.mean_ = X.mean(axis=0)
self.std_ = X.std(axis=0)
return self
def _fit(self, cov_a, cov_b):
"""Aux Function (modifies cov_a and cov_b in-place)."""
cov_a /= np.trace(cov_a)
cov_b /= np.trace(cov_b)
# computes the eigen values
lambda_, u = linalg.eigh(cov_a + cov_b)
# sort them
ind = np.argsort(lambda_)[::-1]
lambda2_ = lambda_[ind]
u = u[:, ind]
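        # p is a whitening matrix for the composite covariance:
        # p @ (cov_a + cov_b) @ p.T is (approximately) the identity.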
p = np.dot(np.sqrt(linalg.pinv(np.diag(lambda2_))), u.T)
# Compute the generalized eigen value problem
w_a = np.dot(np.dot(p, cov_a), p.T)
w_b = np.dot(np.dot(p, cov_b), p.T)
# and solve it
vals, vecs = linalg.eigh(w_a, w_b)
# sort vectors by discriminative power using eigen values
ind = np.argsort(np.maximum(vals, 1. / vals))[::-1]
vecs = vecs[:, ind]
# and project
w = np.dot(vecs.T, p)
self.filters_ = w
self.patterns_ = linalg.pinv(w).T
def transform(self, epochs_data, y=None):
"""Estimate epochs sources given the CSP filters.
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data.
y : None
Not used.
Returns
-------
X : ndarray of shape (n_epochs, n_sources)
The CSP features averaged over time.
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
if self.filters_ is None:
raise RuntimeError('No filters available. Please first fit CSP '
'decomposition.')
pick_filters = self.filters_[:self.n_components]
X = np.asarray([np.dot(pick_filters, e) for e in epochs_data])
# compute features (mean band power)
X = (X ** 2).mean(axis=-1)
if self.log:
X = np.log(X)
else:
X -= self.mean_
X /= self.std_
return X
def plot_patterns(self, info, components=None, ch_type=None, layout=None,
vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
colorbar=True, scale=None, scale_time=1, unit=None,
res=64, size=1, cbar_fmt='%3.1f',
name_format='CSP%01d', proj=False, show=True,
show_names=False, title=None, mask=None,
mask_params=None, outlines='head', contours=6,
image_interp='bilinear', average=None, head_pos=None):
"""Plot topographic patterns of CSP components.
The CSP patterns explain how the measured data was generated
from the neural sources (a.k.a. the forward model).
Parameters
----------
info : instance of Info
Info dictionary of the epochs used to fit CSP.
If not possible, consider using ``create_info``.
components : float | array of floats | None.
The CSP patterns to plot. If None, n_components will be shown.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
layout : None | Layout
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). If possible, the correct layout file
is inferred from the data; if no appropriate layout file was found
the layout is automatically generated from the sensor locations.
vmin : float | callable
            The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
            The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap
Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
'Reds'.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True,
a circle will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scale : dict | float | None
Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
for grad and 1e15 for mag.
scale_time : float | None
Scale the time labels. Defaults to 1.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
name_format : str
String format for topomap values. Defaults to "CSP%01d"
proj : bool | 'interactive'
If true SSP projections are applied before display.
If 'interactive', a check box for reversible selection
            of SSP projection vectors will be shown.
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
            Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw.
If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used.
All matplotlib options are accepted.
average : float | None
The time window around a given time to be used for averaging
            (seconds). For example, 0.01 would translate into a window that
starts 5 ms before and ends 5 ms after a given time point.
Defaults to None, which means no averaging.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head
should be relative to the electrode locations.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from .. import EvokedArray
if components is None:
components = np.arange(self.n_components)
# set sampling frequency to have 1 component per time point
info = cp.deepcopy(info)
info['sfreq'] = 1.
# create an evoked
patterns = EvokedArray(self.patterns_.T, info, tmin=0)
        # then call plot_topomap
return patterns.plot_topomap(times=components, ch_type=ch_type,
layout=layout, vmin=vmin, vmax=vmax,
cmap=cmap, colorbar=colorbar, res=res,
cbar_fmt=cbar_fmt, sensors=sensors,
scale=1, scale_time=1, unit='a.u.',
time_format=name_format, size=size,
show_names=show_names,
mask_params=mask_params,
mask=mask, outlines=outlines,
contours=contours,
image_interp=image_interp, show=show,
head_pos=head_pos)
def plot_filters(self, info, components=None, ch_type=None, layout=None,
vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
colorbar=True, scale=None, scale_time=1, unit=None,
res=64, size=1, cbar_fmt='%3.1f',
name_format='CSP%01d', proj=False, show=True,
show_names=False, title=None, mask=None,
mask_params=None, outlines='head', contours=6,
image_interp='bilinear', average=None, head_pos=None):
"""Plot topographic filters of CSP components.
The CSP filters are used to extract discriminant neural sources from
the measured data (a.k.a. the backward model).
Parameters
----------
info : instance of Info
Info dictionary of the epochs used to fit CSP.
If not possible, consider using ``create_info``.
components : float | array of floats | None.
The CSP patterns to plot. If None, n_components will be shown.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
layout : None | Layout
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). If possible, the correct layout file
is inferred from the data; if no appropriate layout file was found
the layout is automatically generated from the sensor locations.
vmin : float | callable
            The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
            The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap
Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
'Reds'.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True,
a circle will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scale : dict | float | None
Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
for grad and 1e15 for mag.
scale_time : float | None
Scale the time labels. Defaults to 1.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
name_format : str
String format for topomap values. Defaults to "CSP%01d"
proj : bool | 'interactive'
If true SSP projections are applied before display.
If 'interactive', a check box for reversible selection
            of SSP projection vectors will be shown.
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
            Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw.
If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used.
All matplotlib options are accepted.
average : float | None
The time window around a given time to be used for averaging
            (seconds). For example, 0.01 would translate into a window that
starts 5 ms before and ends 5 ms after a given time point.
Defaults to None, which means no averaging.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head
should be relative to the electrode locations.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from .. import EvokedArray
if components is None:
components = np.arange(self.n_components)
# set sampling frequency to have 1 component per time point
info = cp.deepcopy(info)
info['sfreq'] = 1.
# create an evoked
filters = EvokedArray(self.filters_, info, tmin=0)
        # then call plot_topomap
return filters.plot_topomap(times=components, ch_type=ch_type,
layout=layout, vmin=vmin, vmax=vmax,
cmap=cmap, colorbar=colorbar, res=res,
cbar_fmt=cbar_fmt, sensors=sensors,
scale=1, scale_time=1, unit='a.u.',
time_format=name_format, size=size,
show_names=show_names,
mask_params=mask_params,
mask=mask, outlines=outlines,
contours=contours,
image_interp=image_interp, show=show,
head_pos=head_pos)
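# A minimal usage sketch (illustrative only; ``epochs_data`` is assumed to be
# an (n_epochs, n_channels, n_times) array and ``labels`` an (n_epochs,)
# vector with exactly two classes):
#
#     csp = CSP(n_components=4, reg='ledoit_wolf', log=True)
#     features = csp.fit(epochs_data, labels).transform(epochs_data)
#     # ``features`` has shape (n_epochs, n_components) and can be fed to any
#     # scikit-learn style classifier.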
|
bsd-3-clause
|
iancze/Pysplotter
|
test_label.py
|
1
|
6392
|
import matplotlib
matplotlib.use("Qt4Agg")
from matplotlib.pyplot import figure, show
from matplotlib.patches import Ellipse
import numpy as np
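# Demo of matplotlib's annotate(): figure 1 exercises different arrowstyle and
# connectionstyle options; plot_more() (figure 2) anchors fancy/simple/wedge
# arrows to an Ellipse patch.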
if 1:
fig = figure(1,figsize=(8,5))
ax = fig.add_subplot(111, autoscale_on=False, xlim=(-1,5), ylim=(-4,3))
t = np.arange(0.0, 5.0, 0.01)
s = np.cos(2*np.pi*t)
line, = ax.plot(t, s, lw=3, color='purple')
ax.annotate('arrowstyle', xy=(0, 1), xycoords='data',
xytext=(-50, 30), textcoords='offset points',
arrowprops=dict(arrowstyle="->")
)
ax.annotate('arc3', xy=(0.5, -1), xycoords='data',
xytext=(-30, -30), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc3,rad=.2")
)
ax.annotate('arc', xy=(1., 1), xycoords='data',
xytext=(-40, 30), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc,angleA=0,armA=30,rad=10"),
)
ax.annotate('arc', xy=(1.5, -1), xycoords='data',
xytext=(-40, -30), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="arc,angleA=0,armA=20,angleB=-90,armB=15,rad=7"),
)
ax.annotate('angle1', xy=(2., 1), xycoords='data',
xytext=(-50, 30), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="angle,angleA=0,angleB=90,rad=10"),
)
ax.annotate('angle2(3)', xy=(2.5, -1), xycoords='data',
xytext=(-50, -30), textcoords='offset points',
arrowprops=dict(arrowstyle="->",
connectionstyle="angle3,angleA=0,angleB=-90"),
)
ax.annotate('angle3', xy=(3., 1), xycoords='data',
xytext=(-50, 30), textcoords='offset points',
bbox=dict(boxstyle="round,rounding_size=0.2", fc="white"),
arrowprops=dict(arrowstyle="->",
connectionstyle="angle,angleA=0,angleB=90,rad=10"),
)
ax.annotate('angle4', xy=(3.5, -1), xycoords='data',
xytext=(-70, -60), textcoords='offset points',
size=20,
bbox=dict(boxstyle="round4,pad=.5", fc="0.8"),
arrowprops=dict(arrowstyle="->",
connectionstyle="angle,angleA=0,angleB=-90,rad=10"),
)
ax.annotate('angle5', xy=(4., 1), xycoords='data',
xytext=(-50, 30), textcoords='offset points',
bbox=dict(boxstyle="round", fc="0.8"),
arrowprops=dict(arrowstyle="->",
shrinkA=0, shrinkB=10,
connectionstyle="angle,angleA=0,angleB=90,rad=10"),
)
ann = ax.annotate('', xy=(4., 1.), xycoords='data',
xytext=(4.5, -1), textcoords='data',
arrowprops=dict(arrowstyle="<->",
connectionstyle="bar",
ec="k",
shrinkA=5, shrinkB=5,
)
)
def plot_more():
fig = figure(2)
fig.clf()
ax = fig.add_subplot(111, autoscale_on=False, xlim=(-1,5), ylim=(-5,3))
el = Ellipse((2, -1), 0.5, 0.5)
ax.add_patch(el)
ax.annotate('$->$', xy=(2., -1), xycoords='data',
xytext=(-150, -140), textcoords='offset points',
bbox=dict(boxstyle="round", fc="0.8"),
arrowprops=dict(arrowstyle="->",
patchB=el,
connectionstyle="angle,angleA=90,angleB=0,rad=10"),
)
ax.annotate('fancy', xy=(2., -1), xycoords='data',
xytext=(-100, 60), textcoords='offset points',
size=20,
#bbox=dict(boxstyle="round", fc="0.8"),
arrowprops=dict(arrowstyle="fancy",
fc="0.6", ec="none",
patchB=el,
connectionstyle="angle3,angleA=0,angleB=-90"),
)
ax.annotate('simple', xy=(2., -1), xycoords='data',
xytext=(100, 60), textcoords='offset points',
size=20,
#bbox=dict(boxstyle="round", fc="0.8"),
arrowprops=dict(arrowstyle="simple",
fc="0.6", ec="none",
patchB=el,
connectionstyle="arc3,rad=0.3"),
)
ax.annotate('wedge1', xy=(2., -1), xycoords='data',
xytext=(-100, -100), textcoords='offset points',
size=20,
#bbox=dict(boxstyle="round", fc="0.8"),
arrowprops=dict(arrowstyle="wedge,tail_width=0.7",
fc="0.6", ec="none",
patchB=el,
connectionstyle="arc3,rad=-0.3"),
)
ann = ax.annotate('wedge2', xy=(2., -1), xycoords='data',
xytext=(0, -45), textcoords='offset points',
size=20,
bbox=dict(boxstyle="round", fc=(1.0, 0.7, 0.7), ec=(1., .5, .5)),
arrowprops=dict(arrowstyle="wedge,tail_width=1.",
fc=(1.0, 0.7, 0.7), ec=(1., .5, .5),
patchA=None,
patchB=el,
relpos=(0.2, 0.8),
connectionstyle="arc3,rad=-0.1"),
)
ann = ax.annotate('wedge3', xy=(2., -1), xycoords='data',
xytext=(35, 0), textcoords='offset points',
size=20, va="center",
bbox=dict(boxstyle="round", fc=(1.0, 0.7, 0.7), ec="none"),
arrowprops=dict(arrowstyle="wedge,tail_width=1.",
fc=(1.0, 0.7, 0.7), ec="none",
patchA=None,
patchB=el,
relpos=(0.2, 0.5),
)
)
plot_more()
show()
|
mit
|
jseabold/statsmodels
|
examples/python/quantile_regression.py
|
5
|
4049
|
# coding: utf-8
# DO NOT EDIT
# Autogenerated from the notebook quantile_regression.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
# # Quantile regression
#
# This example page shows how to use ``statsmodels``' ``QuantReg`` class
# to replicate parts of the analysis published in
#
# * Koenker, Roger and Kevin F. Hallock. "Quantile Regression". Journal
# of Economic Perspectives, Volume 15, Number 4, Fall 2001, Pages 143–156
#
# We are interested in the relationship between income and expenditures on
# food for a sample of working class Belgian households in 1857 (the Engel
# data).
#
# ## Setup
#
# We first need to load some modules and to retrieve the data.
# Conveniently, the Engel dataset is shipped with ``statsmodels``.
import numpy as np
import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
data = sm.datasets.engel.load_pandas().data
data.head()
# ## Least Absolute Deviation
#
# The LAD model is a special case of quantile regression where q=0.5
mod = smf.quantreg('foodexp ~ income', data)
res = mod.fit(q=.5)
print(res.summary())
# ## Visualizing the results
#
# We estimate the quantile regression model for many quantiles between .05
# and .95, and compare the best-fit line from each of these models to the
# Ordinary Least Squares results.
# ### Prepare data for plotting
#
# For convenience, we place the quantile regression results in a Pandas
# DataFrame, and the OLS results in a dictionary.
quantiles = np.arange(.05, .96, .1)
def fit_model(q):
res = mod.fit(q=q)
return [q, res.params['Intercept'], res.params['income']
] + res.conf_int().loc['income'].tolist()
models = [fit_model(x) for x in quantiles]
models = pd.DataFrame(models, columns=['q', 'a', 'b', 'lb', 'ub'])
ols = smf.ols('foodexp ~ income', data).fit()
ols_ci = ols.conf_int().loc['income'].tolist()
ols = dict(
a=ols.params['Intercept'],
b=ols.params['income'],
lb=ols_ci[0],
ub=ols_ci[1])
print(models)
print(ols)
# ### First plot
#
# This plot compares best fit lines for 10 quantile regression models to
# the least squares fit. As Koenker and Hallock (2001) point out, we see
# that:
#
# 1. Food expenditure increases with income
# 2. The *dispersion* of food expenditure increases with income
# 3. The least squares estimates fit low income observations quite poorly
# (i.e. the OLS line passes over most low income households)
x = np.arange(data.income.min(), data.income.max(), 50)
get_y = lambda a, b: a + b * x
fig, ax = plt.subplots(figsize=(8, 6))
for i in range(models.shape[0]):
y = get_y(models.a[i], models.b[i])
ax.plot(x, y, linestyle='dotted', color='grey')
y = get_y(ols['a'], ols['b'])
ax.plot(x, y, color='red', label='OLS')
ax.scatter(data.income, data.foodexp, alpha=.2)
ax.set_xlim((240, 3000))
ax.set_ylim((240, 2000))
legend = ax.legend()
ax.set_xlabel('Income', fontsize=16)
ax.set_ylabel(
'Food expenditure', fontsize=16)
# ### Second plot
#
# The dotted black lines form the 95% point-wise confidence band around the
# 10 quantile regression estimates (solid black line). The red lines
# represent the OLS regression results along with their 95% confidence
# interval.
#
# In most cases, the quantile regression point estimates lie outside the
# OLS confidence interval, which suggests that the effect of income on food
# expenditure may not be constant across the distribution.
n = models.shape[0]
p1 = plt.plot(models.q, models.b, color='black', label='Quantile Reg.')
p2 = plt.plot(models.q, models.ub, linestyle='dotted', color='black')
p3 = plt.plot(models.q, models.lb, linestyle='dotted', color='black')
p4 = plt.plot(models.q, [ols['b']] * n, color='red', label='OLS')
p5 = plt.plot(models.q, [ols['lb']] * n, linestyle='dotted', color='red')
p6 = plt.plot(models.q, [ols['ub']] * n, linestyle='dotted', color='red')
plt.ylabel(r'$\beta_{income}$')
plt.xlabel('Quantiles of the conditional food expenditure distribution')
plt.legend()
plt.show()
|
bsd-3-clause
|
LudwigKnuepfer/otm
|
otm.py
|
2
|
9568
|
#!/usr/bin/env python2
description = 'otm - display static memory of an elf file in a treemap'
"""
Copyright (C) 2014 Ludwig Ortmann <ludwig@spline.de>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
from os import path
import subprocess
import random
import re
import argparse
from pprint import pprint
import pylab
from matplotlib.patches import Rectangle
class Treemap:
def __init__(self, tree):
self.ax = pylab.subplot(111,aspect='equal')
pylab.subplots_adjust(left=0, right=1, top=1, bottom=0)
self.ax.set_xticks([])
self.ax.set_yticks([])
self.iterate(tree)
def iterate(self, node, lower=[0,0], upper=[1,1], axis=0):
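        # Slice-and-dice layout: split the current rectangle (lower/upper)
        # along one axis proportionally to each child's size, then recurse on
        # the perpendicular axis for the child's own children.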
axis = axis % 2
self.draw_rectangle(lower, upper, node)
width = upper[axis] - lower[axis]
ns = node.get_size()
for child in node.children:
cs = child.get_size()
upper[axis] = (lower[axis] + ((width * float(cs)) / ns))
lo = list(lower)
up = list(upper)
self.iterate(child, lo, up, axis + 1)
lower[axis] = upper[axis]
def draw_rectangle(self, lower, upper, node):
r = Rectangle( lower, upper[0]-lower[0], upper[1] - lower[1],
edgecolor='k',
facecolor= node.get_color(),
label=node.name)
self.ax.add_patch(r)
rx, ry = r.get_xy()
rw = r.get_width()
rh = r.get_height()
cx = rx + rw/2.0
cy = ry + rh/2.0
if isinstance(node, PathNode):
t = node.name
if rw * 3 < rh:
t += ", "
else:
t += "\n"
t += str(node.size) + ", " + node.stype
c='w'
if rw < rh:
o = "vertical"
else:
o = "horizontal"
else:
t = node.name
if node.isfile:
c='k'
o = 45
else:
return
self.ax.annotate(
t,
(cx,cy),
color=c,
weight='bold', ha='center', va='center',
rotation=o
)
class PathTree():
def __init__(self, name, path_dict):
self.children = list()
self.name = name
self.size = None
self.isfile = False
print name
subdirectories = list()
for p in path_dict:
if p == '':
#print "content", p
self.add_children(path_dict[p])
self.isfile = True
else:
#print "entry", p
subdirectories.append(p)
cdict = dict()
for pathname in subdirectories:
parts = pathname.split("/", 1)
if len(parts) == 1:
x = parts[0]
rest = ""
else:
x,rest = parts
if not x in cdict:
cdict[x] = dict()
cdict[x][rest] = path_dict[pathname]
#print "adding", pathname, "to", x
for k in cdict:
#pprint(v, indent=2)
self.children.append(PathTree(k, cdict[k]))
#print "size:", self.get_size()
def __repr__(self):
return self.name
def add_children(self, sym_list):
for symbol in sym_list:
self.children.append(PathNode(*symbol))
def get_size(self):
if self.size is None:
self.size = 0
for c in self.children:
self.size += c.get_size()
return self.size
def get_color(self):
return (random.random(),random.random(),random.random())
class PathNode(PathTree):
def __init__(self, name, line, size, stype):
self.children = []
print "\t", name, stype
self.name = name
self.size = size
self.line = line
self.isfile = False
self.stype = stype
def parse_elf(filename, minimum_size=None, symbol_type_list=None,
function_path_regex_in=None, function_name_regex_in=None,
object_path_regex_in=None, object_name_regex_in=None,
function_path_regex_ex=None, function_name_regex_ex=None,
object_path_regex_ex=None, object_name_regex_ex=None,
):
"""parse elf file into a {path: [(symbol, linenumber, size)]} dictionary"""
output = subprocess.check_output([
"nm",
"--radix=d",
"-S",
"-l",
"--size-sort",
        filename])
    # nm output fields: "addr size type name [path:line]"
    addresses = [x.split() for x in output.splitlines()]
paths = dict()
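    # Illustrative (hypothetical) nm line and the entry it produces:
    #   "00001000 00000032 T main   /src/app/main.c:42"
    #   -> paths['src/app/main.c'] == [('main', '42', 32, 'T')]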
    for foo in addresses:
size = foo[1]
stype = foo[2]
symbolname = foo[3]
if len(foo) > 4:
pathname,lineno = foo[4].split(":")
else:
pathname,lineno = '??','?'
size = int(size)
if minimum_size and size < minimum_size:
continue
pathname = path.normpath(pathname)
if pathname[0] == '/':
pathname = pathname[1:]
if stype in "tT":
ppati = function_path_regex_in
npati = function_name_regex_in
ppate = function_path_regex_ex
npate = function_name_regex_ex
elif stype in 'bB':
ppati = object_path_regex_in
npati = object_name_regex_in
ppate = object_path_regex_ex
npate = object_name_regex_ex
        else:
            ppati = None
            ppate = None
            npati = None
            npate = None
if ppati and not re.search(ppati, pathname):
continue
if npati and not re.search(npati, symbolname):
continue
if ppate and re.search(ppate, pathname):
continue
if npate and re.search(npate, symbolname):
continue
if symbol_type_list and stype not in symbol_type_list:
continue
if not pathname in paths:
paths[pathname] = list()
paths[pathname].append((symbolname, lineno, size, stype))
return paths
def arg_parser():
p = argparse.ArgumentParser(description=description)
p.add_argument("filename", default="a.out", nargs='?',
help="the elf file to parse")
p.add_argument("-d","--documentation",
action="store_true", default=argparse.SUPPRESS,
help="print additional documentation and exit")
p.add_argument("-fp", "--function-path-regex-in", default=None,
help="regular expression for function path inclusion")
p.add_argument("-op","--object-path-regex-in", default=None,
help="regular expression for object path inclusion")
p.add_argument("-fn", "--function-name-regex-in", default=None,
help="regular expression for function name inclusion")
p.add_argument("-on","--object-name-regex-in", default=None,
help="regular expression for object name inclusion")
p.add_argument("-Fp", "--function-path-regex-ex", default=None,
help="regular expression for function path exclusion")
p.add_argument("-Op","--object-path-regex-ex", default=None,
help="regular expression for object path exclusion")
p.add_argument("-Fn", "--function-name-regex-ex", default=None,
help="regular expression for function name exclusion")
p.add_argument("-On","--object-name-regex-ex", default=None,
help="regular expression for object name exclusion")
p.add_argument("-t","--symbol-type-list", default=None,
help="list of symbol types to include")
p.add_argument("-m","--minimum-size", type=int, default=1,
                   help="minimum size for all types")
return p
def exit_doc():
print """
Regular expression examples:
display only functions that come from net or core:
--function-path-regex-in "net|core"
display only objects that nm could not look up
        --object-path-regex-in "\?\?"
do not display objects that end on _stack
--object-name-regex-ex "_stack$"
When combining these options, exclusion takes precedence over
inclusion:
display only objects from main.c filtering out stacks:
-op "main\.c" -On "_stack$|_stk$"
Symbol type list:
    include text and BSS section symbols; check the nm manpage for
    details:
--symbol-type-list tTbB
Minimum size:
The minimum-size argument is taken as an inclusion hurdle, i.e.
symbols below that size are not taken into consideration at all.
"""
sys.exit()
if __name__ == '__main__':
args = arg_parser().parse_args()
if hasattr(args,"documentation"):
exit_doc()
if not path.isfile(args.filename):
sys.exit("file does not exist: " + args.filename)
elf = parse_elf(**vars(args))
tree = PathTree("root", elf)
Treemap(tree)
pylab.show()
|
gpl-3.0
|
b1quint/samfp
|
samfp/mkcube.py
|
1
|
7108
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
SAMI Make Cube
This file gets several FITS images and put them together inside a single
FITS file with three dimensions (data-cube).
Todo
----
- Treat error case multiple extensions.
"""
import astropy.io.fits as pyfits
import argparse
import itertools
import numpy as np
import pandas as pd
from . import io
from .tools import version
logger = io.get_logger("MakeCube")
__author__ = 'Bruno Quint'
def main():
# Parsing Arguments -------------------------------------------------------
parser = argparse.ArgumentParser(
description="Build a data-cube from image files.")
parser.add_argument('-a', '--algorithm', metavar='algorithm', type=str,
default='average',
help="Algorithm used when combining images per "
"frame (average | median | sum)")
parser.add_argument('-b', '--binning', type=int, nargs=2, default=(1, 1),
help='New binning to be applied to the data-cube')
parser.add_argument('-d', '--debug', action='store_true',
help="Run debug mode.")
parser.add_argument('-o', '--output', metavar='output', type=str,
default="cube.fits", help="Name of the output cube.")
parser.add_argument('-q', '--quiet', action='store_true',
help="Run quietly.")
parser.add_argument('files', metavar='files', type=str, nargs='+',
help="input filenames.")
parsed_args = parser.parse_args()
if parsed_args.quiet:
logger.setLevel('NOTSET')
elif parsed_args.debug:
logger.setLevel('DEBUG')
else:
logger.setLevel('INFO')
logger.info("")
logger.info("SAM-FP Tools: mkcube")
logger.info("by Bruno Quint (bquint@ctio.noao.edu)")
logger.info("version {:s}".format(version.__str__))
logger.info("Starting program.")
logger.info("")
make_cube(parsed_args.files,
output=parsed_args.output,
combine_algorithm=parsed_args.algorithm,
binning=parsed_args.binning)
def make_cube(list_of_files, z_key='FAPEROTZ', combine_algorithm='average',
output='cube.fits', binning=(1, 1)):
"""
Stack FITS images within a single FITS data-cube.
Parameters
----------
list_of_files : list
A list of strings containing the path to the input fits files.
z_key : str
The wildcard name responsible to store the FP gap size in *bcv*
units.
combine_algorithm : string
The algorithm used to combine several images into a single frame
(average|median|sum)
output : str
Name of the output data-cube.
binning : list or tuple
Binning to be applied to the data-cube when mounting it.
"""
assert isinstance(list_of_files, list)
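    # Usage sketch (hypothetical file names):
    #   make_cube(['fp_scan_001.fits', 'fp_scan_002.fits'],
    #             output='cube.fits', combine_algorithm='median',
    #             binning=(2, 2))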
list_of_files.sort()
logger.debug('Create table')
df = pd.DataFrame(columns=['filename', 'nrows', 'ncols', 'z'])
logger.debug('Filling the table')
for f in list_of_files:
logger.debug('Read %s file' % f)
hdr = pyfits.getheader(f)
ds = pd.Series({
'filename': f,
'nrows': int(hdr['naxis1']),
'ncols': int(hdr['naxis2']),
'z': int(hdr[z_key].strip())
})
df = df.append(ds, ignore_index=True)
logger.debug('%d files with different number of rows' % len(
df['nrows'].unique()))
logger.debug('%d files with different number of columns' % len(
df['ncols'].unique()))
logger.debug('%d files with different Z' % len(df['z'].unique()))
    if len(df['nrows'].unique()) != 1:
        raise IOError(
            'Height mismatch for %d files' % len(df['nrows'].unique()))
    if len(df['ncols'].unique()) != 1:
        raise IOError(
            'Width mismatch for %d files' % len(df['ncols'].unique()))
nrows = int(df['nrows'].unique() // binning[0])
ncols = int(df['ncols'].unique() // binning[1])
nchan = len(df['z'].unique())
nrows = int(nrows)
ncols = int(ncols)
nchan = int(nchan)
logger.info('Creating data-cube with shape')
logger.info('[%d, %d, %d]' % (nrows, ncols, nchan))
cube = np.zeros((nchan, ncols, nrows))
z_array = df['z'].unique()
z_array = np.array(z_array, dtype=np.float64)
z_array.sort()
z_array = z_array[::-1] # Reverse array so lambda increases inside the cube
combine_algorithm = combine_algorithm.lower()
if combine_algorithm in ['mean', 'average']:
combine = np.mean
elif combine_algorithm in ['median']:
combine = np.median
elif combine_algorithm in ['sum']:
combine = np.sum
else:
        raise ValueError('"combine_algorithm" kwarg must be average/median/sum')
logger.info('Filling data-cube')
x, y = range(binning[0]), range(binning[1])
# Build data-cube
for i in range(z_array.size):
logger.debug('Processing channel %03d - z = %.2f' % (i + 1, z_array[i]))
files = df[df['z'] == z_array[i]]['filename'].tolist()
temp_cube = np.zeros((len(files), ncols, nrows))
# Build temporary data-cube for each frame before combine it
for j in range(len(files)):
temp_image = pyfits.getdata(files[j])
# Binning images ---
for (m, n) in itertools.product(x, y):
temp_cube[j] += temp_image[n::binning[1], m::binning[0]]
cube[i] = combine(temp_cube, axis=0)
logger.info('Find Z solution')
z = np.arange(z_array.size) + 1
z = np.array(z, dtype=np.float64)
p = np.polyfit(z, z_array, deg=1)
delta_z = p[0]
z_zero = np.polyval(p, 1)
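    # The linear fit gives z(channel) ~ z_zero + delta_z * (channel - 1),
    # which is exactly what the CRPIX3/CRVAL3/CDELT3 keywords below encode.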
hdr.set('CRPIX3', 1, 'Reference channel')
hdr.set('CRVAL3', z_zero, 'Reference channel value')
hdr.set('CUNIT3', 'bcv', 'Units in Z')
hdr.set('CDELT3', delta_z, 'Average increment in Z')
hdr.set('CR3_3', delta_z, 'Average increment in Z')
hdr.set('C3_3', delta_z, 'Average increment in Z')
# Saving filenames in the header ---
hdr.add_history('Cube mounted using `mkcube`')
for i in range(z_array.size):
files = df[df['z'] == z_array[i]]['filename'].tolist()
for j in range(len(files)):
hdr.append(('CHAN_%03d' % (i + 1), files[j],
'z = %+04d' % z_array[i]))
hdr.add_blank('', after='CHAN_%03d' % (i + 1))
hdr.add_blank('', before='CHAN_001')
hdr.add_blank('--- Channels and Files ---', before='CHAN_001')
output = io.safe_save(output, verbose=True)
logger.info('Writing file to {:s}'.format(output))
pyfits.writeto(output, cube, hdr, overwrite=True)
logger.debug(
pd.DataFrame(
data={
'x': z,
'y': z_array,
'fit_y': np.polyval(p, z),
'round_fit': np.round(np.polyval(p, z))
}
)
)
logger.debug(p)
return
if __name__ == '__main__':
main()
|
bsd-3-clause
|
pypot/scikit-learn
|
examples/linear_model/plot_ridge_path.py
|
254
|
1655
|
"""
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations.
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.set_color_cycle(['b', 'r', 'g', 'c', 'k', 'y', 'm'])
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
|
bsd-3-clause
|
quheng/scikit-learn
|
sklearn/utils/testing.py
|
71
|
26178
|
"""Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex, but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
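# Minimal usage sketch for assert_warns; ``_noisy`` is a hypothetical helper.
#
#   def _noisy():
#       warnings.warn("deprecated", UserWarning)
#       return 42
#
#   assert assert_warns(UserWarning, _noisy) == 42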
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
        # Check whether any warning belonging to warning_class has the expected message
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
exceptions : exception or tuple of exception
        The exception class (or tuple of exception classes) expected to be raised.
    function : callable
        Callable object expected to raise the error.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
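# Usage sketch: int("abc") raises ValueError("invalid literal for int() ..."),
# so the substring check on "invalid literal" passes.
#
#   assert_raise_message(ValueError, "invalid literal", int, "abc")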
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
'data', keep that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
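# Usage sketch (hypothetical data and output path):
#
#   fake_mldata({'data': np.arange(6).reshape(3, 2), 'label': np.arange(3)},
#               'iris', '/tmp/iris.mat')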
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
        {dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
"RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and can
        not be default-constructed sensibly. These are currently
        Pipeline, FeatureUnion, GridSearchCV and RandomizedSearchCV.
include_dont_test : boolean, default=False
Whether to include "special" label estimator or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
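# Usage sketch: list only the classifier estimators.
#
#   for name, Estimator in all_estimators(type_filter='classifier'):
#       print(name)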
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
warnings.warn("if_not_mac_os is deprecated in 0.17 and will be removed"
" in 0.19: use the safer and more generic"
" if_safe_multiprocessing_with_blas instead",
DeprecationWarning)
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def if_safe_multiprocessing_with_blas(func):
"""Decorator for tests involving both BLAS calls and multiprocessing
Under Python < 3.4 and POSIX (e.g. Linux or OSX), using multiprocessing in
conjunction with some implementation of BLAS (or other libraries that
manage an internal posix thread pool) can cause a crash or a freeze of the
Python process.
Under Python 3.4 and later, joblib uses the forkserver mode of
multiprocessing which does not trigger this problem.
In practice all known packaged distributions (from Linux distros or
    Anaconda) of BLAS under Linux seem to be safe, so this problem seems to
    only impact OSX users.
This wrapper makes it possible to skip tests that can possibly cause
    this crash under OSX.
"""
@wraps(func)
def run_test(*args, **kwargs):
if sys.platform == 'darwin' and sys.version_info[:2] < (3, 4):
raise SkipTest(
"Possible multi-process bug with some BLAS under Python < 3.4")
return func(*args, **kwargs)
return run_test
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
    Copied from joblib.pool (for independence)."""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
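# Usage sketch (X, y and estimator are hypothetical objects):
#
#   with TempMemmap(X) as X_readonly:
#       estimator.fit(X_readonly, y)  # X is served from a read-only memmap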
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
|
bsd-3-clause
|
orionzhou/robin
|
apps/venn3.py
|
1
|
2679
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Utilities for 3-way Venn diagrams (and read-list statistics)
"""
import os.path as op
import sys
import re
import logging
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
from matplotlib_venn import venn3, venn3_circles
def venn3_coord(args):
fhi = open(args.fi, 'r')
s1 = fhi.readline().strip().split(",")
s2 = fhi.readline().strip().split(",")
s3 = fhi.readline().strip().split(",")
fhi.close()
s1, s2, s3 = set(s1), set(s2), set(s3)
v = venn3([s1, s2, s3], ('A','B','C'))
fho1 = open(args.fo1, 'w')
for xy, l in zip(v.centers, v.radii):
x, y = xy
fho1.write("%s\t%s\t%s\n" % (x, y, l))
fho1.close()
fho2 = open(args.fo2, 'w')
for xyl in v.subset_labels:
x, y = xyl.get_position()
l = xyl.get_text()
fho2.write("%s\t%s\t%s\n" % (x, y, l))
fho2.close()
def add_stat(args):
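    # Note: ``iter_fastq`` and ``SummaryStats`` are assumed to be provided by
    # helper modules elsewhere in the package; they are not imported here.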
cvt = {k: int for k in 'Replicate'.split()}
sl = pd.read_csv(args.fi, sep="\t", header=0, converters=cvt)
firstN = 10000
sl['spots'] = [0] * len(sl.index)
sl['avgLength'] = [0] * len(sl.index)
for i in range(len(sl)):
sid = sl['SampleID'][i]
fq = ''
if sl['paired'][i]:
r1, r2 = sl['r1'][i], sl['r2'][i]
fq = r1
else:
fq = sl['r0'][i]
nrcd = 0
L = []
for rec in iter_fastq(fq):
if not rec:
break
nrcd += 1
if nrcd <= firstN:
L.append(len(rec))
avgLength = SummaryStats(L).mean
if sl['paired'][i]:
avgLength = avgLength * 2
print("\t".join(str(x) for x in (sid, nrcd, avgLength)))
sl.at[i, 'spots'] = nrcd
sl.at[i, 'avgLength'] = avgLength
sl.to_csv(args.fo, sep="\t", header=True, index=False)
def main():
import argparse
ps = argparse.ArgumentParser(
formatter_class = argparse.ArgumentDefaultsHelpFormatter,
description = '3-way venn-diagram'
)
sp = ps.add_subparsers(title = 'available commands', dest = 'command')
sp1 = sp.add_parser('coord', help='compute venn3 coordinates',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
sp1.add_argument('fi', help = 'input file containing sets')
sp1.add_argument('fo1', help = 'output circle coordinates')
sp1.add_argument('fo2', help = 'output label coordinates')
sp1.set_defaults(func = venn3_coord)
args = ps.parse_args()
if args.command:
args.func(args)
else:
print('Error: need to specify a sub command\n')
        ps.print_help()
if __name__ == '__main__':
main()
|
gpl-2.0
|
jemromerol/apasvo
|
apasvo/gui/views/FilterDesing.py
|
1
|
12915
|
# encoding: utf-8
'''
@author: Jose Emilio Romero Lopez
@copyright: Copyright 2013-2014, Jose Emilio Romero Lopez.
@license: GPL
@contact: jemromerol@gmail.com
This file is part of APASVO.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from PySide import QtCore
from PySide import QtGui
import matplotlib
matplotlib.rcParams['backend'] = 'qt4agg'
matplotlib.rcParams['backend.qt4'] = 'PySide'
matplotlib.rcParams['patch.antialiased'] = False
matplotlib.rcParams['agg.path.chunksize'] = 80000
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from apasvo.gui.views import navigationtoolbar
from apasvo.gui.views import processingdialog
from apasvo.utils import clt
import matplotlib.pyplot as plt
from scipy import signal
from scipy.signal import butter, lfilter, freqz
import numpy as np
import traceback
from apasvo.picking import apasvotrace as rc
from apasvo.picking import takanami
from apasvo._version import _application_name
from apasvo._version import _organization
MINIMUM_MARGIN_IN_SECS = 0.5
class FilterDesignTask(QtCore.QObject):
    """A class to handle a filter design task.
    Attributes:
        record: An opened seismic record.
    Signals:
        finished: Task finishes.
        error: An error occurred while running the task.
        position_estimated: Return values of the task are ready.
    """
finished = QtCore.Signal()
error = QtCore.Signal(str, str)
position_estimated = QtCore.Signal(int, np.ndarray, int)
def __init__(self, record):
super(FilterDesignTask, self).__init__()
self.record = record
class FilterDesignDialog(QtGui.QDialog):
    """A dialog to design a Butterworth band-pass filter and preview its
    frequency response (magnitude and phase).
    Attributes:
        max_freq: Maximum sampling rate among the traces of the input stream,
            used to bound the cutoff-frequency spin boxes.
    """
def __init__(self, stream, trace_list=None, parent=None):
super(FilterDesignDialog, self).__init__(parent)
# Calc max. frequency
traces = stream.traces if not trace_list else trace_list
self.max_freq = max([trace.fs for trace in traces])
self._init_ui()
self.load_settings()
# Initial draw
w, h_db, angles = self._retrieve_filter_plot_data()
self._module_data = self.module_axes.plot(w, h_db, 'b')[0]
self._phase_data = self.phase_axes.plot(w, angles, 'g')[0]
self.module_axes.set_ylim([-60,10])
self.phase_axes.set_ylim([min(angles), max(angles)])
self.canvas.draw_idle()
self.start_point_spinbox.valueChanged.connect(self.on_freq_min_changed)
self.end_point_spinbox.valueChanged.connect(self.on_freq_max_changed)
self.start_point_spinbox.valueChanged.connect(self._draw_filter_response)
self.end_point_spinbox.valueChanged.connect(self._draw_filter_response)
self.number_coefficient_spinbox.valueChanged.connect(self._draw_filter_response)
self.zeroPhaseCheckBox.toggled.connect(self._draw_filter_response)
self.button_box.accepted.connect(self.accept)
self.button_box.rejected.connect(self.reject)
self.button_box.clicked.connect(self.on_click)
def _init_ui(self):
self.setWindowTitle("Filter Design (Butterworth-Bandpass Filter)")
self.fig, _ = plt.subplots(1, 1, sharex=True)
# Set up filter axes
self.module_axes = self.fig.axes[0]
self.phase_axes = self.module_axes.twinx()
self.module_axes.set_title('Digital filter frequency response (Butterworth-Bandpass filter)')
self.module_axes.set_xlabel('Frequency [Hz]')
self.module_axes.set_ylabel('Amplitude [dB]', color='b')
self.module_axes.axis('tight')
self.module_axes.grid(which='both', axis='both')
self.phase_axes.set_ylabel('Angle (radians)', color='g')
self.canvas = FigureCanvas(self.fig)
self.canvas.setMinimumSize(self.canvas.size())
self.canvas.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Expanding,
QtGui.QSizePolicy.Policy.Expanding))
self.toolBarNavigation = navigationtoolbar.NavigationToolBar(self.canvas, self)
self.group_box = QtGui.QGroupBox(self)
self.group_box2 = QtGui.QGroupBox(self)
self.group_box3 = QtGui.QGroupBox(self)
self.group_box4 = QtGui.QGroupBox(self)
self.group_box.setTitle("")
self.group_box2.setTitle("")
self.group_box3.setTitle("Parameters")
self.start_point_label = QtGui.QLabel("Lower cutoff frequency (Hz): ")
self.start_point_label.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Maximum,
QtGui.QSizePolicy.Policy.Preferred))
self.start_point_spinbox = QtGui.QDoubleSpinBox(self.group_box)
self.start_point_spinbox.setMinimum(1.0)
self.start_point_spinbox.setSingleStep(1.00)
self.start_point_spinbox.setAccelerated(True)
self.start_point_spinbox.setMaximum(self.max_freq * 0.5)
self.end_point_label = QtGui.QLabel("Higher cutoff frequency (Hz):")
self.end_point_label.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Maximum,
QtGui.QSizePolicy.Policy.Preferred))
self.end_point_spinbox = QtGui.QDoubleSpinBox(self.group_box4)
self.end_point_spinbox.setMinimum(1.0)
self.end_point_spinbox.setSingleStep(1.00)
self.end_point_spinbox.setAccelerated(True)
self.end_point_spinbox.setMaximum(self.max_freq * 0.5)
self.end_point_spinbox.setValue(5.0)
#######################################################################
self.number_coefficient_label = QtGui.QLabel("Order: ")
self.number_coefficient_label2 = QtGui.QLabel("")
self.number_coefficient_label.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Maximum,
QtGui.QSizePolicy.Policy.Preferred))
self.number_coefficient_label2.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Maximum,
QtGui.QSizePolicy.Policy.Preferred))
self.number_coefficient_spinbox = QtGui.QSpinBox(self.group_box3)
self.number_coefficient_spinbox.adjustSize()
self.number_coefficient_spinbox.setMinimum(1)
self.number_coefficient_spinbox.setSingleStep(1)
self.number_coefficient_spinbox.setAccelerated(True)
self.zeroPhaseCheckBox = QtGui.QCheckBox("Zero phase filtering", self.group_box2)
self.zeroPhaseCheckBox.setChecked(True)
#######################################################################
self.group_box_layout = QtGui.QHBoxLayout(self.group_box)
self.group_box_layout.setContentsMargins(9, 9, 9, 9)
self.group_box_layout.setSpacing(12)
self.group_box_layout.addWidget(self.start_point_label)
self.group_box_layout.addWidget(self.start_point_spinbox)
self.group_box4_layout = QtGui.QHBoxLayout(self.group_box4)
self.group_box4_layout.setContentsMargins(9, 9, 9, 9)
self.group_box4_layout.setSpacing(12)
self.group_box4_layout.addWidget(self.end_point_label)
self.group_box4_layout.addWidget(self.end_point_spinbox)
#####################################################################
self.group_box2_layout = QtGui.QHBoxLayout(self.group_box2)
self.group_box2_layout.setContentsMargins(9, 9, 9, 9)
self.group_box2_layout.setSpacing(12)
self.group_box2_layout.addWidget(self.zeroPhaseCheckBox)
###################################################################
self.group_box3_layout = QtGui.QHBoxLayout(self.group_box3)
self.group_box3_layout.setContentsMargins(9, 9, 9, 9)
self.group_box3_layout.setSpacing(12)
self.group_box3_layout.addWidget(self.number_coefficient_label)
self.group_box3_layout.addWidget(self.number_coefficient_spinbox)
self.group_box3_layout.addWidget(self.number_coefficient_label2)
#####################################################################
self.button_box = QtGui.QDialogButtonBox(self)
self.button_box.setOrientation(QtCore.Qt.Horizontal)
self.button_box.setStandardButtons(QtGui.QDialogButtonBox.Apply |
QtGui.QDialogButtonBox.Cancel |
QtGui.QDialogButtonBox.Ok)
self.layout = QtGui.QVBoxLayout(self)
self.layout.setContentsMargins(9, 9, 9, 9)
self.layout.setSpacing(6)
self.layout.addWidget(self.toolBarNavigation)
self.layout.addWidget(self.canvas)
self.layout.addWidget(self.group_box3)
self.layout.addWidget(self.group_box)
self.layout.addWidget(self.group_box4)
#self.layout.addWidget(self.group_box2)
self.layout.addWidget(self.zeroPhaseCheckBox)
self.layout.addWidget(self.button_box)
def on_freq_min_changed(self, value):
self.end_point_spinbox.setMinimum(value + 1.0)
def on_freq_max_changed(self, value):
self.start_point_spinbox.setMaximum(value - 1.0)
def on_click(self, button):
if self.button_box.standardButton(button) == QtGui.QDialogButtonBox.Ok:
self.save_settings()
if self.button_box.standardButton(button) == QtGui.QDialogButtonBox.Apply:
self._draw_filter_response()
def save_settings(self):
"""Save settings to persistent storage."""
settings = QtCore.QSettings(_organization, _application_name)
settings.beginGroup("filterdesign_settings")
#self.default_margin = int(float(settings.value('filterdesign_margin', 5.0)) *
#self.record.fs)
settings.setValue('freq_min', self.start_point_spinbox.value())
settings.setValue('freq_max', self.end_point_spinbox.value())
settings.setValue('coef_number', self.number_coefficient_spinbox.value())
settings.setValue('zero_phase', self.zeroPhaseCheckBox.isChecked())
settings.endGroup()
def load_settings(self):
"""Loads settings from persistent storage."""
settings = QtCore.QSettings(_organization, _application_name)
settings.beginGroup("filterdesign_settings")
self.start_point_spinbox.setValue(float(settings.value('freq_min', 0.0)))
self.end_point_spinbox.setValue(float(settings.value('freq_max', self.max_freq * 0.5)))
self.number_coefficient_spinbox.setValue(int(settings.value('coef_number', 1)))
self.zeroPhaseCheckBox.setChecked(bool(settings.value('zero_phase', True)))
settings.endGroup()
def _butter_bandpass(self, lowcut, highcut, fs, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype='band')
return b, a
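    # Application sketch (for reference only; the dialog itself just plots the
    # response). ``trace_data`` is a hypothetical 1-D signal array:
    #   b, a = self._butter_bandpass(1.0, 5.0, fs=100.0, order=3)
    #   filtered = lfilter(b, a, trace_data)   # causal band-pass filtering
    # A zero-phase version would use scipy.signal.filtfilt(b, a, trace_data).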
def _retrieve_filter_plot_data(self):
b, a = self._butter_bandpass(self.start_point_spinbox.value(), self.end_point_spinbox.value(), self.max_freq, order=self.number_coefficient_spinbox.value())
#w, h = freqz(b, a)
w, h = freqz(b, a,1024)
angles = np.unwrap(np.angle(h))
#return (self.max_freq * 0.5 / np.pi) * w, 20 * np.log10(abs(h)), angles
        f = (self.max_freq / 2) * (w / np.pi)
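        # freqz returns w in rad/sample on [0, pi]; the line above converts it
        # to Hz via f = (fs / 2) * (w / pi), with fs taken as self.max_freq.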
return f, 20 * np.log10(abs(h)), angles
def _draw_filter_response(self, *args, **kwargs):
w, h_db, angles = self._retrieve_filter_plot_data()
self._module_data.set_xdata(w)
self._module_data.set_ydata(h_db)
self._phase_data.set_xdata(w)
self._phase_data.set_ydata(angles)
self.phase_axes.set_ylim([min(angles), max(angles)])
self.canvas.draw_idle()
|
gpl-3.0
|
h2educ/scikit-learn
|
examples/bicluster/bicluster_newsgroups.py
|
142
|
7183
|
"""
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets of words used more
often in those subsets of documents.
For a few of the best biclusters, their most common document categories
and their ten most important words are printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
""" Tokenizer that maps all numeric tokens to a placeholder.
For many applications, tokens that begin with a number are not directly
useful, but the fact that such a token exists can be relevant. By applying
this form of dimensionality reduction, some methods may perform better.
"""
token_pattern = re.compile(u'(?u)\\b\\w\\w+\\b')
tokens = token_pattern.findall(doc)
tokens = ["#NUMBER" if token[0] in "0123456789_" else token
for token in tokens]
return tokens
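# Illustrative note (not part of the original example):
#   number_aware_tokenizer("win32 users saw 404 errors")
#   -> ['win32', 'users', 'saw', '#NUMBER', 'errors']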
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
rows, cols = cocluster.get_indices(i)
if not (np.any(rows) and np.any(cols)):
import sys
return sys.float_info.max
row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
# Note: the following is identical to X[rows[:, np.newaxis], cols].sum() but
# much faster in scipy <= 0.16
weight = X[rows][:, cols].sum()
cut = (X[row_complement][:, cols].sum() +
X[rows][:, col_complement].sum())
return cut / weight
def most_common(d):
"""Items of a defaultdict(int) with the highest values.
Like Counter.most_common in Python >=2.7.
"""
return sorted(iteritems(d), key=operator.itemgetter(1), reverse=True)
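# Illustrative note (not part of the original example):
#   most_common({'sci.med': 3, 'sci.space': 1}) -> [('sci.med', 3), ('sci.space', 1)]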
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
|
bsd-3-clause
|
cainiaocome/scikit-learn
|
sklearn/utils/testing.py
|
47
|
23587
|
"""Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import re
import platform
import scipy as sp
import scipy.io
from functools import wraps
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import sklearn
from sklearn.base import BaseEstimator
# Conveniently import all assertions in one place.
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_false
from nose.tools import assert_raises
from nose.tools import raises
from nose import SkipTest
from nose import with_setup
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "raises", "with_setup", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal"]
try:
from nose.tools import assert_in, assert_not_in
except ImportError:
# Nose < 1.0.0
def assert_in(x, container):
assert_true(x in container, msg="%r in %r" % (x, container))
def assert_not_in(x, container):
assert_false(x in container, msg="%r in %r" % (x, container))
try:
from nose.tools import assert_raises_regex
except ImportError:
# for Python 2
def assert_raises_regex(expected_exception, expected_regexp,
callable_obj=None, *args, **kwargs):
"""Helper function to check for message patterns in exceptions"""
not_raised = False
try:
callable_obj(*args, **kwargs)
not_raised = True
except expected_exception as e:
error_message = str(e)
if not re.compile(expected_regexp).search(error_message):
raise AssertionError("Error message should match pattern "
"%r. %r does not." %
(expected_regexp, error_message))
if not_raised:
raise AssertionError("%s not raised by %s" %
(expected_exception.__name__,
callable_obj.__name__))
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex, but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def _assert_less(a, b, msg=None):
message = "%r is not lower than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a < b, message
def _assert_greater(a, b, msg=None):
message = "%r is not greater than %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a > b, message
def assert_less_equal(a, b, msg=None):
message = "%r is not lower than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a <= b, message
def assert_greater_equal(a, b, msg=None):
message = "%r is not greater than or equal to %r" % (a, b)
if msg is not None:
message += ": " + msg
assert a >= b, message
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
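# Hedged usage sketch (not part of the original module); `deprecated_func` is a
# made-up example:
#
#   def deprecated_func():
#       warnings.warn("use new_func instead", DeprecationWarning)
#       return 42
#
#   assert_warns(DeprecationWarning, deprecated_func)  # passes and returns 42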
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
# Check the message of all warnings that belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# XXX: once we may depend on python >= 2.6, this can be replaced by the
# warnings module context manager.
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: %s"
% (func.__name__, w))
return result
def ignore_warnings(obj=None):
""" Context manager and decorator to ignore warnings
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _ignore_warnings(obj)
else:
return _IgnoreWarnings()
def _ignore_warnings(fn):
"""Decorator to catch and hide warnings without visual nesting"""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
return fn(*args, **kwargs)
w[:] = []
return wrapper
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager
Copied from Python 2.7.5 and modified as required.
"""
def __init__(self):
"""
Parameters
==========
category : warning class
The category to filter. Defaults to Warning. If None,
all categories will be muted.
"""
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter('always')
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
if self._record:
self.log = []
def showwarning(*args, **kwargs):
self.log.append(warnings.WarningMessage(*args, **kwargs))
self._module.showwarning = showwarning
return self.log
else:
return None
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
try:
from nose.tools import assert_less
except ImportError:
assert_less = _assert_less
try:
from nose.tools import assert_greater
except ImportError:
assert_greater = _assert_greater
def _assert_allclose(actual, desired, rtol=1e-7, atol=0,
err_msg='', verbose=True):
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
if np.allclose(actual, desired, rtol=rtol, atol=atol):
return
msg = ('Array not equal to tolerance rtol=%g, atol=%g: '
'actual %s, desired %s') % (rtol, atol, actual, desired)
raise AssertionError(msg)
if hasattr(np.testing, 'assert_allclose'):
assert_allclose = np.testing.assert_allclose
else:
assert_allclose = _assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions
Parameters
----------
exceptions : exception or tuple of exceptions
The expected exception class(es).
function : callable
Callable object expected to raise the error.
*args : the positional arguments to `function`.
**kwargs : the keyword arguments to `function`.
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
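# Hedged usage sketch (not part of the original module): the built-in `int`
# raises ValueError("invalid literal ...") for a non-numeric string, so
#
#   assert_raise_message(ValueError, "invalid literal", int, "not a number")
#
# passes, while a non-matching message raises AssertionError.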
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
'data'; keep that in mind in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
{dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier",
"OutputCodeClassifier", "OneVsRestClassifier", "RFE",
"RFECV", "BaseEnsemble"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV",
"RandomizedSearchCV"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
Whether to include meta-estimators that are somehow special and can
not be default-constructed sensibly. These are currently
Pipeline, FeatureUnion, GridSearchCV and RandomizedSearchCV.
include_dont_test : boolean, default=False
Whether to include "special" label estimators or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if ".tests." in modname:
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator)
and c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
return sorted(set(estimators))
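# Hedged usage sketch (not part of the original module):
#
#   clusterers = all_estimators(type_filter='cluster')
#   names = [name for name, cls in clusterers]   # includes e.g. 'KMeans'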
def set_random_state(estimator, random_state=0):
if "random_state" in estimator.get_params().keys():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed. """
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def if_not_mac_os(versions=('10.7', '10.8', '10.9'),
message='Multi-process bug in Mac OS X >= 10.7 '
'(see issue #636)'):
"""Test decorator that skips test if OS is Mac OS X and its
major version is one of ``versions``.
"""
mac_version, _, _ = platform.mac_ver()
skip = '.'.join(mac_version.split('.')[:2]) in versions
def decorator(func):
if skip:
@wraps(func)
def func(*args, **kwargs):
raise SkipTest(message)
return func
return decorator
def clean_warning_registry():
"""Safe way to reset warnings """
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
with_network = with_setup(check_skip_network)
with_travis = with_setup(check_skip_travis)
|
bsd-3-clause
|
rowanc1/simpegflow
|
docs/conf.py
|
3
|
7871
|
# -*- coding: utf-8 -*-
#
# SimPEG documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 30 18:42:44 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.append('../')
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode', 'sphinx.ext.autodoc', 'matplotlib.sphinxext.plot_directive']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'SimPEG'
copyright = u'2013, SimPEG Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'SimPEGdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'SimPEG.tex', u'SimPEG Documentation',
u'Rowan Cockett', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'simpeg', u'SimPEG Documentation',
[u'Rowan Cockett'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'SimPEG', u'SimPEG Documentation',
u'Rowan Cockett', 'SimPEG', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
|
mit
|
1nsect/ScrabbleStream
|
main.py
|
1
|
2291
|
import sys
import time #sleep function
import numpy as np
from matplotlib import pyplot as plt
np.set_printoptions(threshold=sys.maxsize)  # print full arrays without truncation
import matplotlib.image as mpimg
import cv2
import math #to use absolute value function 'fabs'
import pytesseract
from PIL import Image
#from pyimagesearch import imutils #can't find that modul...
import ToolboxScrabble as ts
import PictureAcquisition as pa
import ReadBoard as rb
#Setting - Setting - Setting - Setting - Setting - Setting - Setting - Setting - Setting - Setting - Setting -
ImageSize = 1000 #size of the board's image
EdgeRatio = float(31)/float(32)
Margin = ImageSize - ImageSize * EdgeRatio
CellSize = int(round((ImageSize - 2 * Margin) / 15))
# This value will need to be calibrated
Threshold = 110
#get coordinates of all the columns
X_ = ts.getColumnsPixelPosition(Margin,CellSize)
print(X_)
TimeToSkip= 100
TimeToWait = 4000
#Init - Init - Init - Init - Init - Init - Init - Init - Init - Init - Init - Init - Init - Init - Init - Init -
'''Take picture from camera
im=pa.takePicture()
im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
ts.ShowImage('coucou',im,0)
'''
#Create the survey matrix
boardState = np.zeros((15, 15), dtype=object)
# load the query image
# to the new height, clone it, and resize it
im = cv2.imread('PlateauO.jpg')
orig = im.copy()
im = cv2.resize(im, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)  # halve the image in each dimension (dsize=None, so fx/fy apply)
# convert the image to grayscale
gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
ts.ShowImage('title',gray,TimeToSkip)
#croping
perspective = pa.CropBoard(gray, ImageSize, TimeToSkip)
ts.ShowImage('Perspective',perspective,TimeToSkip)
#Loop - Loop - Loop - Loop - Loop - Loop - Loop - Loop - Loop - Loop - Loop - Loop - Loop - Loop - Loop - Loop -
#Scan the new board state and extract new caramels
newFilledCells = rb.getFilledCells(perspective,X_,boardState,CellSize,Threshold)
print(newFilledCells)
#add the new filled cells to the boardState matrix
boardState = boardState + newFilledCells
#draw line to know where the columns are
rb.drawGrid(perspective.copy(), X_, CellSize)
rb.ReadBoard(perspective,boardState,X_,CellSize)
print(boardState)
#letter = rb.getChar(perspective, 7, 7, CellSize)
#print letter
'''
While():
Calibration protocol
'''
print("End")
|
gpl-3.0
|
nlholdem/icodoom
|
.venv/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py
|
88
|
31139
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert (isinstance(n_classes, dict))
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([
(k, out_el_shape(v, n_classes[k]
if n_classes is not None and k in n_classes else None))
for k, v in list(y_shape.items())
])
return input_shape, output_shape, batch_size
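# Illustrative note (not part of the original module), for plain ndarray shapes:
#   _get_in_out_shape((100, 28, 28), (100,), n_classes=10, batch_size=32)
#   -> ([32, 28, 28], [32, 10], 32)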
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(x,
y,
n_classes,
batch_size=None,
shuffle=True,
epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
supports iterables.
y: numpy, pandas or Dask array or dictionary of aforementioned. Also
supports
iterables.
n_classes: number of classes. Must be None or same type as y. In case `y`
is a `dict` (or an iterable which returns a dict), `n_classes` must be a
`dict` such that `n_classes[key]` is the number of classes for `y[key]`.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
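# Hedged usage sketch (not part of the original module), assuming in-memory numpy
# inputs; the array contents and sizes below are arbitrary placeholders:
#
#   x = np.random.rand(100, 5).astype(np.float32)
#   y = np.random.randint(0, 3, size=100)
#   feeder = setup_train_data_feeder(x, y, n_classes=3, batch_size=10)
#   input_ph, output_ph = feeder.input_builder()   # graph placeholders
#   feed_dict = feeder.get_feed_dict_fn()()        # one mini-batch as a feed dict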
def _batch_data(x, batch_size=None):
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
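# Illustrative note (not part of the original module): with plain (non-dict) elements,
#   list(_batch_data(iter([1, 2, 3]), batch_size=2))
# yields [matrix([[1, 2]]), matrix([[3]])].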
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
iterable.
batch_size: Size of batches to split data into. If `None`, returns one
batch of full size.
Returns:
List or iterator (or dictionary thereof) of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
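# Illustrative note (not part of the original module):
#   setup_predict_data_feeder(np.arange(10), batch_size=4)
# returns three batches with shapes (4, 1), (4, 1) and (2, 1).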
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
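# Illustrative note (not part of the original module):
#   check_array([1, 2, 3], np.float32) -> array([1., 2., 3.], dtype=float32)
# whereas e.g. an h5py.Dataset is returned unchanged.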
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
data: array-like. The collection to access
iloc: `int` or `list` of `int`s. Location(s) to access in `collection`
Returns:
The element of `a` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer."""
def __init__(self,
x,
y,
n_classes,
batch_size=None,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
x: One feature sample which can be either an Nd numpy matrix of shape
`[n_samples, n_features, ...]` or a dictionary of Nd numpy matrices.
y: label vector, either floats for regression or class id for
classification. If matrix, will consider as a sequence of labels.
Can be `None` for unsupervised setting. Also supports dictionary of
labels.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion. Also, if
`y` is `dict`, then `n_classes` must be `dict` such that
`n_classes[key] = n_classes for label y[key]`, `None` otherwise.
batch_size: Mini-batch size to accumulate samples in one mini batch.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features (ndarray or dictionary of ndarrays).
y: Input label (ndarray or dictionary of ndarrays).
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input (or dictionary of shapes).
output_shape: Shape of the output (or dictionary of shapes).
input_dtype: DType of input (or dictionary of dtypes).
output_dtype: DType of output (or dictionary of dtypes).
"""
x_is_dict, y_is_dict = isinstance(x, dict), y is not None and isinstance(
y, dict)
if isinstance(y, list):
y = np.array(y)
self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
]) if x_is_dict else check_array(x, x.dtype)
self._y = None if y is None else \
dict([(k, check_array(v, v.dtype)) for k, v in list(y.items())]) if y_is_dict else check_array(y, y.dtype)
# self.n_classes is not None means we're converting raw target indices to one-hot.
if n_classes is not None:
if not y_is_dict:
y_dtype = (np.int64
if n_classes is not None and n_classes > 1 else np.float32)
self._y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.max_epochs = epochs
x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
]) if x_is_dict else self._x.shape
y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
]) if y_is_dict else None if y is None else self._y.shape
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of x.
self._input_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())]) if x_is_dict \
else _check_dtype(self._x.dtype)
# note: self._output_dtype = np.float32 when y is None
self._output_dtype = dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())]) if y_is_dict \
else _check_dtype(self._y.dtype) if y is not None else np.float32
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None and y_is_dict:
for key in list(n_classes.keys()):
if key in self._output_dtype:
self._output_dtype[key] = np.float32
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
num_samples = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
if self._shuffle:
self.indices = self.random_state.permutation(num_samples)
else:
self.indices = np.array(range(num_samples))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(
dtypes.int32, [1], name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
def get_placeholder(shape, dtype, name_prepend):
if shape is None:
return None
if isinstance(shape, dict):
placeholder = {}
for key in list(shape.keys()):
placeholder[key] = array_ops.placeholder(
dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
name=name_prepend + '_' + key)
else:
placeholder = array_ops.placeholder(
dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
return placeholder
self._input_placeholder = get_placeholder(self.input_shape,
self._input_dtype, 'input')
self._output_placeholder = get_placeholder(self.output_shape,
self._output_dtype, 'output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be `None`.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from `x` and `y`.
"""
x_is_dict, y_is_dict = isinstance(
self._x, dict), self._y is not None and isinstance(self._y, dict)
# Assign input features from random indices.
def extract(data, indices):
return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if
len(data.shape) == 1 else _access(data, indices))
# assign labels from random indices
def assign_label(data, shape, dtype, n_classes, indices):
shape[0] = indices.shape[0]
out = np.zeros(shape, dtype=dtype)
for i in xrange(out.shape[0]):
sample = indices[i]
# self.n_classes is None means we're passing in raw target indices
if n_classes is None:
out[i] = _access(data, sample)
else:
if n_classes > 1:
if len(shape) == 2:
out.itemset((i, int(_access(data, sample))), 1.0)
else:
for idx, value in enumerate(_access(data, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(data, sample)
return out
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
x_len = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
end = min(x_len, self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# adding input placeholder
feed_dict.update(
dict([(self._input_placeholder[k].name, extract(v, batch_indices))
for k, v in list(self._x.items())]) if x_is_dict else
{self._input_placeholder.name: extract(self._x, batch_indices)})
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= x_len:
self.indices = self.random_state.permutation(
x_len) if self._shuffle else np.array(range(x_len))
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# adding output placeholders
if y_is_dict:
for k, v in list(self._y.items()):
n_classes = (self.n_classes[k] if k in self.n_classes else
None) if self.n_classes is not None else None
shape, dtype = self.output_shape[k], self._output_dtype[k]
feed_dict.update({
self._output_placeholder[k].name:
assign_label(v, shape, dtype, n_classes, batch_indices)
})
else:
shape, dtype, n_classes = self.output_shape, self._output_dtype, self.n_classes
feed_dict.update({
self._output_placeholder.name:
assign_label(self._y, shape, dtype, n_classes, batch_indices)
})
return feed_dict
return _feed_dict_fn
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
Streaming data feeder allows reading data as it comes in from disk or
somewhere else. It is customary to have these iterators rotate infinitely
over the dataset, to allow control of how much to learn on the trainer side.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
x: iterator each element of which returns one feature sample. Sample can
be a Nd numpy matrix or dictionary of Nd numpy matrices.
y: iterator each element of which returns one label sample. Sample can be
a Nd numpy matrix or dictionary of Nd numpy matrices with 1 or many
classes regression values.
n_classes: indicator of how many classes the corresponding label sample
has for the purposes of one-hot conversion of label. In case where `y`
is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
of how many classes there are in each label in `y`. If key is
present in `y` and missing in `n_classes`, the value is assumed `None`
and no one-hot conversion will be applied to the label with that key.
batch_size: Mini batch size to accumulate samples in one batch. If set to
`None`, the iterator is assumed to return already batched elements.
Attributes:
x: input features (or dictionary of input features).
y: input label (or dictionary of output features).
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input (can be dictionary depending on `x`).
output_shape: shape of the output (can be dictionary depending on `y`).
input_dtype: dtype of input (can be dictionary depending on `x`).
output_dtype: dtype of output (can be dictionary depending on `y`).
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
x_is_dict = isinstance(x_first_el, dict)
y_is_dict = y is not None and isinstance(y_first_el, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
# extract shapes for first_elements
if x_is_dict:
x_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
else:
x_first_el_shape = [1] + list(x_first_el.shape)
if y_is_dict:
y_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
elif y is None:
y_first_el_shape = None
else:
y_first_el_shape = ([1] + list(y_first_el[0].shape if isinstance(
y_first_el, list) else y_first_el.shape))
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_first_el_shape, y_first_el_shape, n_classes, batch_size)
# Input dtype of x_first_el.
if x_is_dict:
self._input_dtype = dict(
[(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
else:
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output dtype of y_first_el.
def check_y_dtype(el):
if isinstance(el, np.ndarray):
return el.dtype
elif isinstance(el, list):
return check_y_dtype(el[0])
else:
return _check_dtype(np.dtype(type(el)))
# Output types are floats, due to both softmaxes and regression req.
if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
self._output_dtype = np.float32
elif y_is_dict:
self._output_dtype = dict(
[(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
elif y is None:
self._output_dtype = None
else:
self._output_dtype = check_y_dtype(y_first_el)
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
`dict` of input and output tensors.
"""
def init_array(shape, dtype):
"""Initialize array of given shape or dict of shapes and dtype."""
if shape is None:
return None
elif isinstance(shape, dict):
return dict([(k, np.zeros(shape[k], dtype[k]))
for k in list(shape.keys())])
else:
return np.zeros(shape, dtype=dtype)
def put_data_array(dest, index, source=None, n_classes=None):
"""Puts data array into container."""
if source is None:
dest = dest[:index]
elif n_classes is not None and n_classes > 1:
if len(self.output_shape) == 2:
dest.itemset((index, source), 1.0)
else:
for idx, value in enumerate(source):
dest.itemset(tuple([index, idx, value]), 1.0)
else:
if len(dest.shape) > 1:
dest[index, :] = source
else:
dest[index] = source[0] if isinstance(source, list) else source
return dest
def put_data_array_or_dict(holder, index, data=None, n_classes=None):
"""Puts data array or data dictionary into container."""
if holder is None:
return None
if isinstance(holder, dict):
if data is None:
data = {k: None for k in holder.keys()}
assert isinstance(data, dict)
for k in holder.keys():
num_classes = n_classes[k] if (n_classes is not None and
k in n_classes) else None
holder[k] = put_data_array(holder[k], index, data[k], num_classes)
else:
holder = put_data_array(holder, index, data, n_classes)
return holder
if self.stopped:
raise StopIteration
inp = init_array(self.input_shape, self._input_dtype)
out = init_array(self.output_shape, self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
next_inp = six.next(self._x)
inp = put_data_array_or_dict(inp, i, next_inp, None)
except StopIteration:
self.stopped = True
if i == 0:
raise
inp = put_data_array_or_dict(inp, i, None, None)
out = put_data_array_or_dict(out, i, None, None)
break
if self._y is not None:
next_out = six.next(self._y)
out = put_data_array_or_dict(out, i, next_out, self.n_classes)
# creating feed_dict
if isinstance(inp, dict):
feed_dict = dict([(self._input_placeholder[k].name, inp[k])
for k in list(self._input_placeholder.keys())])
else:
feed_dict = {self._input_placeholder.name: inp}
if self._y is not None:
if isinstance(out, dict):
feed_dict.update(
dict([(self._output_placeholder[k].name, out[k])
for k in list(self._output_placeholder.keys())]))
else:
feed_dict.update({self._output_placeholder.name: out})
return feed_dict
return _feed_dict_fn
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
Numpy arrays can be serialized to disk and it's possible to do random seeks
into them. DaskDataFeeder will remove requirement to have full dataset in the
memory and still do random seeks for sampling of batches.
"""
def __init__(self,
x,
y,
n_classes,
batch_size,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
x: iterator that, for each element, returns features.
y: iterator that, for each element, returns 1 or many classes /
regression values.
n_classes: indicator of how many classes the label has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
random_state: random state for RNG. Note that it will mutate, so use an
int value for this if you want consistently sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input label.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
Raises:
ValueError: if `x` or `y` are `dict`, as they are not supported currently.
"""
if isinstance(x, dict) or isinstance(y, dict):
raise ValueError(
'DaskDataFeeder does not support dictionaries at the moment.')
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapped default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.Placeholder for input features mini batch.
output_placeholder: tf.Placeholder for output labels.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
|
gpl-3.0
|
jowr/le-logger
|
webapp/plotting.py
|
1
|
8629
|
import numpy as np
import pandas as pd
from bokeh.embed import components
from bokeh.plotting import figure
from bokeh.resources import INLINE
from bokeh.layouts import gridplot
from bokeh.models import DatetimeTickFormatter
from bokeh.charts import BoxPlot
from bokeh.palettes import viridis as palette
from database import DataSet
#FIGURE_OPTIONS = dict(plot_width=1200, plot_height=300, logo="grey")
FIGURE_OPTIONS = dict(logo="grey")
SCATTER_OPTIONS = dict(alpha=0.5)
LINE_OPTIONS = dict(line_width=2, alpha=0.95)
def _get_dummies():
data_sets = []
for i in range(4):
ds = DataSet()
ds.set_dummy_data()
data_sets.append(ds)
return data_sets
def alldata(data_sets=[]):
########## BUILD FIGURES ################
if len(data_sets) < 1:
data_sets = _get_dummies()
series_count = len(data_sets)
colours = palette(series_count)
all_data_temp = figure(responsive=True, x_axis_label = "Days", x_axis_type = "datetime", y_axis_label = "Temperature / C", y_axis_type = "linear", **FIGURE_OPTIONS)
for (clr, ds) in zip(colours, data_sets):
my_plot = all_data_temp.line(ds.time_series, ds.temp_series, color = clr, legend = ds.name, **LINE_OPTIONS)
all_data_humi = figure(x_range=all_data_temp.x_range, responsive=True, x_axis_label = "Days", x_axis_type = "datetime", y_axis_label = "Relative humidity / %", y_axis_type = "linear", **FIGURE_OPTIONS)
for (clr, ds) in zip(colours, data_sets):
my_plot = all_data_humi.line(ds.time_series, ds.humi_series, color = clr, legend = ds.name, **LINE_OPTIONS)
for p in [all_data_temp, all_data_humi]:
p.xaxis.formatter=DatetimeTickFormatter(formats=dict(
hours=["%k:%M"],
days=["%d. %m. %y"],
months=["%m %Y"],
years=["%Y"],
))
all_data = gridplot([all_data_temp, all_data_humi], ncols=2, plot_width=500, plot_height=250, sizing_mode='scale_width',
toolbar_options=dict(logo="grey"))
#toolbar_options=dict(logo="grey", location='above'), merge_tools=False)
########## RENDER PLOTS ################
resources = INLINE
js_resources = resources.render_js()
css_resources = resources.render_css()
plot_script, plot_divs = components({'Oversigt over alt data': all_data})
return js_resources, css_resources, plot_script, plot_divs
def operating_hours(data_sets=[]):
########## BUILD FIGURES ################
if len(data_sets) < 1:
data_sets = _get_dummies()
series_count = len(data_sets)
colours = palette(series_count)
data_frames = []
for ds in data_sets:
df = ds.as_data_frame()
day_filter = (df['timestamp'].dt.dayofweek == 5) | (df['timestamp'].dt.dayofweek == 6)
#df = df.drop(df[day_filter].index)
hour_filter = (df['timestamp'].dt.hour < 15) | (df['timestamp'].dt.hour > 21)
#df = df.drop(df[hour_filter].index)
#df = df.drop(df[day_filter | hour_filter].index)
#df['temperature'][day_filter | hour_filter] = np.NaN
#df['humidity'][day_filter | hour_filter] = np.NaN
idx = df.ix[day_filter | hour_filter].index
#df.temperature[idx] = np.NaN
#df.humidity[idx] = np.NaN
df.loc[idx,'temperature'] = np.NaN
df.loc[idx,'humidity'] = np.NaN
#df.at[dates[5], 'E'] = 7
df['time'] = df['timestamp'].dt.time
data_frames.append(df)
all_data_temp = figure(responsive=True, x_axis_label = "Time of day", x_axis_type = "datetime", y_axis_label = "Temperature / C", y_axis_type = "linear", **FIGURE_OPTIONS)
for (clr, ds, df) in zip(colours, data_sets, data_frames):
#my_plot = all_data_temp.scatter(df.time, df.temperature, color = clr, legend = ds.name, **SCATTER_OPTIONS)
my_plot = all_data_temp.line(df.time, df.temperature, color = clr, legend = ds.name, **LINE_OPTIONS)
all_data_humi = figure(x_range=all_data_temp.x_range, responsive=True, x_axis_label = "Time of day", x_axis_type = "datetime", y_axis_label = "Relative humidity / %", y_axis_type = "linear", **FIGURE_OPTIONS)
for (clr, ds, df) in zip(colours, data_sets, data_frames):
my_plot = all_data_humi.scatter(df.time, df.humidity, color = clr, legend = ds.name, **SCATTER_OPTIONS)
#my_plot = all_data_humi.line(df.time, df.humidity, color = clr, legend = ds.name, **LINE_OPTIONS)
for p in [all_data_temp, all_data_humi]:
p.xaxis.formatter=DatetimeTickFormatter(formats=dict(
hours=["%k:%M"],
days=["%d. %m. %y"],
months=["%m %Y"],
years=["%Y"],
))
all_data = gridplot([all_data_temp, all_data_humi], ncols=2, plot_width=500, plot_height=250, sizing_mode='scale_width',
toolbar_options=dict(logo="grey"))
#toolbar_options=dict(logo="grey", location='above'), merge_tools=False)
########## RENDER PLOTS ################
resources = INLINE
js_resources = resources.render_js()
css_resources = resources.render_css()
plot_script, plot_divs = components({'Data fra kl. 15 - 22, uden loerdag': all_data})
return js_resources, css_resources, plot_script, plot_divs
def statistics(data_sets=[]):
########## BUILD FIGURES ################
if len(data_sets) < 1:
data_sets = _get_dummies()
series_count = len(data_sets)
colours = palette(series_count)
data_frame = pd.DataFrame()
data_frames = []
for ds in data_sets:
df = ds.as_data_frame()
day_filter = (df['timestamp'].dt.dayofweek == 5) | (df['timestamp'].dt.dayofweek == 6)
#df = df.drop(df[day_filter].index)
hour_filter = (df['timestamp'].dt.hour < 15) | (df['timestamp'].dt.hour > 21)
#df = df.drop(df[hour_filter].index)
#df = df.drop(df[day_filter | hour_filter].index)
#df['temperature'][day_filter | hour_filter] = np.NaN
#df['humidity'][day_filter | hour_filter] = np.NaN
idx = df.ix[day_filter | hour_filter].index
#df.temperature[idx] = np.NaN
#df.humidity[idx] = np.NaN
df.loc[idx,'temperature'] = np.NaN
df.loc[idx,'humidity'] = np.NaN
#df.at[dates[5], 'E'] = 7
df = df.drop(idx)
df['time'] = df['timestamp'].dt.time
df['box_label'] = ["{1}-{2} - {0}".format(ds.name, tt, tt+1) for tt in df['timestamp'].dt.hour]
df['box_label_merged'] = ["kl. {1}-{2} - {0}".format(ds.name.split(',')[0], tt, tt+1) for tt in df['timestamp'].dt.hour]
df.loc[:,'colour'] = ds.name
df.loc[:,'colour_merged'] = ds.name.split(',')[0]
data_frames.append(df)
data_frame = pd.concat([data_frame,df], ignore_index=True)
#data_frame = pd.DataFrame(columns=['timestamp', 'temperature', 'humidity', 'box_label'])
all_data_temp = BoxPlot(data_frame, values='temperature', label='box_label', color='colour', responsive=True, xlabel = "Time and place", ylabel = "Temperature / C", legend=False)
all_data_humi = BoxPlot(data_frame, values='humidity', label='box_label', color='colour', responsive=True, xlabel = "Time and place", ylabel = "Relative humidity / %", legend=False)
all_data = gridplot([all_data_temp, all_data_humi], ncols=2, plot_width=500, plot_height=400, sizing_mode='scale_width',
toolbar_options=dict(logo="grey"))
#toolbar_options=dict(logo="grey", location='above'), merge_tools=False)
merged_data_temp = BoxPlot(data_frame, values='temperature', label='box_label_merged', color='colour_merged', responsive=True, xlabel = "Time and place", ylabel = "Temperature / C", legend=False)
merged_data_humi = BoxPlot(data_frame, values='humidity', label='box_label_merged', color='colour_merged', responsive=True, xlabel = "Time and place", ylabel = "Relative humidity / %", legend=False)
merged_data = gridplot([merged_data_temp, merged_data_humi], ncols=2, plot_width=500, plot_height=400, sizing_mode='scale_width',
toolbar_options=dict(logo="grey"))
#toolbar_options=dict(logo="grey", location='above'), merge_tools=False)
########## RENDER PLOTS ################
resources = INLINE
js_resources = resources.render_js()
css_resources = resources.render_css()
plot_script, plot_divs = components({'Data fra kl. 15 - 22, uden loerdag': all_data, 'Data fra kl. 15 - 22, uden loerdag, reference og uden udsugning': merged_data})
return js_resources, css_resources, plot_script, plot_divs
|
gpl-3.0
|
dr-guangtou/KungPao
|
kungpao/isophote/helper.py
|
1
|
5156
|
"""Helper functions for isophote analysis."""
import os
import platform
import subprocess
import numpy as np
from matplotlib.patches import Ellipse
import kungpao
__all__ = ['fits_to_pl', 'iraf_commands', 'fix_pa_profile', 'isophote_to_ellip',
'save_isophote_output', 'remove_index_from_output']
def fits_to_pl(ximage, fits, output=None, verbose=False):
"""Convert FITS image into the IRAF .pl format.
Parameters
----------
ximage: string
Location of the x_images.e executable file.
fits: string
Input FITS file name.
output: string, optional
Output .pl file name. Default: None.
If None, the file name will be "input.fits.pl".
verbose: bool, optional
Whether to print progress messages. Default: False.
"""
if not os.path.isfile(ximage) and not os.path.islink(ximage):
raise FileNotFoundError("Can not find x_images.e: {}".format(ximage))
if not os.path.isfile(fits):
raise FileNotFoundError("Can not find input FITS image: {}".format(fits))
if output is None:
# TODO: Need to test whether .fits.pl or .pl works.
output = fits.replace('.fits', '.fits.pl')
if os.path.isfile(output):
if verbose:
print("# Output file exists! Will remove {}".format(output))
os.remove(output)
imcopy = "{} imcopy input={} output={} verbose=no".format(
ximage, fits.strip(), output.strip()
)
os.system(imcopy)
return output
def iraf_commands():
"""Locate the exectuble files for IRAF functions.
Returns
-------
iraf: dict
Dictionary for the IRAF functions.
"""
if platform.system() == 'Darwin':
IRAF_DIR = os.path.join(
os.path.dirname(kungpao.__file__), 'iraf', 'macosx')
elif platform.system() == 'Linux':
IRAF_DIR = os.path.join(
os.path.dirname(kungpao.__file__), 'iraf', 'linux')
else:
raise ValueError(
'Wrong platform: only support MacOSX or Linux now')
return (os.path.join(IRAF_DIR, 'x_isophote.e'),
os.path.join(IRAF_DIR, 'x_ttools.e'),
os.path.join(IRAF_DIR, 'x_images.e'))
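# --- Hedged usage sketch (added for illustration; not part of the original
# module). It wires `iraf_commands` into `fits_to_pl`; the FITS file name
# below is hypothetical and must exist for the conversion to run.
def _example_fits_to_pl(fits_name='mask_galaxy.fits'):
    """Convert a (hypothetical) mask image into the IRAF .pl format."""
    _, _, x_images = iraf_commands()  # third element is the x_images.e binary
    return fits_to_pl(x_images, fits_name, verbose=True)  # e.g. 'mask_galaxy.fits.pl'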
def fix_pa_profile(ellipse_output, pa_col='pa', delta_pa=75.0):
"""
Correct the position angle for large jump.
Parameters
----------
ellipse_output: astropy.table
Output table summarizing the result from `ellipse`.
pa_col: string, optional
Name of the position angle column. Default: pa
delta_pa: float, optional
Largest PA difference allowed for two adjacent radial bins. Default=75.
Return
------
ellipse_output with updated position angle column.
"""
pa = ellipse_output[pa_col]
for i in range(1, len(pa)):
if (pa[i] - pa[i - 1]) >= delta_pa:
pa[i] -= 180.0
elif pa[i] - pa[i - 1] <= (-1.0 * delta_pa):
pa[i] += 180.0
ellipse_output[pa_col] = pa
return ellipse_output
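# --- Hedged example (added for illustration; not part of the original module).
# Shows the 180-degree unwrapping on a toy table; astropy is assumed to be
# available since the functions above expect astropy tables.
def _example_fix_pa_profile():
    from astropy.table import Table
    toy = Table({'pa': [10.0, 12.0, 190.0, 191.0, 15.0]})
    fixed = fix_pa_profile(toy, pa_col='pa', delta_pa=75.0)
    return list(fixed['pa'])  # the 190/191 entries are shifted down by 180 deg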
def isophote_to_ellip(ellipse_output, x_pad=0.0, y_pad=0.0):
"""
Convert ellipse results into ellipses for visualization.
Parameters
----------
ellipse_output: astropy.table
Output table summarizing the result from `ellipse`.
x_pad: float, optional
Offset subtracted from the isophote x-center. Default: 0.
y_pad: float, optional
Offset subtracted from the isophote y-center. Default: 0.
Return
------
ell_list: list
List of Matplotlib elliptical patches for making plot.
"""
x = ellipse_output['x0'] - x_pad
y = ellipse_output['y0'] - y_pad
pa = ellipse_output['pa']
a = ellipse_output['sma'] * 2.0
b = ellipse_output['sma'] * 2.0 * (1.0 - ellipse_output['ell'])
ell_list = [Ellipse(xy=np.array([x[i], y[i]]), width=np.array(b[i]),
height=np.array(a[i]), angle=np.array(pa[i]))
for i in range(x.shape[0])]
return ell_list
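# --- Hedged example (added for illustration; not part of the original module).
# Overlays the isophote ellipses on an image; `ax` is any matplotlib Axes and
# `ellipse_output` is the astropy table produced by `ellipse`.
def _example_overlay_isophotes(img, ellipse_output, ax, color='r'):
    ax.imshow(img, origin='lower')
    for ell in isophote_to_ellip(ellipse_output):
        ell.set_facecolor('none')
        ell.set_edgecolor(color)
        ax.add_patch(ell)
    return ax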
def save_isophote_output(ellip_output, prefix=None, ellip_config=None, location=''):
"""
Save the Ellipse output to file.
Parameters
----------
ellip_output: astropy.table
Output table for the isophote analysis.
ellip_config: dict
Configuration parameters for the isophote analysis.
prefix: string, optional
Prefix of the output file. Default: None
location: string, optional
Directory to keep the output.
Returns
-------
output_file: string
Name of the output numpy record.
"""
if prefix is None:
prefix = 'ellip_output'
output_file = os.path.join(location, prefix + ".npz")
# Save the output and configuration parameters in a 'npz'.
np.savez(output_file, output=ellip_output, config=ellip_config)
return output_file
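# --- Hedged example (added for illustration; not part of the original module).
# Reads the record written by `save_isophote_output` back in; `allow_pickle`
# is required because the table and config are stored as object arrays.
def _example_load_isophote_output(output_file):
    saved = np.load(output_file, allow_pickle=True)
    return saved['output'], saved['config']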
def remove_index_from_output(output_tab, replace='NaN'):
"""
Remove the INDEF values from the Ellipse output.
Parameters
----------
output_tab: string
Name of the Ellipse output table.
replace: string, optional
String used to replace 'INDEF'. Default: 'NaN'.
"""
if os.path.exists(output_tab):
subprocess.call(['sed', '-i_back', 's/INDEF/' + replace + '/g', output_tab])
# Remove the back-up file
if os.path.isfile(output_tab.replace('.tab', '_back.tab')):
os.remove(output_tab.replace('.tab', '_back.tab'))
else:
raise FileExistsError('Can not find the input catalog: {}'.format(output_tab))
return output_tab
|
gpl-3.0
|
Ziqi-Li/bknqgis
|
pandas/pandas/tests/indexes/test_frozen.py
|
18
|
2435
|
import numpy as np
from pandas.util import testing as tm
from pandas.tests.test_base import CheckImmutable, CheckStringMixin
from pandas.core.indexes.frozen import FrozenList, FrozenNDArray
from pandas.compat import u
class TestFrozenList(CheckImmutable, CheckStringMixin):
mutable_methods = ('extend', 'pop', 'remove', 'insert')
unicode_container = FrozenList([u("\u05d0"), u("\u05d1"), "c"])
def setup_method(self, method):
self.lst = [1, 2, 3, 4, 5]
self.container = FrozenList(self.lst)
self.klass = FrozenList
def test_add(self):
result = self.container + (1, 2, 3)
expected = FrozenList(self.lst + [1, 2, 3])
self.check_result(result, expected)
result = (1, 2, 3) + self.container
expected = FrozenList([1, 2, 3] + self.lst)
self.check_result(result, expected)
def test_inplace(self):
q = r = self.container
q += [5]
self.check_result(q, self.lst + [5])
# other shouldn't be mutated
self.check_result(r, self.lst)
class TestFrozenNDArray(CheckImmutable, CheckStringMixin):
mutable_methods = ('put', 'itemset', 'fill')
unicode_container = FrozenNDArray([u("\u05d0"), u("\u05d1"), "c"])
def setup_method(self, method):
self.lst = [3, 5, 7, -2]
self.container = FrozenNDArray(self.lst)
self.klass = FrozenNDArray
def test_shallow_copying(self):
original = self.container.copy()
assert isinstance(self.container.view(), FrozenNDArray)
assert not isinstance(self.container.view(np.ndarray), FrozenNDArray)
assert self.container.view() is not self.container
tm.assert_numpy_array_equal(self.container, original)
# Shallow copy should be the same too
assert isinstance(self.container._shallow_copy(), FrozenNDArray)
# setting should not be allowed
def testit(container):
container[0] = 16
self.check_mutable_error(testit, self.container)
def test_values(self):
original = self.container.view(np.ndarray).copy()
n = original[0] + 15
vals = self.container.values()
tm.assert_numpy_array_equal(original, vals)
assert original is not vals
vals[0] = n
assert isinstance(self.container, FrozenNDArray)
tm.assert_numpy_array_equal(self.container.values(), original)
assert vals[0] == n
|
gpl-2.0
|
sadol/voltlog
|
libs/plotFrame.py
|
1
|
3608
|
#!/usr/bin/env python
import matplotlib as mpl
mpl.use('TkAgg')
#from matplotlib import pyplot as plt # DONT USE IT WITH TKINTER!!!!!!!!!!!!!!
from matplotlib.figure import Figure # USE THIS INSTEAD!!!!!!!!!!!!!
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg as tkCanvas
class PlotFrame():
"""tkinter frame with embeded matplotlib real-time suplot object"""
def __init__(self, root, VQueue, IQueue, delay, color):
"""PlotFrame constructor
Arguments:
root -> tkinter root frame
VQueue -> deque object with 50 samples of fresh V data from PSU
IQueue -> deque object with 50 samples of fresh I data from PSU
delay -> int(milliseconds) refresh delay
color -> background color"""
self.root = root
self.delay = delay
self.color = color
self.tData = range(50) # x axis for both V and I
self.VQueue = VQueue # internal queue of V values to plot
self.IQueue = IQueue # internal queue of I values to plot
# DONT USE PYPLOT WITK TKAGG CANVAS!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#self.fig = plt.figure(figsize=(5, 1.7))
# USE NATIVE matplotlib.figure.Figure() INSTEAD!!!!!!!!!!!!!!!!!!!!!!!
self.fig = Figure(figsize=(5, 1.2), facecolor=self.color,
edgecolor=self.color, frameon=False, linewidth=0.00)
self.fig.subplots_adjust(left=0.15, right=0.85) # important
self.canvas = tkCanvas(self.fig, master=self.root)
self.axesV = self.fig.add_subplot(1, 1, 1) # left y ax is V
self.axesI = self.axesV.twinx() # right y ax is I
self.labelsV = self.axesV.set_ylim([0, 20])
self.labelsI = self.axesI.set_ylim([0, 10])
self.axesV.set_ylabel('voltage [V]', color='g', size='small')
self.axesI.set_ylabel('current [A]', color='r', size='small')
self.axesV.tick_params(axis='y', colors='g')
self.axesI.tick_params(axis='y', colors='r')
self.axesV.spines['left'].set_color('g')
self.axesV.spines['right'].set_color('r')
self.lineV, = self.axesV.plot(self.tData, self.VQueue, 'g-',
label='V', linewidth=2)
self.lineI, = self.axesI.plot(self.tData, self.IQueue, 'r-',
label='I', linewidth=2)
lines = self.lineV, self.lineI
labels = [line.get_label() for line in lines]
self.axesV.legend(lines, labels, loc=2, fontsize='small',
frameon=False, framealpha=0.5) # stackoverflow trick
self.canvas.get_tk_widget().grid()
def plot(self):
"""draws V and I plot on the tkinter canvas
Arguments:
Returns:
"after" job ID which can be intercepted to cancel the scheduled call"""
self.axesV.set_ylim(self._setLimits(self.VQueue))
self.axesI.set_ylim(self._setLimits(self.IQueue))
self.lineV.set_ydata(self.VQueue)
self.lineI.set_ydata(self.IQueue)
self.canvas.draw()
return self.root.after(self.delay, self.plot)  # return the "after" job ID
def _setLimits(self, dequeObj):
"""sets y range limits for self.plotObj
Arguments:
dequeObj -> collection.deque object populated with values
Returns:
list [min, max] values (with offsets) of the argument"""
mi = min(dequeObj)
ma = max(dequeObj) + 0.1 # prevents overlapping min and max boundaries
return [mi - (0.1 * mi), ma + (0.1 * ma)]
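# --- Hedged usage sketch (added for illustration; not part of the original
# module). Feeds the frame with dummy deques instead of live PSU readings.
if __name__ == '__main__':
    import tkinter as tk
    from collections import deque
    root = tk.Tk()
    v_queue = deque([0.0] * 50, maxlen=50)  # 50 voltage samples
    i_queue = deque([0.0] * 50, maxlen=50)  # 50 current samples
    frame = PlotFrame(root, v_queue, i_queue, delay=500, color='white')
    frame.plot()  # draws once and reschedules itself via root.after
    root.mainloop()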
|
gpl-2.0
|
hsiaoyi0504/scikit-learn
|
sklearn/decomposition/tests/test_sparse_pca.py
|
142
|
5990
|
# Author: Vlad Niculae
# License: BSD 3 clause
import sys
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import if_not_mac_os
from sklearn.decomposition import SparsePCA, MiniBatchSparsePCA
from sklearn.utils import check_random_state
def generate_toy_data(n_components, n_samples, image_size, random_state=None):
n_features = image_size[0] * image_size[1]
rng = check_random_state(random_state)
U = rng.randn(n_samples, n_components)
V = rng.randn(n_components, n_features)
centers = [(3, 3), (6, 7), (8, 1)]
sz = [1, 2, 1]
for k in range(n_components):
img = np.zeros(image_size)
xmin, xmax = centers[k][0] - sz[k], centers[k][0] + sz[k]
ymin, ymax = centers[k][1] - sz[k], centers[k][1] + sz[k]
img[xmin:xmax][:, ymin:ymax] = 1.0
V[k, :] = img.ravel()
# Y is defined by : Y = UV + noise
Y = np.dot(U, V)
Y += 0.1 * rng.randn(Y.shape[0], Y.shape[1]) # Add noise
return Y, U, V
# SparsePCA can be a bit slow. To avoid having test times go up, we
# test different aspects of the code in the same test
def test_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
spca = SparsePCA(n_components=8, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
spca = SparsePCA(n_components=13, random_state=rng)
U = spca.fit_transform(X)
assert_equal(spca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_fit_transform():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
# Test that CD gives similar results
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=0,
alpha=alpha)
spca_lasso.fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
@if_not_mac_os()
def test_fit_transform_parallel():
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = SparsePCA(n_components=3, method='lars', alpha=alpha,
random_state=0)
spca_lars.fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
spca = SparsePCA(n_components=3, n_jobs=2, method='lars', alpha=alpha,
random_state=0).fit(Y)
U2 = spca.transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
def test_transform_nan():
# Test that SparsePCA won't return NaN when there is 0 feature in all
# samples.
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
Y[:, 0] = 0
estimator = SparsePCA(n_components=8)
assert_false(np.any(np.isnan(estimator.fit_transform(Y))))
def test_fit_transform_tall():
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 65, (8, 8), random_state=rng) # tall array
spca_lars = SparsePCA(n_components=3, method='lars',
random_state=rng)
U1 = spca_lars.fit_transform(Y)
spca_lasso = SparsePCA(n_components=3, method='cd', random_state=rng)
U2 = spca_lasso.fit(Y).transform(Y)
assert_array_almost_equal(U1, U2)
def test_initialization():
rng = np.random.RandomState(0)
U_init = rng.randn(5, 3)
V_init = rng.randn(3, 4)
model = SparsePCA(n_components=3, U_init=U_init, V_init=V_init, max_iter=0,
random_state=rng)
model.fit(rng.randn(5, 4))
assert_array_equal(model.components_, V_init)
def test_mini_batch_correct_shapes():
rng = np.random.RandomState(0)
X = rng.randn(12, 10)
pca = MiniBatchSparsePCA(n_components=8, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (8, 10))
assert_equal(U.shape, (12, 8))
# test overcomplete decomposition
pca = MiniBatchSparsePCA(n_components=13, random_state=rng)
U = pca.fit_transform(X)
assert_equal(pca.components_.shape, (13, 10))
assert_equal(U.shape, (12, 13))
def test_mini_batch_fit_transform():
raise SkipTest("skipping mini_batch_fit_transform.")
alpha = 1
rng = np.random.RandomState(0)
Y, _, _ = generate_toy_data(3, 10, (8, 8), random_state=rng) # wide array
spca_lars = MiniBatchSparsePCA(n_components=3, random_state=0,
alpha=alpha).fit(Y)
U1 = spca_lars.transform(Y)
# Test multiple CPUs
if sys.platform == 'win32': # fake parallelism for win32
import sklearn.externals.joblib.parallel as joblib_par
_mp = joblib_par.multiprocessing
joblib_par.multiprocessing = None
try:
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
finally:
joblib_par.multiprocessing = _mp
else: # we can efficiently use parallelism
U2 = MiniBatchSparsePCA(n_components=3, n_jobs=2, alpha=alpha,
random_state=0).fit(Y).transform(Y)
assert_true(not np.all(spca_lars.components_ == 0))
assert_array_almost_equal(U1, U2)
# Test that CD gives similar results
spca_lasso = MiniBatchSparsePCA(n_components=3, method='cd', alpha=alpha,
random_state=0).fit(Y)
assert_array_almost_equal(spca_lasso.components_, spca_lars.components_)
|
bsd-3-clause
|
CKrawczyk/densityplot
|
densityplot/hex_bin_subtract.py
|
1
|
4732
|
import matplotlib as mpl
import pylab as pl
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
def hex_difference(xy1,xy2,show_all=False,hkwargs={},color_bar=True,fignum=1):
"""A function that plots the difference between two hexbin plots.
Parameters
----------
xy1 : A tuple of (x,y) coordinates for the first hexbin. A tuple of (x,y,C)
can also be passed in where C is the value for each (x,y) point.
xy2 : A tuple of (x,y) coordinates for the second hexbin. A tuple of (x,y,C)
can also be passed in where C is the value for each (x,y) point.
NOTE : the 'C' functionality is untested and may not work as expected.
Keywords
--------
show_all : bool (optional)
If True all intermediate hexbin plots are returned.
Default: show_all=False
color_bar : bool (optional)
If True a colorbar is placed on the plot(s)
Default: color_bar=True
fignum : int (optional)
The number to give the resulting figure(s). If
show_all=True, the intermediate plots will be
fignum+1 and fignum+2 while the difference will
be fignum.
default: fignum=1
Passed Keywords
---------------
hkwargs:
a dictionary of keywords passed to hexbin
(see http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.hexbin
for additional keywords that can be set)
Returns
-------
d : pylab.hexbin object
Object returned by the difference hexbin
h1 : pylab.hexbin object
Object returned by hexbin of first data set
NOTE: only returned if show_all=True
h2 : pylab.hexbin object
Object returned by hexbin of second data set
NOTE: only returned if show_all=True
c : matplotlib.colorbar.Colorbar instance
NOTE: only returned if color_bar=True
Usage
-----
import numpy as np
n=100000
x1=np.random.standard_normal(n) #random x points
y1=2+3*x1+4*np.random.standard_normal(n) #random y points
x2=np.random.standard_normal(n) #random x points
y2=2-3*x2+4*np.random.standard_normal(n) #random y points
hex_difference((x1,y1),(x2,y2),show_all=True,color_bar=True,hkwargs={'gridsize':100,'extent':[-4.5,4.5,-25,25],'vmin':-180,'vmax':180})
pl.show()
"""
if show_all: #if showing all hexbins then draw them as you go (you can't change the drawing axis object after creation)
pl.figure(fignum+1)
hex1=pl.hexbin(*xy1,**hkwargs)
if color_bar:
pl.colorbar()
pl.figure(fignum+2)
hex2=pl.hexbin(*xy2,**hkwargs)
if color_bar:
pl.colorbar()
else: #make but don't draw the 2 hexbins
hex1=pl.hexbin(*xy1,visible=False,**hkwargs) #make the hexbins, visible is False to avoid drawing them to a plot
hex2=pl.hexbin(*xy2,visible=False,**hkwargs)
pl.figure(fignum)
hex_dif=pl.hexbin(*xy1,visible=False,**hkwargs) #this will have the counts overwritten (so don't draw yet)
c1=hex1.get_array() #the counts for hex1
c2=hex2.get_array() #the counts for hex2
c_dif=c1-c2 #difference between plots
gdx=~((c1==0)&(c2==0)) #the bins to draw (removes where both hists had no counts)
#NOTE: if the 'C' values are set checking against 0 is NOT a good idea...
hex_dif.set_array(c_dif[gdx]) #set the differences into the hex_dif object
h=hex_dif.get_paths() #get the hexagon Path object(s)
if len(h)>1: #you have an old version of matplotlib, use this bit of code
rem_me=pl.array(h)[~gdx] #bins to remove
for r in rem_me:
h.remove(r) #remove blank bins
else: #either you have a boring hexbin or a newer version of matplotlib
h=hex_dif.get_offsets()
hex_dif.set_offsets(h[gdx])
hex_dif.set_visible(True) #this draws the new hex_dif
ret=[hex_dif]
if show_all:
ret.append(hex1)
ret.append(hex2)
if color_bar:
ains=inset_axes(pl.gca(),width='80%',height='5%',loc=9)
#TODO: externalize colorbar keywords
c=pl.colorbar(hex_dif,cax=ains,orientation='horizontal')
ret.append(c)
return tuple(ret)
if __name__=='__main__':
import numpy as np
n=100000
x1=np.random.standard_normal(n) #random x points
y1=2+3*x1+4*np.random.standard_normal(n) #random y points
x2=np.random.standard_normal(n) #random x points
y2=2-3*x2+4*np.random.standard_normal(n) #random y points
hex_difference((x1,y1),(x2,y2),show_all=True,color_bar=True,hkwargs={'gridsize':100,'extent':[-4.5,4.5,-25,25],'vmin':-180,'vmax':180})
pl.show()
|
mit
|
winklerand/pandas
|
asv_bench/benchmarks/frame_ctor.py
|
1
|
4201
|
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, Series, MultiIndex, Timestamp, date_range
try:
from pandas.tseries import offsets
except:
from pandas.core.datetools import * # noqa
from .pandas_vb_common import setup # noqa
class FromDicts(object):
goal_time = 0.2
def setup(self):
N, K = 5000, 50
index = tm.makeStringIndex(N)
columns = tm.makeStringIndex(K)
frame = DataFrame(np.random.randn(N, K), index=index, columns=columns)
self.data = frame.to_dict()
self.some_dict = list(self.data.values())[0]
self.dict_list = frame.to_dict(orient='records')
self.data2 = {i: {j: float(j) for j in range(100)}
for i in range(2000)}
def time_frame_ctor_list_of_dict(self):
DataFrame(self.dict_list)
def time_frame_ctor_nested_dict(self):
DataFrame(self.data)
def time_series_ctor_from_dict(self):
Series(self.some_dict)
def time_frame_ctor_nested_dict_int64(self):
# nested dict, integer indexes, regression described in #621
DataFrame(self.data2)
class FromSeries(object):
goal_time = 0.2
def setup(self):
mi = MultiIndex.from_product([range(100), range(100)])
self.s = Series(np.random.randn(10000), index=mi)
def time_frame_from_mi_series(self):
DataFrame(self.s)
# ----------------------------------------------------------------------
# From dict with DatetimeIndex with all offsets
# dynamically generate benchmarks for every offset
#
# get_period_count & get_index_for_offset are there because blindly taking each
# offset times 1000 can easily go out of Timestamp bounds and raise errors.
def get_period_count(start_date, off):
ten_offsets_in_days = ((start_date + (off * 10)) - start_date).days
if (ten_offsets_in_days == 0):
return 1000
else:
periods = 9 * (Timestamp.max - start_date).days // ten_offsets_in_days
return min(periods, 1000)
def get_index_for_offset(off):
start_date = Timestamp('1/1/1900')
return date_range(start_date,
periods=get_period_count(start_date, off),
freq=off)
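# Hedged example (added for illustration; not part of the original benchmark
# file). Assumes the `from pandas.tseries import offsets` import above
# succeeded: a daily offset stays well inside Timestamp bounds, so the helper
# returns the full 1000 periods starting at 1900-01-01.
def _example_daily_index():
    return len(get_index_for_offset(offsets.Day(1)))  # -> 1000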
all_offsets = offsets.__all__
# extra cases
for off in ['FY5253', 'FY5253Quarter']:
all_offsets.pop(all_offsets.index(off))
all_offsets.extend([off + '_1', off + '_2'])
class FromDictwithTimestampOffsets(object):
params = [all_offsets, [1, 2]]
param_names = ['offset', 'n_steps']
offset_kwargs = {'WeekOfMonth': {'weekday': 1, 'week': 1},
'LastWeekOfMonth': {'weekday': 1, 'week': 1},
'FY5253': {'startingMonth': 1, 'weekday': 1},
'FY5253Quarter': {'qtr_with_extra_week': 1,
'startingMonth': 1,
'weekday': 1}}
offset_extra_cases = {'FY5253': {'variation': ['nearest', 'last']},
'FY5253Quarter': {'variation': ['nearest', 'last']}}
def setup(self, offset, n_steps):
np.random.seed(1234)
extra = False
if offset.endswith("_", None, -1):
extra = int(offset[-1])
offset = offset[:-2]
kwargs = {}
if offset in self.offset_kwargs:
kwargs = self.offset_kwargs[offset]
if extra:
extras = self.offset_extra_cases[offset]
for extra_arg in extras:
kwargs[extra_arg] = extras[extra_arg][extra - 1]
offset = getattr(offsets, offset)
self.idx = get_index_for_offset(offset(n_steps, **kwargs))
self.df = DataFrame(np.random.randn(len(self.idx), 10), index=self.idx)
self.d = self.df.to_dict()
def time_frame_ctor(self, offset, n_steps):
DataFrame(self.d)
class FromRecords(object):
goal_time = 0.2
params = [None, 1000]
param_names = ['nrows']
def setup(self, nrows):
N = 100000
self.gen = ((x, (x * 20), (x * 100)) for x in range(N))
def time_frame_from_records_generator(self, nrows):
# issue-6700
self.df = DataFrame.from_records(self.gen, nrows=nrows)
|
bsd-3-clause
|
danielvdende/incubator-airflow
|
airflow/contrib/operators/hive_to_dynamodb.py
|
21
|
4084
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from airflow.contrib.hooks.aws_dynamodb_hook import AwsDynamoDBHook
from airflow.hooks.hive_hooks import HiveServer2Hook
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
class HiveToDynamoDBTransferOperator(BaseOperator):
"""
Moves data from Hive to DynamoDB, note that for now the data is loaded
into memory before being pushed to DynamoDB, so this operator should
be used for smallish amounts of data.
:param sql: SQL query to execute against the hive database. (templated)
:type sql: str
:param table_name: target DynamoDB table
:type table_name: str
:param table_keys: partition key and sort key
:type table_keys: list
:param pre_process: implement pre-processing of source data
:type pre_process: function
:param pre_process_args: list of pre_process function arguments
:type pre_process_args: list
:param pre_process_kwargs: dict of pre_process function arguments
:type pre_process_kwargs: dict
:param region_name: aws region name (example: us-east-1)
:type region_name: str
:param schema: hive database schema
:type schema: str
:param hiveserver2_conn_id: source hive connection
:type hiveserver2_conn_id: str
:param aws_conn_id: aws connection
:type aws_conn_id: str
"""
template_fields = ('sql',)
template_ext = ('.sql',)
ui_color = '#a0e08c'
@apply_defaults
def __init__(
self,
sql,
table_name,
table_keys,
pre_process=None,
pre_process_args=None,
pre_process_kwargs=None,
region_name=None,
schema='default',
hiveserver2_conn_id='hiveserver2_default',
aws_conn_id='aws_default',
*args, **kwargs):
super(HiveToDynamoDBTransferOperator, self).__init__(*args, **kwargs)
self.sql = sql
self.table_name = table_name
self.table_keys = table_keys
self.pre_process = pre_process
self.pre_process_args = pre_process_args
self.pre_process_kwargs = pre_process_kwargs
self.region_name = region_name
self.schema = schema
self.hiveserver2_conn_id = hiveserver2_conn_id
self.aws_conn_id = aws_conn_id
def execute(self, context):
hive = HiveServer2Hook(hiveserver2_conn_id=self.hiveserver2_conn_id)
self.log.info('Extracting data from Hive')
self.log.info(self.sql)
data = hive.get_pandas_df(self.sql, schema=self.schema)
dynamodb = AwsDynamoDBHook(aws_conn_id=self.aws_conn_id,
table_name=self.table_name,
table_keys=self.table_keys,
region_name=self.region_name)
self.log.info('Inserting rows into dynamodb')
if self.pre_process is None:
dynamodb.write_batch_data(
json.loads(data.to_json(orient='records')))
else:
dynamodb.write_batch_data(
self.pre_process(data=data,
args=self.pre_process_args,
kwargs=self.pre_process_kwargs))
self.log.info('Done.')
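# --- Hedged usage sketch (added for illustration; not part of the original
# module). The DAG id, SQL, table name/keys and region below are hypothetical;
# real values depend on your Hive and DynamoDB setup.
def _example_hive_to_dynamodb_task():
    from datetime import datetime
    from airflow import DAG
    dag = DAG('example_hive_to_dynamodb',
              start_date=datetime(2018, 1, 1),
              schedule_interval=None)
    return HiveToDynamoDBTransferOperator(
        task_id='hive_to_dynamodb',
        sql='SELECT user_id, score FROM default.scores',
        table_name='scores',
        table_keys=['user_id'],
        region_name='us-east-1',
        dag=dag)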
|
apache-2.0
|
kjchalup/neural_networks
|
neural_networks/mtn.py
|
1
|
7773
|
""" Multi-task networks. """
import numpy as np
import tensorflow as tf
from neural_networks import nn
from neural_networks import scalers
class MTN(nn.NN):
def __init__(self, x_dim, y_dim,
arch=[128, 128], ntype='plain', **kwargs):
""" A multi-task network.
The output is a concatenation of the outputs for all n_task tasks.
Let the tasks have output dimensionalities y1, ..., yn. The input
then consists of:
1) A task-flag section: a bit vector of length sum(yi), containing
zeros everywhere except for coordinates corresponding to the task
of the current input (where the bits are set to 1).
2) The true input, which must have the same dimensionality
for all tasks.
These two input parts should be concatenated.
"""
super().__init__(x_dim, y_dim, arch=arch,
ntype=ntype, **kwargs)
def define_loss(self):
x_dim = self.x_tf.get_shape().as_list()[1]
y_dim = self.y_tf.get_shape().as_list()[1]
return tf.losses.mean_squared_error(
self.y_tf, self.y_pred * self.x_tf[:, :y_dim])
def define_scalers(self):
xscale = scalers.HalfScaler(ignore_dim=self.y_dim)
yscale = scalers.StandardScaler()
return xscale, yscale
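# --- Hedged example (added for illustration; not part of the original module).
# Builds one MTN input row for the second of two single-output tasks: the
# task-flag section (length y1 + y2 = 2) comes first, then the shared features,
# matching the layout described in the MTN docstring above.
def _example_mtn_input_row(features):
    task_flags = np.array([0.0, 1.0])  # one-hot flag selecting task 2
    return np.concatenate([task_flags, np.asarray(features, dtype=float)])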
def make_data(x, y, n_data, noise_std, degree=3):
""" Create a 'dataset' outputs: a random polynomial
over noisy MNIST labels.
Args:
x (n_samples, 28**2): MNIST images.
y (n_samples, 1): MNIST labels.
n_data (int): Extract a subset of this size.
noise_std (float): Standard deviation of Gaussian noise
to be added to the labels.
degree (int): Degree of the polynomial whose random
coefficients will define this dataset.
Returns:
x, y: The dataset.
"""
y_orig = np.array(y)
while True:
n_samples = x.shape[0]
data_ids = np.random.choice(n_samples, n_data, replace=False)
coeffs = np.random.rand(degree) * 2
y = np.sum(np.array([coeffs[i] * y_orig**i for i in range(degree)]),
axis=0).reshape(-1, 1)
y += np.random.rand(*y.shape) * noise_std
yield (x[data_ids], y[data_ids])
def concatenate_tasks(X, Y, task_start, task_end, samples, n_test):
""" Given a list of X and Y data, extract sublists and concatenate
into one dataset.
Args:
X (List((n_samples, x_dim))): Input data.
Y (List((n_samples, y_dim))): Output data.
task_start (int): First dataset to extract.
task_end (int): Last dataset to extract.
samples (int): Number of training samples to
extract from each dataset.
n_test (int): Number of test samples to extract from each data.
Returns:
X_multi, Y_multi: Training data, concatenated datasets between
task_start and task_end.
X_test, Y_test: As above, but test data.
"""
n_tasks = task_end - task_start
X_multi = np.zeros((samples * n_tasks, n_tasks + dim))  # note: `dim` is the module-level input dimensionality set in __main__
Y_multi = np.zeros((samples * n_tasks, n_tasks))
X_test = np.zeros((n_test * n_tasks, n_tasks + dim))
Y_test = np.zeros((n_test * n_tasks, n_tasks))
for task_id_id, task_id in enumerate(range(task_start, task_end)):
X_multi[task_id_id*samples : (task_id_id+1)*samples,
task_id_id:task_id_id+1] = 1.
data = np.array(X[task_id])
X_multi[task_id_id*samples : (task_id_id+1)*samples,
n_tasks:] = data[:samples]
Y_multi[task_id_id*samples : (task_id_id+1)*samples,
task_id_id:task_id_id+1] = Y[task_id_id][:samples]
X_test[task_id_id*n_test : (task_id_id+1)*n_test,
task_id_id:task_id_id+1] = 1.
data = np.array(X[task_id])
X_test[task_id_id*n_test : (task_id_id+1)*n_test,
n_tasks:] = data[samples:]
Y_test[task_id_id*n_test : (task_id_id+1)*n_test,
task_id_id:task_id+1] = Y[task_id_id][samples:]
return X_multi, Y_multi, X_test, Y_test
if __name__=="__main__":
""" Check that everything works as expected. """
print('===============================================================')
print('Evaluating MTN. Takes about 30min on a Titan X machine.')
print('===============================================================')
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
import matplotlib.pyplot as plt
# Load MNIST data.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)
mY = mnist.train.labels
mX = mnist.train.images.reshape(mY.shape[0], -1)
dim = mX.shape[1]
# Fix task and nn parameters.
n_task_list = [1, 2, 4, 8, 16, 32]
max_tasks = max(n_task_list)
samples = 100
noise_std = .1
n_test = 10000
kwargs = {
'arch': [32]*30, #[32] * 30,
'ntype': 'highway',
'batch_size': 32,
'lr': 1e-4,
'valsize': .3,
'epochs': 10000
}
kwargs = {}
# Make data: X is a list of datasets, each with the same coordinates
# but potentially 1) different sample sizes and
# 2) different output tasks in Y.
np.random.seed(1)
data = make_data(mX, mY, samples + n_test, noise_std)
X, Y = zip(*[next(data) for _ in range(max_tasks)])
errs_mtn = np.zeros((len(n_task_list), max_tasks))
for n_tasks_id, n_tasks in enumerate(n_task_list):
print('=' * 70)
print('Starting {}-split training'.format(n_tasks))
print('=' * 70)
# Run multi-task prediction on a group of n_tasks tasks.
for task_start in range(0, max_tasks, n_tasks):
print('task_start = {}'.format(task_start))
X_multi, Y_multi, X_test, Y_test = concatenate_tasks(
X, Y, task_start, task_start + n_tasks, samples,
n_test)
# Create the Tensorflow graph.
mtnet = MTN(x_dim=X_multi.shape[1], y_dim=n_tasks, **kwargs)
with tf.Session() as sess:
# Define the Tensorflow session, and its initializer op.
sess.run(tf.global_variables_initializer())
# Fit the net.
mtnet.fit(X_multi, Y_multi, sess=sess, nn_verbose=True,
**kwargs)
# Run the prediction on the task-group
for task_id in range(n_tasks):
mtnpred = mtnet.predict(
X_test[task_id*n_test:(task_id+1)*n_test], sess=sess)
mtnpred = mtnpred[:, task_id:task_id+1]
errs_mtn[n_tasks_id, task_start+task_id] = (
np.sqrt(np.mean((
mtnpred - Y_test[task_id*n_test:(task_id+1)*n_test,
task_id:task_id+1])**2)))
# Reset the neural net graph.
tf.reset_default_graph()
print('Done.\n')
vmax = np.max(errs_mtn)
plt.figure(figsize=(10, 10))
plt.subplot(2, 1, 1)
barw = .8 / len(n_task_list)
for n_tasks_id in range(len(n_task_list)):
plt.title('MTN performance as number of tasks grows'.format(
n_task_list[n_tasks_id]))
plt.xlabel('task ID')
plt.ylabel('error')
plt.bar(np.arange(max_tasks) + n_tasks_id * barw,
width=barw,
height=errs_mtn[n_tasks_id],
label='{}'.format(n_task_list[n_tasks_id]))
plt.ylim([0, vmax])
plt.legend(loc=0)
plt.subplot(2, 1, 2)
plt.xlabel('n_tasks')
plt.ylabel('average error')
plt.plot(n_task_list, errs_mtn.mean(axis=1), 'o')
plt.savefig('res.png')
|
gpl-3.0
|
toastedcornflakes/scikit-learn
|
examples/svm/plot_svm_kernels.py
|
329
|
1971
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
SVM-Kernels
=========================================================
Three different types of SVM-Kernels are displayed below.
The polynomial and RBF are especially useful when the
data-points are not linearly separable.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# Our dataset and targets
X = np.c_[(.4, -.7),
(-1.5, -1),
(-1.4, -.9),
(-1.3, -1.2),
(-1.1, -.2),
(-1.2, -.4),
(-.5, 1.2),
(-1.5, 2.1),
(1, 1),
# --
(1.3, .8),
(1.2, .5),
(.2, -2),
(.5, -2.4),
(.2, -2.3),
(0, -2.7),
(1.3, 2.1)].T
Y = [0] * 8 + [1] * 8
# figure number
fignum = 1
# fit the model
for kernel in ('linear', 'poly', 'rbf'):
clf = svm.SVC(kernel=kernel, gamma=2)
clf.fit(X, Y)
# plot the line, the points, and the nearest vectors to the plane
plt.figure(fignum, figsize=(4, 3))
plt.clf()
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1], s=80,
facecolors='none', zorder=10)
plt.scatter(X[:, 0], X[:, 1], c=Y, zorder=10, cmap=plt.cm.Paired)
plt.axis('tight')
x_min = -3
x_max = 3
y_min = -3
y_max = 3
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.figure(fignum, figsize=(4, 3))
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
fignum = fignum + 1
plt.show()
|
bsd-3-clause
|
fujicoin/electrum-fjc
|
electrum/gui/qt/history_list.py
|
2
|
31462
|
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import datetime
from datetime import date
from typing import TYPE_CHECKING, Tuple, Dict
import threading
from enum import IntEnum
from decimal import Decimal
from PyQt5.QtGui import QMouseEvent, QFont, QBrush, QColor
from PyQt5.QtCore import (Qt, QPersistentModelIndex, QModelIndex, QAbstractItemModel,
QSortFilterProxyModel, QVariant, QItemSelectionModel, QDate, QPoint)
from PyQt5.QtWidgets import (QMenu, QHeaderView, QLabel, QMessageBox,
QPushButton, QComboBox, QVBoxLayout, QCalendarWidget,
QGridLayout)
from electrum.address_synchronizer import TX_HEIGHT_LOCAL
from electrum.i18n import _
from electrum.util import (block_explorer_URL, profiler, TxMinedInfo,
OrderedDictWithIndex, timestamp_to_datetime)
from electrum.logging import get_logger, Logger
from .util import (read_QIcon, MONOSPACE_FONT, Buttons, CancelButton, OkButton,
filename_field, MyTreeView, AcceptFileDragDrop, WindowModalDialog,
CloseButton, webopen)
if TYPE_CHECKING:
from electrum.wallet import Abstract_Wallet
_logger = get_logger(__name__)
try:
from electrum.plot import plot_history, NothingToPlotException
except:
_logger.info("could not import electrum.plot. This feature needs matplotlib to be installed.")
plot_history = None
# note: this list needs to be kept in sync with another in kivy
TX_ICONS = [
"unconfirmed.png",
"warning.png",
"unconfirmed.png",
"offline_tx.png",
"clock1.png",
"clock2.png",
"clock3.png",
"clock4.png",
"clock5.png",
"confirmed.png",
]
class HistoryColumns(IntEnum):
STATUS_ICON = 0
STATUS_TEXT = 1
DESCRIPTION = 2
COIN_VALUE = 3
RUNNING_COIN_BALANCE = 4
FIAT_VALUE = 5
FIAT_ACQ_PRICE = 6
FIAT_CAP_GAINS = 7
TXID = 8
class HistorySortModel(QSortFilterProxyModel):
def lessThan(self, source_left: QModelIndex, source_right: QModelIndex):
item1 = self.sourceModel().data(source_left, Qt.UserRole)
item2 = self.sourceModel().data(source_right, Qt.UserRole)
if item1 is None or item2 is None:
raise Exception(f'UserRole not set for column {source_left.column()}')
v1 = item1.value()
v2 = item2.value()
if v1 is None or isinstance(v1, Decimal) and v1.is_nan(): v1 = -float("inf")
if v2 is None or isinstance(v2, Decimal) and v2.is_nan(): v2 = -float("inf")
try:
return v1 < v2
except:
return False
class HistoryModel(QAbstractItemModel, Logger):
def __init__(self, parent):
QAbstractItemModel.__init__(self, parent)
Logger.__init__(self)
self.parent = parent
self.view = None # type: HistoryList
self.transactions = OrderedDictWithIndex()
self.tx_status_cache = {} # type: Dict[str, Tuple[int, str]]
self.summary = None
def set_view(self, history_list: 'HistoryList'):
# FIXME HistoryModel and HistoryList mutually depend on each other.
# After constructing both, this method needs to be called.
self.view = history_list # type: HistoryList
self.set_visibility_of_columns()
def columnCount(self, parent: QModelIndex):
return len(HistoryColumns)
def rowCount(self, parent: QModelIndex):
return len(self.transactions)
def index(self, row: int, column: int, parent: QModelIndex):
return self.createIndex(row, column)
def data(self, index: QModelIndex, role: Qt.ItemDataRole) -> QVariant:
# note: this method is performance-critical.
# it is called a lot, and so must run extremely fast.
assert index.isValid()
col = index.column()
tx_item = self.transactions.value_from_pos(index.row())
tx_hash = tx_item['txid']
conf = tx_item['confirmations']
txpos = tx_item['txpos_in_block'] or 0
height = tx_item['height']
try:
status, status_str = self.tx_status_cache[tx_hash]
except KeyError:
tx_mined_info = self.tx_mined_info_from_tx_item(tx_item)
status, status_str = self.parent.wallet.get_tx_status(tx_hash, tx_mined_info)
if role == Qt.UserRole:
# for sorting
d = {
HistoryColumns.STATUS_ICON:
# height breaks ties for unverified txns
# txpos breaks ties for verified same block txns
(conf, -status, -height, -txpos),
HistoryColumns.STATUS_TEXT: status_str,
HistoryColumns.DESCRIPTION: tx_item['label'],
HistoryColumns.COIN_VALUE: tx_item['value'].value,
HistoryColumns.RUNNING_COIN_BALANCE: tx_item['balance'].value,
HistoryColumns.FIAT_VALUE:
tx_item['fiat_value'].value if 'fiat_value' in tx_item else None,
HistoryColumns.FIAT_ACQ_PRICE:
tx_item['acquisition_price'].value if 'acquisition_price' in tx_item else None,
HistoryColumns.FIAT_CAP_GAINS:
tx_item['capital_gain'].value if 'capital_gain' in tx_item else None,
HistoryColumns.TXID: tx_hash,
}
return QVariant(d[col])
if role not in (Qt.DisplayRole, Qt.EditRole):
if col == HistoryColumns.STATUS_ICON and role == Qt.DecorationRole:
return QVariant(read_QIcon(TX_ICONS[status]))
elif col == HistoryColumns.STATUS_ICON and role == Qt.ToolTipRole:
return QVariant(str(conf) + _(" confirmation" + ("s" if conf != 1 else "")))
elif col > HistoryColumns.DESCRIPTION and role == Qt.TextAlignmentRole:
return QVariant(Qt.AlignRight | Qt.AlignVCenter)
elif col != HistoryColumns.STATUS_TEXT and role == Qt.FontRole:
monospace_font = QFont(MONOSPACE_FONT)
return QVariant(monospace_font)
elif col == HistoryColumns.DESCRIPTION and role == Qt.DecorationRole \
and self.parent.wallet.invoices.paid.get(tx_hash):
return QVariant(read_QIcon("seal"))
elif col in (HistoryColumns.DESCRIPTION, HistoryColumns.COIN_VALUE) \
and role == Qt.ForegroundRole and tx_item['value'].value < 0:
red_brush = QBrush(QColor("#BC1E1E"))
return QVariant(red_brush)
elif col == HistoryColumns.FIAT_VALUE and role == Qt.ForegroundRole \
and not tx_item.get('fiat_default') and tx_item.get('fiat_value') is not None:
blue_brush = QBrush(QColor("#1E1EFF"))
return QVariant(blue_brush)
return QVariant()
if col == HistoryColumns.STATUS_TEXT:
return QVariant(status_str)
elif col == HistoryColumns.DESCRIPTION:
return QVariant(tx_item['label'])
elif col == HistoryColumns.COIN_VALUE:
value = tx_item['value'].value
v_str = self.parent.format_amount(value, is_diff=True, whitespaces=True)
return QVariant(v_str)
elif col == HistoryColumns.RUNNING_COIN_BALANCE:
balance = tx_item['balance'].value
balance_str = self.parent.format_amount(balance, whitespaces=True)
return QVariant(balance_str)
elif col == HistoryColumns.FIAT_VALUE and 'fiat_value' in tx_item:
value_str = self.parent.fx.format_fiat(tx_item['fiat_value'].value)
return QVariant(value_str)
elif col == HistoryColumns.FIAT_ACQ_PRICE and \
tx_item['value'].value < 0 and 'acquisition_price' in tx_item:
# fixme: should use is_mine
acq = tx_item['acquisition_price'].value
return QVariant(self.parent.fx.format_fiat(acq))
elif col == HistoryColumns.FIAT_CAP_GAINS and 'capital_gain' in tx_item:
cg = tx_item['capital_gain'].value
return QVariant(self.parent.fx.format_fiat(cg))
elif col == HistoryColumns.TXID:
return QVariant(tx_hash)
return QVariant()
def parent(self, index: QModelIndex):
return QModelIndex()
def hasChildren(self, index: QModelIndex):
return not index.isValid()
def update_label(self, row):
tx_item = self.transactions.value_from_pos(row)
tx_item['label'] = self.parent.wallet.get_label(tx_item['txid'])
topLeft = bottomRight = self.createIndex(row, 2)
self.dataChanged.emit(topLeft, bottomRight, [Qt.DisplayRole])
def get_domain(self):
'''Overridden in address_dialog.py'''
return self.parent.wallet.get_addresses()
@profiler
def refresh(self, reason: str):
self.logger.info(f"refreshing... reason: {reason}")
assert self.parent.gui_thread == threading.current_thread(), 'must be called from GUI thread'
assert self.view, 'view not set'
selected = self.view.selectionModel().currentIndex()
selected_row = None
if selected:
selected_row = selected.row()
fx = self.parent.fx
if fx: fx.history_used_spot = False
r = self.parent.wallet.get_full_history(domain=self.get_domain(), from_timestamp=None, to_timestamp=None, fx=fx)
self.set_visibility_of_columns()
if r['transactions'] == list(self.transactions.values()):
return
old_length = len(self.transactions)
if old_length != 0:
self.beginRemoveRows(QModelIndex(), 0, old_length)
self.transactions.clear()
self.endRemoveRows()
self.beginInsertRows(QModelIndex(), 0, len(r['transactions'])-1)
for tx_item in r['transactions']:
txid = tx_item['txid']
self.transactions[txid] = tx_item
self.endInsertRows()
if selected_row:
self.view.selectionModel().select(self.createIndex(selected_row, 0), QItemSelectionModel.Rows | QItemSelectionModel.SelectCurrent)
self.view.filter()
# update summary
self.summary = r['summary']
if not self.view.years and self.transactions:
start_date = date.today()
end_date = date.today()
if len(self.transactions) > 0:
start_date = self.transactions.value_from_pos(0).get('date') or start_date
end_date = self.transactions.value_from_pos(len(self.transactions) - 1).get('date') or end_date
self.view.years = [str(i) for i in range(start_date.year, end_date.year + 1)]
self.view.period_combo.insertItems(1, self.view.years)
# update tx_status_cache
self.tx_status_cache.clear()
for txid, tx_item in self.transactions.items():
tx_mined_info = self.tx_mined_info_from_tx_item(tx_item)
self.tx_status_cache[txid] = self.parent.wallet.get_tx_status(txid, tx_mined_info)
def set_visibility_of_columns(self):
def set_visible(col: int, b: bool):
self.view.showColumn(col) if b else self.view.hideColumn(col)
# txid
set_visible(HistoryColumns.TXID, False)
# fiat
history = self.parent.fx.show_history()
cap_gains = self.parent.fx.get_history_capital_gains_config()
set_visible(HistoryColumns.FIAT_VALUE, history)
set_visible(HistoryColumns.FIAT_ACQ_PRICE, history and cap_gains)
set_visible(HistoryColumns.FIAT_CAP_GAINS, history and cap_gains)
def update_fiat(self, row, idx):
tx_item = self.transactions.value_from_pos(row)
key = tx_item['txid']
fee = tx_item.get('fee')
value = tx_item['value'].value
fiat_fields = self.parent.wallet.get_tx_item_fiat(key, value, self.parent.fx, fee.value if fee else None)
tx_item.update(fiat_fields)
self.dataChanged.emit(idx, idx, [Qt.DisplayRole, Qt.ForegroundRole])
def update_tx_mined_status(self, tx_hash: str, tx_mined_info: TxMinedInfo):
try:
row = self.transactions.pos_from_key(tx_hash)
tx_item = self.transactions[tx_hash]
except KeyError:
return
self.tx_status_cache[tx_hash] = self.parent.wallet.get_tx_status(tx_hash, tx_mined_info)
tx_item.update({
'confirmations': tx_mined_info.conf,
'timestamp': tx_mined_info.timestamp,
'txpos_in_block': tx_mined_info.txpos,
'date': timestamp_to_datetime(tx_mined_info.timestamp),
})
topLeft = self.createIndex(row, 0)
bottomRight = self.createIndex(row, len(HistoryColumns) - 1)
self.dataChanged.emit(topLeft, bottomRight)
def on_fee_histogram(self):
for tx_hash, tx_item in list(self.transactions.items()):
tx_mined_info = self.tx_mined_info_from_tx_item(tx_item)
if tx_mined_info.conf > 0:
# note: we could actually break here if we wanted to rely on the order of txns in self.transactions
continue
self.update_tx_mined_status(tx_hash, tx_mined_info)
def headerData(self, section: int, orientation: Qt.Orientation, role: Qt.ItemDataRole):
assert orientation == Qt.Horizontal
if role != Qt.DisplayRole:
return None
fx = self.parent.fx
fiat_title = 'n/a fiat value'
fiat_acq_title = 'n/a fiat acquisition price'
fiat_cg_title = 'n/a fiat capital gains'
if fx and fx.show_history():
fiat_title = '%s '%fx.ccy + _('Value')
fiat_acq_title = '%s '%fx.ccy + _('Acquisition price')
fiat_cg_title = '%s '%fx.ccy + _('Capital Gains')
return {
HistoryColumns.STATUS_ICON: '',
HistoryColumns.STATUS_TEXT: _('Date'),
HistoryColumns.DESCRIPTION: _('Description'),
HistoryColumns.COIN_VALUE: _('Amount'),
HistoryColumns.RUNNING_COIN_BALANCE: _('Balance'),
HistoryColumns.FIAT_VALUE: fiat_title,
HistoryColumns.FIAT_ACQ_PRICE: fiat_acq_title,
HistoryColumns.FIAT_CAP_GAINS: fiat_cg_title,
HistoryColumns.TXID: 'TXID',
}[section]
def flags(self, idx):
extra_flags = Qt.NoItemFlags # type: Qt.ItemFlag
if idx.column() in self.view.editable_columns:
extra_flags |= Qt.ItemIsEditable
return super().flags(idx) | extra_flags
@staticmethod
def tx_mined_info_from_tx_item(tx_item):
tx_mined_info = TxMinedInfo(height=tx_item['height'],
conf=tx_item['confirmations'],
timestamp=tx_item['timestamp'])
return tx_mined_info
class HistoryList(MyTreeView, AcceptFileDragDrop):
filter_columns = [HistoryColumns.STATUS_TEXT,
HistoryColumns.DESCRIPTION,
HistoryColumns.COIN_VALUE,
HistoryColumns.TXID]
def tx_item_from_proxy_row(self, proxy_row):
hm_idx = self.model().mapToSource(self.model().index(proxy_row, 0))
return self.hm.transactions.value_from_pos(hm_idx.row())
def should_hide(self, proxy_row):
if self.start_timestamp and self.end_timestamp:
tx_item = self.tx_item_from_proxy_row(proxy_row)
date = tx_item['date']
if date:
in_interval = self.start_timestamp <= date <= self.end_timestamp
if not in_interval:
return True
return False
def __init__(self, parent, model: HistoryModel):
super().__init__(parent, self.create_menu, stretch_column=HistoryColumns.DESCRIPTION)
self.hm = model
self.proxy = HistorySortModel(self)
self.proxy.setSourceModel(model)
self.setModel(self.proxy)
self.config = parent.config
AcceptFileDragDrop.__init__(self, ".txn")
self.setSortingEnabled(True)
self.start_timestamp = None
self.end_timestamp = None
self.years = []
self.create_toolbar_buttons()
self.wallet = self.parent.wallet # type: Abstract_Wallet
self.sortByColumn(HistoryColumns.STATUS_ICON, Qt.AscendingOrder)
self.editable_columns |= {HistoryColumns.FIAT_VALUE}
self.header().setStretchLastSection(False)
for col in HistoryColumns:
sm = QHeaderView.Stretch if col == self.stretch_column else QHeaderView.ResizeToContents
self.header().setSectionResizeMode(col, sm)
def format_date(self, d):
return str(datetime.date(d.year, d.month, d.day)) if d else _('None')
def on_combo(self, x):
s = self.period_combo.itemText(x)
x = s == _('Custom')
self.start_button.setEnabled(x)
self.end_button.setEnabled(x)
if s == _('All'):
self.start_timestamp = None
self.end_timestamp = None
self.start_button.setText("-")
self.end_button.setText("-")
else:
try:
year = int(s)
except ValueError:
return
self.start_timestamp = start_date = datetime.datetime(year, 1, 1)
self.end_timestamp = end_date = datetime.datetime(year+1, 1, 1)
self.start_button.setText(_('From') + ' ' + self.format_date(start_date))
self.end_button.setText(_('To') + ' ' + self.format_date(end_date))
self.hide_rows()
def create_toolbar_buttons(self):
self.period_combo = QComboBox()
self.start_button = QPushButton('-')
self.start_button.pressed.connect(self.select_start_date)
self.start_button.setEnabled(False)
self.end_button = QPushButton('-')
self.end_button.pressed.connect(self.select_end_date)
self.end_button.setEnabled(False)
self.period_combo.addItems([_('All'), _('Custom')])
self.period_combo.activated.connect(self.on_combo)
def get_toolbar_buttons(self):
return self.period_combo, self.start_button, self.end_button
def on_hide_toolbar(self):
self.start_timestamp = None
self.end_timestamp = None
self.hide_rows()
def save_toolbar_state(self, state, config):
config.set_key('show_toolbar_history', state)
def select_start_date(self):
self.start_timestamp = self.select_date(self.start_button)
self.hide_rows()
def select_end_date(self):
self.end_timestamp = self.select_date(self.end_button)
self.hide_rows()
def select_date(self, button):
d = WindowModalDialog(self, _("Select date"))
d.setMinimumSize(600, 150)
d.date = None
vbox = QVBoxLayout()
def on_date(date):
d.date = date
cal = QCalendarWidget()
cal.setGridVisible(True)
cal.clicked[QDate].connect(on_date)
vbox.addWidget(cal)
vbox.addLayout(Buttons(OkButton(d), CancelButton(d)))
d.setLayout(vbox)
if d.exec_():
if d.date is None:
return None
date = d.date.toPyDate()
button.setText(self.format_date(date))
return datetime.datetime(date.year, date.month, date.day)
def show_summary(self):
h = self.model().sourceModel().summary
if not h:
self.parent.show_message(_("Nothing to summarize."))
return
start_date = h.get('start_date')
end_date = h.get('end_date')
format_amount = lambda x: self.parent.format_amount(x.value) + ' ' + self.parent.base_unit()
d = WindowModalDialog(self, _("Summary"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
grid = QGridLayout()
grid.addWidget(QLabel(_("Start")), 0, 0)
grid.addWidget(QLabel(self.format_date(start_date)), 0, 1)
grid.addWidget(QLabel(str(h.get('fiat_start_value')) + '/BTC'), 0, 2)
grid.addWidget(QLabel(_("Initial balance")), 1, 0)
grid.addWidget(QLabel(format_amount(h['start_balance'])), 1, 1)
grid.addWidget(QLabel(str(h.get('fiat_start_balance'))), 1, 2)
grid.addWidget(QLabel(_("End")), 2, 0)
grid.addWidget(QLabel(self.format_date(end_date)), 2, 1)
grid.addWidget(QLabel(str(h.get('fiat_end_value')) + '/BTC'), 2, 2)
grid.addWidget(QLabel(_("Final balance")), 4, 0)
grid.addWidget(QLabel(format_amount(h['end_balance'])), 4, 1)
grid.addWidget(QLabel(str(h.get('fiat_end_balance'))), 4, 2)
grid.addWidget(QLabel(_("Income")), 5, 0)
grid.addWidget(QLabel(format_amount(h.get('incoming'))), 5, 1)
grid.addWidget(QLabel(str(h.get('fiat_incoming'))), 5, 2)
grid.addWidget(QLabel(_("Expenditures")), 6, 0)
grid.addWidget(QLabel(format_amount(h.get('outgoing'))), 6, 1)
grid.addWidget(QLabel(str(h.get('fiat_outgoing'))), 6, 2)
grid.addWidget(QLabel(_("Capital gains")), 7, 0)
grid.addWidget(QLabel(str(h.get('fiat_capital_gains'))), 7, 2)
grid.addWidget(QLabel(_("Unrealized gains")), 8, 0)
grid.addWidget(QLabel(str(h.get('fiat_unrealized_gains', ''))), 8, 2)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
def plot_history_dialog(self):
if plot_history is None:
self.parent.show_message(
_("Can't plot history.") + '\n' +
_("Perhaps some dependencies are missing...") + " (matplotlib?)")
return
try:
plt = plot_history(list(self.hm.transactions.values()))
plt.show()
except NothingToPlotException as e:
self.parent.show_message(str(e))
def on_edited(self, index, user_role, text):
index = self.model().mapToSource(index)
row, column = index.row(), index.column()
tx_item = self.hm.transactions.value_from_pos(row)
key = tx_item['txid']
if column == HistoryColumns.DESCRIPTION:
if self.wallet.set_label(key, text):  # True only if the label actually changed
self.hm.update_label(row)
self.parent.update_completions()
elif column == HistoryColumns.FIAT_VALUE:
self.wallet.set_fiat_value(key, self.parent.fx.ccy, text, self.parent.fx, tx_item['value'].value)
value = tx_item['value'].value
if value is not None:
self.hm.update_fiat(row, index)
else:
assert False
def mouseDoubleClickEvent(self, event: QMouseEvent):
idx = self.indexAt(event.pos())
if not idx.isValid():
return
tx_item = self.tx_item_from_proxy_row(idx.row())
if self.hm.flags(self.model().mapToSource(idx)) & Qt.ItemIsEditable:
super().mouseDoubleClickEvent(event)
else:
self.show_transaction(tx_item['txid'])
def show_transaction(self, tx_hash):
tx = self.wallet.db.get_transaction(tx_hash)
if not tx:
return
label = self.wallet.get_label(tx_hash) or None # prefer 'None' if not defined (force tx dialog to hide Description field if missing)
self.parent.show_transaction(tx, label)
def create_menu(self, position: QPoint):
org_idx: QModelIndex = self.indexAt(position)
idx = self.proxy.mapToSource(org_idx)
if not idx.isValid():
# can happen e.g. before list is populated for the first time
return
tx_item = self.hm.transactions.value_from_pos(idx.row())
column = idx.column()
if column == HistoryColumns.STATUS_ICON:
column_title = _('Transaction ID')
column_data = tx_item['txid']
else:
column_title = self.hm.headerData(column, Qt.Horizontal, Qt.DisplayRole)
column_data = self.hm.data(idx, Qt.DisplayRole).value()
tx_hash = tx_item['txid']
tx = self.wallet.db.get_transaction(tx_hash)
if not tx:
return
tx_URL = block_explorer_URL(self.config, 'tx', tx_hash)
height = self.wallet.get_tx_height(tx_hash).height
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
is_unconfirmed = height <= 0
pr_key = self.wallet.invoices.paid.get(tx_hash)
menu = QMenu()
if height == TX_HEIGHT_LOCAL:
menu.addAction(_("Remove"), lambda: self.remove_local_tx(tx_hash))
amount_columns = [HistoryColumns.COIN_VALUE, HistoryColumns.RUNNING_COIN_BALANCE, HistoryColumns.FIAT_VALUE, HistoryColumns.FIAT_ACQ_PRICE, HistoryColumns.FIAT_CAP_GAINS]
if column in amount_columns:
column_data = column_data.strip()
menu.addAction(_("Copy {}").format(column_title), lambda: self.parent.app.clipboard().setText(column_data))
for c in self.editable_columns:
if self.isColumnHidden(c): continue
label = self.hm.headerData(c, Qt.Horizontal, Qt.DisplayRole)
# TODO use siblingAtColumn when min Qt version is >=5.11
persistent = QPersistentModelIndex(org_idx.sibling(org_idx.row(), c))
menu.addAction(_("Edit {}").format(label), lambda p=persistent: self.edit(QModelIndex(p)))
menu.addAction(_("Details"), lambda: self.show_transaction(tx_hash))
if is_unconfirmed and tx:
# note: the current implementation of RBF *needs* the old tx fee
rbf = is_mine and not tx.is_final() and fee is not None
if rbf:
menu.addAction(_("Increase fee"), lambda: self.parent.bump_fee_dialog(tx))
else:
child_tx = self.wallet.cpfp(tx, 0)
if child_tx:
menu.addAction(_("Child pays for parent"), lambda: self.parent.cpfp(tx, child_tx))
if pr_key:
menu.addAction(read_QIcon("seal"), _("View invoice"), lambda: self.parent.show_invoice(pr_key))
if tx_URL:
menu.addAction(_("View on block explorer"), lambda: webopen(tx_URL))
menu.exec_(self.viewport().mapToGlobal(position))
def remove_local_tx(self, delete_tx):
to_delete = {delete_tx}
to_delete |= self.wallet.get_depending_transactions(delete_tx)
question = _("Are you sure you want to remove this transaction?")
if len(to_delete) > 1:
question = (_("Are you sure you want to remove this transaction and {} child transactions?")
.format(len(to_delete) - 1))
if not self.parent.question(msg=question,
title=_("Please confirm")):
return
for tx in to_delete:
self.wallet.remove_transaction(tx)
self.wallet.storage.write()
# need to update at least: history_list, utxo_list, address_list
self.parent.need_update.set()
def onFileAdded(self, fn):
try:
with open(fn) as f:
tx = self.parent.tx_from_text(f.read())
self.parent.save_transaction_into_wallet(tx)
except IOError as e:
self.parent.show_error(e)
def export_history_dialog(self):
d = WindowModalDialog(self, _('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/electrum-history.csv')
select_msg = _('Select file to export your wallet transactions to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
vbox.addStretch(1)
hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
vbox.addLayout(hbox)
#run_hook('export_history_dialog', self, hbox)
self.update()
if not d.exec_():
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_history(filename, csv_button.isChecked())
except (IOError, os.error) as reason:
export_error_label = _("Electrum was unable to produce a transaction export.")
self.parent.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
return
self.parent.show_message(_("Your wallet history has been successfully exported."))
def do_export_history(self, file_name, is_csv):
hist = self.wallet.get_full_history(domain=self.hm.get_domain(),
from_timestamp=None,
to_timestamp=None,
fx=self.parent.fx,
show_fees=True)
txns = hist['transactions']
lines = []
if is_csv:
for item in txns:
lines.append([item['txid'],
item.get('label', ''),
item['confirmations'],
item['value'],
item.get('fiat_value', ''),
item.get('fee', ''),
item.get('fiat_fee', ''),
item['date']])
with open(file_name, "w+", encoding='utf-8') as f:
if is_csv:
import csv
transaction = csv.writer(f, lineterminator='\n')
transaction.writerow(["transaction_hash",
"label",
"confirmations",
"value",
"fiat_value",
"fee",
"fiat_fee",
"timestamp"])
for line in lines:
transaction.writerow(line)
else:
from electrum.util import json_encode
f.write(json_encode(txns))
def text_txid_from_coordinate(self, row, col):
idx = self.model().mapToSource(self.model().index(row, col))
tx_item = self.hm.transactions.value_from_pos(idx.row())
return self.hm.data(idx, Qt.DisplayRole).value(), tx_item['txid']
|
mit
|
mitschabaude/nanopores
|
nanopores/models/pughpoints.py
|
1
|
6275
|
# (c) 2016 Gregor Mitscha-Baude
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from nanopores.geometries.pughpore import params as pugh_params
from nanopores import Params
def grid_piecewise1D(nodes, h, N=100, ep=None):
# compute number of grid points in each section
# N = 1/scaling * (length1 / h1 + length2 / h2 + ...)
lengths = np.diff(np.array(nodes))
h = np.array(h)
n = lengths/h
n = np.round(n*N/sum(n))
# compute each grid
intervals = zip(nodes[:-1], nodes[1:])
k = len(lengths)
grids = []
# ep = endpoint preference = 0 or 1
if ep is None:
ep = [0]*(k-1)
for i in range(k):
a, b = intervals[i]
grid = list(np.linspace(a, b, n[i]+1)[1:-1])
#print i
#print grid
if i == 0 or ep[i-1] == 1:
grid.insert(0, a)
if i == k-1 or ep[i] == 0:
grid.append(b)
#print grid
grids.append(grid)
#print n
return grids
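# Illustrative sketch (added comment, not part of the original module): with
# nodes [0, 2, 6], relative meshwidths [1, 1], N=6 and the default endpoint
# preference, the shared node 2 is assigned to the first section, so
#   grid_piecewise1D([0, 2, 6], [1, 1], N=6)
# yields roughly [[0, 1.0, 2], [3.0, 4.0, 5.0, 6]]: the sections of length 2
# and 4 receive 2 and 4 subintervals respectively, and each shared node
# appears in exactly one of the per-section grids.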
def lround(x, nd):
if hasattr(x, "__iter__"):
return [lround(t, nd) for t in x]
else:
return round(x, nd)
def tensor(xy, z, r):
tensorgrid = []
for i in range(len(z)):
# scale xy by radius
ri = r[i]
xyz = [(ri*xj, ri*yj, zi) for zi in z[i] for xj, yj in xy]
tensorgrid.extend(xyz)
return lround(tensorgrid, 3)
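# Illustrative sketch (added comment, not part of the original module):
# tensor() scales each unit-triangle point by the radius of its z-section and
# pairs it with every z value of that section, e.g.
#   tensor([(0.5, 0.25)], [[1.0, 2.0]], [2.0])
# gives [(1.0, 0.5, 1.0), (1.0, 0.5, 2.0)] after rounding to 3 decimals.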
def plot_1Dgrid(z, grids):
totalgrid = list(set(reduce(lambda a, b: a+b, grids)) - set(z))
fig = plt.figure("line")
fig.set_size_inches(8, 1)
plt.axhline(y=0, color="black", zorder=-10)
plt.scatter(totalgrid, [0.]*len(totalgrid), color="black")
plt.scatter(z, [0.]*len(z), color="red")
plt.xlim(z[0]-1, z[-1]+1)
plt.axis('off')
def neg(x):
return [-t for t in x]
def plot_2Dgrid(xy):
xx = [xi for (xi, yi) in xy]
yy = [yi for (xi, yi) in xy]
fig = plt.figure("triangle")
fig.set_size_inches(4, 4)
plt.scatter(xx, yy, color="red")
plt.scatter(yy, xx, color="green")
plt.scatter(xx + yy, neg(yy + xx), color="green")
plt.scatter(neg(xx + yy + xx + yy), yy + xx + neg(yy + xx), color="green")
plt.plot([0, 1], [0, 1], "-k")
plt.plot([0, 1], [0, 0], "-k")
plt.xlim(-1, 1)
plt.ylim(-1, 1)
def plot_xz_grid(xyz):
# project to x-z plane
xy = list(set([(x, z) for x, y, z in xyz]))
xx = [xi for (xi, yi) in xy]
yy = [yi for (xi, yi) in xy]
fig = plt.figure("porexz")
fig.set_size_inches(4, 4)
plt.scatter(neg(xx) + xx, yy + yy)
def plot_polygon(ax, polygon, **settings):
settings = dict(dict(closed=True, facecolor="#eeeeee", linewidth=1.,
edgecolor="black"), **settings)
polygon = np.array(polygon)
polygon_m = np.column_stack([-polygon[:,0], polygon[:,1]])
patch = patches.Polygon(polygon, **settings)
patchm = patches.Polygon(polygon_m, **settings)
#patch.set_zorder(10)
#patchm.set_zorder(10)
ax.add_patch(patch)
ax.add_patch(patchm)
# will result in roughly nz * nr*(nr+1)/2 points
def tensorgrid(nz=30, nr=5, plot=False, eps=5e-2, eps2=1e-1, buf=7.,
**params):
params = Params(pugh_params) | Params(params)
r = params.rMolecule
r = r + eps
# ---- create z part of tensor grid -----
ztop = params.hpore/2.
zbot = -ztop
# 6 nodes => 5 sections
z = [zbot - buf,
zbot - r,
ztop - params.h2 + r,
ztop - params.h1 + r,
ztop + r,
ztop + buf]
# relative meshwidths, radii
hz = np.array([1., 1., .5, 1., 1.])
rpore = [params.l0/2. - r,
params.l3/2. - r,
params.l2/2. - r,
params.l1/2. - r,
params.l0/2. - r]
# to which of the two intervals the shared endpoint belongs
ep = [0, 1, 1, 1]
grids = grid_piecewise1D(z, hz, N=nz, ep=ep)
# ---- create xy (triangle) part of tensor grid -----
# points in the unit triangle
x = np.linspace(eps2, 1-eps, nr)
y = np.linspace(eps, 1-eps2, nr)
xy = [(xi, yi) for xi in x for yi in y if xi > yi]
# ---- tensor product
xyz = tensor(xy, grids, rpore)
if plot:
print "Created %d points in z direction." % (sum(len(g) for g in grids),)
print "Created %d points in xy direction." % (len(xy),)
print "Total number of points:", len(xyz)
#plot_1Dgrid(z, grids)
plot_2Dgrid(xy)
plot_xz_grid(xyz)
plt.ylim(-params.H*0.5, params.H*0.5)
ax = plt.gca()
from nanopores.models.pughpore import polygon
plot_polygon(ax, polygon())
return xyz
if __name__ == "__main__":
from nanopores.models.pughpore import tensorgrid as tg
xyz = tg(nz=30, nr=4, plot=True)
plt.show()
#........................R.............................
# .
# .
# .........l0.......... .
# . . .
# ._ _______________ _............... .
# |D| |D| . . . .
# |D|......l1.......|D| h1 . . .
# |D|_ ____l2_____ _|D|...... h2 . .
# |DDD|_ _______ _|DDD|.......... . .
# |DDDDD| |DDDDD| . .
# |DDDDD| |DDDDD| . .
# DNA--->|DDDDD| |DDDDD| hpore .
# |DDDDD| |DDDDD| . .
# |DDDDD|..l3...|DDDDD| . .
# MEMBRANE |DDDDD| |DDDDD| . H
# | |DDDDD| |DDDDD| . .
# | |DDDDD| |DDDDD|....h4 . .
#______V_________|DDD| |DDD|_____.________ .___ .......
#MMMMMMMMMMMMMMMM|DDD| |DDD|MMMMM.MMMMMMMMM.MMMM. hmem
#MMMMMMMMMMMMMMMM|DDD|_______|DDD|MMMMM.MMMMMMMMM.MMMM.......
# . . .
# .......l4........ .
# .
# .
# .
#......................................................
|
mit
|
mxjl620/scikit-learn
|
benchmarks/bench_20newsgroups.py
|
377
|
3555
|
from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
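# Note (descriptive comment, not in the upstream benchmark): each estimator is
# constructed once here; a fixed random_state is injected further down via
# set_params(random_state=0) for those estimators that accept it.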
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
|
bsd-3-clause
|
mugizico/scikit-learn
|
sklearn/linear_model/stochastic_gradient.py
|
130
|
50966
|
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils.seq_dataset import ArrayDataset, CSRDataset
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
SPARSE_INTERCEPT_DECAY = 0.01
"""For sparse data intercept updates are scaled by this decay factor to avoid
intercept oscillation."""
DEFAULT_EPSILON = 0.1
"""Default value of ``epsilon`` parameter. """
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _make_dataset(X, y_i, sample_weight):
"""Create ``Dataset`` abstraction for sparse and dense inputs.
This also returns the ``intercept_decay`` which is different
for sparse datasets.
"""
if sp.issparse(X):
dataset = CSRDataset(X.data, X.indptr, X.indices, y_i, sample_weight)
intercept_decay = SPARSE_INTERCEPT_DECAY
else:
dataset = ArrayDataset(X, y_i, sample_weight)
intercept_decay = 1.0
return dataset, intercept_decay
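# Sketch of the two cases handled above (added comment only): a dense ndarray
# X yields (ArrayDataset(X, y_i, sample_weight), 1.0), whereas a scipy.sparse
# CSR matrix yields (CSRDataset(X.data, X.indptr, X.indices, y_i,
# sample_weight), SPARSE_INTERCEPT_DECAY), i.e. intercept updates are damped
# by a factor of 0.01 on sparse input.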
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept, average_coef, average_intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
# if average is not true, average_coef and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = _make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
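# Note (added comment): each entry maps a loss name to (loss class, default
# constructor args); BaseSGD._get_loss_function substitutes ``self.epsilon``
# for the default when the loss is 'huber', 'epsilon_insensitive' or
# 'squared_epsilon_insensitive', so e.g. loss='huber' builds
# Huber(self.epsilon).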
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
raise ValueError("class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight('{0}', classes, y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
one sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (t + t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = _make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
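# --- Editorial usage sketch (not part of the scikit-learn source above) ---
# Illustrates the learning-rate schedules and the averaging option documented
# in the SGDRegressor docstring. Assumes a scikit-learn release matching this
# source (the n_iter-era API); the data and parameter values are illustrative.
if __name__ == "__main__":
    import numpy as np
    from sklearn.linear_model import SGDRegressor

    rng = np.random.RandomState(0)
    X = rng.randn(200, 5)
    y = np.dot(X, [1.0, -2.0, 0.5, 0.0, 3.0]) + 0.1 * rng.randn(200)

    # 'invscaling' (the documented default): eta = eta0 / pow(t, power_t)
    inv = SGDRegressor(learning_rate="invscaling", eta0=0.01, power_t=0.25,
                       random_state=0).fit(X, y)

    # averaged SGD: start averaging coefficients once 10 samples have been seen
    avg = SGDRegressor(average=10, random_state=0).fit(X, y)

    # partial_fit performs a single pass per call (n_iter is set to 1)
    online = SGDRegressor(random_state=0)
    for chunk in np.array_split(np.arange(len(X)), 4):
        online.partial_fit(X[chunk], y[chunk])

    print(inv.coef_)
    print(avg.coef_)
    print(online.coef_)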
| bsd-3-clause | badlogicmanpreet/nupic | examples/opf/clients/hotgym/prediction/one_gym/nupic_output.py | 17 | 6193 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Provides two classes with the same signature for writing data out of NuPIC
models.
(This is a component of the One Hot Gym Prediction Tutorial.)
"""
import csv
from collections import deque
from abc import ABCMeta, abstractmethod
# Try to import matplotlib, but we don't have to.
try:
import matplotlib
matplotlib.use('TKAgg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.dates import date2num
except ImportError:
pass
WINDOW = 100
class NuPICOutput(object):
__metaclass__ = ABCMeta
def __init__(self, names, showAnomalyScore=False):
self.names = names
self.showAnomalyScore = showAnomalyScore
@abstractmethod
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
pass
@abstractmethod
def close(self):
pass
class NuPICFileOutput(NuPICOutput):
def __init__(self, *args, **kwargs):
super(NuPICFileOutput, self).__init__(*args, **kwargs)
self.outputFiles = []
self.outputWriters = []
self.lineCounts = []
headerRow = ['timestamp', 'kw_energy_consumption', 'prediction']
for name in self.names:
self.lineCounts.append(0)
outputFileName = "%s_out.csv" % name
print "Preparing to output %s data to %s" % (name, outputFileName)
outputFile = open(outputFileName, "w")
self.outputFiles.append(outputFile)
outputWriter = csv.writer(outputFile)
self.outputWriters.append(outputWriter)
outputWriter.writerow(headerRow)
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
assert len(timestamps) == len(actualValues) == len(predictedValues)
for index in range(len(self.names)):
timestamp = timestamps[index]
actual = actualValues[index]
prediction = predictedValues[index]
writer = self.outputWriters[index]
if timestamp is not None:
outputRow = [timestamp, actual, prediction]
writer.writerow(outputRow)
self.lineCounts[index] += 1
def close(self):
for index, name in enumerate(self.names):
self.outputFiles[index].close()
print "Done. Wrote %i data lines to %s." % (self.lineCounts[index], name)
class NuPICPlotOutput(NuPICOutput):
def __init__(self, *args, **kwargs):
super(NuPICPlotOutput, self).__init__(*args, **kwargs)
# Turn matplotlib interactive mode on.
plt.ion()
self.dates = []
self.convertedDates = []
self.actualValues = []
self.predictedValues = []
self.actualLines = []
self.predictedLines = []
self.linesInitialized = False
self.graphs = []
plotCount = len(self.names)
plotHeight = max(plotCount * 3, 6)
fig = plt.figure(figsize=(14, plotHeight))
gs = gridspec.GridSpec(plotCount, 1)
for index in range(len(self.names)):
self.graphs.append(fig.add_subplot(gs[index, 0]))
plt.title(self.names[index])
plt.ylabel('KW Energy Consumption')
plt.xlabel('Date')
plt.tight_layout()
def initializeLines(self, timestamps):
for index in range(len(self.names)):
print "initializing %s" % self.names[index]
# graph = self.graphs[index]
self.dates.append(deque([timestamps[index]] * WINDOW, maxlen=WINDOW))
self.convertedDates.append(deque(
[date2num(date) for date in self.dates[index]], maxlen=WINDOW
))
self.actualValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
self.predictedValues.append(deque([0.0] * WINDOW, maxlen=WINDOW))
actualPlot, = self.graphs[index].plot(
self.dates[index], self.actualValues[index]
)
self.actualLines.append(actualPlot)
predictedPlot, = self.graphs[index].plot(
self.dates[index], self.predictedValues[index]
)
self.predictedLines.append(predictedPlot)
self.linesInitialized = True
def write(self, timestamps, actualValues, predictedValues,
predictionStep=1):
assert len(timestamps) == len(actualValues) == len(predictedValues)
# We need the first timestamp to initialize the lines at the right X value,
# so do that check first.
if not self.linesInitialized:
self.initializeLines(timestamps)
for index in range(len(self.names)):
self.dates[index].append(timestamps[index])
self.convertedDates[index].append(date2num(timestamps[index]))
self.actualValues[index].append(actualValues[index])
self.predictedValues[index].append(predictedValues[index])
# Update data
self.actualLines[index].set_xdata(self.convertedDates[index])
self.actualLines[index].set_ydata(self.actualValues[index])
self.predictedLines[index].set_xdata(self.convertedDates[index])
self.predictedLines[index].set_ydata(self.predictedValues[index])
self.graphs[index].relim()
self.graphs[index].autoscale_view(True, True, True)
plt.draw()
plt.legend(('actual','predicted'), loc=3)
def refreshGUI(self):
"""Give plot a pause, so data is drawn and GUI's event loop can run.
"""
plt.pause(0.0001)
def close(self):
plt.ioff()
plt.show()
NuPICOutput.register(NuPICFileOutput)
NuPICOutput.register(NuPICPlotOutput)
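# --- Editorial usage sketch (not part of the original NuPIC tutorial file) ---
# Minimal illustration of the shared write()/close() signature defined above,
# using the CSV-backed NuPICFileOutput. Python 2 syntax to match the module;
# the gym name and the readings below are hypothetical.
if __name__ == "__main__":
  from datetime import datetime, timedelta

  output = NuPICFileOutput(["rec-center-hourly"])  # writes rec-center-hourly_out.csv
  start = datetime(2010, 7, 2)
  for step in range(3):
    ts = start + timedelta(hours=step)
    actual = 20.0 + step           # hypothetical kW reading
    predicted = actual + 0.5       # hypothetical one-step-ahead prediction
    output.write([ts], [actual], [predicted], predictionStep=1)
  output.close()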
| agpl-3.0 | mitdbg/modeldb | client/verta/tests/modelapi_hypothesis/test_value_generator.py | 1 | 2083 |
import pytest
import six
import numbers
pytest.importorskip("numpy")
pytest.importorskip("pandas")
import hypothesis
from value_generator import api_and_values, series_api_and_values, dataframe_api_and_values
# Check if the given value fits the defined api
def fit_api(api, value):
if api['type'] == 'VertaNull':
return value is None
if api['type'] == 'VertaBool':
return isinstance(value, bool)
if api['type'] == 'VertaFloat':
return isinstance(value, numbers.Real)
if api['type'] == 'VertaString':
return isinstance(value, six.string_types)
if api['type'] == 'VertaList':
if not isinstance(value, list):
return False
for subapi, subvalue in zip(api['value'], value):
if not fit_api(subapi, subvalue):
return False
if api['type'] == 'VertaJson':
keys = sorted([v['name'] for v in api['value']])
actual_keys = sorted(list(value.keys()))
if keys != actual_keys:
return False
subapi_dict = {v['name']: v for v in api['value']}
for k in keys:
if not fit_api(subapi_dict[k], value[k]):
return False
return True
# Verify that the value generation system actually creates something that fits the api
@hypothesis.given(api_and_values)
def test_value_from_api(api_and_values):
api, values = api_and_values
for v in values:
assert fit_api(api, v)
@hypothesis.given(series_api_and_values)
def test_series_from_api(api_and_values):
api, values = api_and_values
assert api['name'] == values.name
for v in values.to_list():
assert fit_api(api, v)
@hypothesis.given(dataframe_api_and_values)
def test_dataframe_from_api(api_and_values):
api, values = api_and_values
assert api['name'] == ''
assert api['type'] == 'VertaList'
for subapi, c in zip(api['value'], values.columns):
subvalues = values[c]
assert subapi['name'] == subvalues.name
for v in subvalues.to_list():
assert fit_api(subapi, v)
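# --- Editorial sketch (not part of the original test module) ---
# Direct illustration of the api/value convention checked by fit_api above:
# each api node is a dict with a 'type', and container types carry a 'value'
# list whose entries have a 'name'. The literals below are hypothetical.
def test_fit_api_hand_written_example():
    json_api = {
        'type': 'VertaJson',
        'value': [
            {'name': 'id', 'type': 'VertaFloat'},
            {'name': 'label', 'type': 'VertaString'},
        ],
    }
    assert fit_api(json_api, {'id': 1.5, 'label': 'cat'})
    assert not fit_api(json_api, {'id': 1.5})                     # missing key
    assert not fit_api(json_api, {'id': 'oops', 'label': 'cat'})  # wrong leaf type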
| mit | pjryan126/solid-start-careers | store/api/zillow/venv/lib/python2.7/site-packages/pandas/computation/tests/test_eval.py | 1 | 70497 |
#!/usr/bin/env python
# flake8: noqa
import warnings
import operator
from itertools import product
from distutils.version import LooseVersion
import nose
from nose.tools import assert_raises
from numpy.random import randn, rand, randint
import numpy as np
from numpy.testing import assert_allclose
from numpy.testing.decorators import slow
import pandas as pd
from pandas.core import common as com
from pandas import DataFrame, Series, Panel, date_range
from pandas.util.testing import makeCustomDataframe as mkdf
from pandas.computation import pytables
from pandas.computation.engines import _engines, NumExprClobberingError
from pandas.computation.expr import PythonExprVisitor, PandasExprVisitor
from pandas.computation.ops import (_binary_ops_dict,
_special_case_arith_ops_syms,
_arith_ops_syms, _bool_ops_syms,
_unary_math_ops, _binary_math_ops)
import pandas.computation.expr as expr
import pandas.util.testing as tm
import pandas.lib as lib
from pandas.util.testing import (assert_frame_equal, randbool,
assertRaisesRegexp, assert_numpy_array_equal,
assert_produces_warning, assert_series_equal)
from pandas.compat import PY3, u, reduce
_series_frame_incompatible = _bool_ops_syms
_scalar_skip = 'in', 'not in'
def engine_has_neg_frac(engine):
return _engines[engine].has_neg_frac
def _eval_single_bin(lhs, cmp1, rhs, engine):
c = _binary_ops_dict[cmp1]
if engine_has_neg_frac(engine):
try:
return c(lhs, rhs)
except ValueError as e:
try:
msg = e.message
except AttributeError:
msg = e
msg = u(msg)
if msg == u('negative number cannot be raised to a fractional'
' power'):
return np.nan
raise
return c(lhs, rhs)
def _series_and_2d_ndarray(lhs, rhs):
return ((isinstance(lhs, Series) and
isinstance(rhs, np.ndarray) and rhs.ndim > 1)
or (isinstance(rhs, Series) and
isinstance(lhs, np.ndarray) and lhs.ndim > 1))
def _series_and_frame(lhs, rhs):
return ((isinstance(lhs, Series) and isinstance(rhs, DataFrame))
or (isinstance(rhs, Series) and isinstance(lhs, DataFrame)))
def _bool_and_frame(lhs, rhs):
return isinstance(lhs, bool) and isinstance(rhs, pd.core.generic.NDFrame)
def _is_py3_complex_incompat(result, expected):
return (PY3 and isinstance(expected, (complex, np.complexfloating)) and
np.isnan(result))
_good_arith_ops = com.difference(_arith_ops_syms, _special_case_arith_ops_syms)
class TestEvalNumexprPandas(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestEvalNumexprPandas, cls).setUpClass()
tm.skip_if_no_ne()
import numexpr as ne
cls.ne = ne
cls.engine = 'numexpr'
cls.parser = 'pandas'
@classmethod
def tearDownClass(cls):
super(TestEvalNumexprPandas, cls).tearDownClass()
del cls.engine, cls.parser
if hasattr(cls, 'ne'):
del cls.ne
def setup_data(self):
nan_df1 = DataFrame(rand(10, 5))
nan_df1[nan_df1 > 0.5] = np.nan
nan_df2 = DataFrame(rand(10, 5))
nan_df2[nan_df2 > 0.5] = np.nan
self.pandas_lhses = (DataFrame(randn(10, 5)), Series(randn(5)),
Series([1, 2, np.nan, np.nan, 5]), nan_df1)
self.pandas_rhses = (DataFrame(randn(10, 5)), Series(randn(5)),
Series([1, 2, np.nan, np.nan, 5]), nan_df2)
self.scalar_lhses = randn(),
self.scalar_rhses = randn(),
self.lhses = self.pandas_lhses + self.scalar_lhses
self.rhses = self.pandas_rhses + self.scalar_rhses
def setup_ops(self):
self.cmp_ops = expr._cmp_ops_syms
self.cmp2_ops = self.cmp_ops[::-1]
self.bin_ops = expr._bool_ops_syms
self.special_case_ops = _special_case_arith_ops_syms
self.arith_ops = _good_arith_ops
self.unary_ops = '-', '~', 'not '
def setUp(self):
self.setup_ops()
self.setup_data()
self.current_engines = filter(lambda x: x != self.engine, _engines)
def tearDown(self):
del self.lhses, self.rhses, self.scalar_rhses, self.scalar_lhses
del self.pandas_rhses, self.pandas_lhses, self.current_engines
@slow
def test_complex_cmp_ops(self):
cmp_ops = ('!=', '==', '<=', '>=', '<', '>')
cmp2_ops = ('>', '<')
for lhs, cmp1, rhs, binop, cmp2 in product(self.lhses, cmp_ops,
self.rhses, self.bin_ops,
cmp2_ops):
self.check_complex_cmp_op(lhs, cmp1, rhs, binop, cmp2)
def test_simple_cmp_ops(self):
bool_lhses = (DataFrame(randbool(size=(10, 5))),
Series(randbool((5,))), randbool())
bool_rhses = (DataFrame(randbool(size=(10, 5))),
Series(randbool((5,))), randbool())
for lhs, rhs, cmp_op in product(bool_lhses, bool_rhses, self.cmp_ops):
self.check_simple_cmp_op(lhs, cmp_op, rhs)
@slow
def test_binary_arith_ops(self):
for lhs, op, rhs in product(self.lhses, self.arith_ops, self.rhses):
self.check_binary_arith_op(lhs, op, rhs)
def test_modulus(self):
for lhs, rhs in product(self.lhses, self.rhses):
self.check_modulus(lhs, '%', rhs)
def test_floor_division(self):
for lhs, rhs in product(self.lhses, self.rhses):
self.check_floor_division(lhs, '//', rhs)
def test_pow(self):
tm._skip_if_windows()
# odd failure on win32 platform, so skip
for lhs, rhs in product(self.lhses, self.rhses):
self.check_pow(lhs, '**', rhs)
@slow
def test_single_invert_op(self):
for lhs, op, rhs in product(self.lhses, self.cmp_ops, self.rhses):
self.check_single_invert_op(lhs, op, rhs)
@slow
def test_compound_invert_op(self):
for lhs, op, rhs in product(self.lhses, self.cmp_ops, self.rhses):
self.check_compound_invert_op(lhs, op, rhs)
@slow
def test_chained_cmp_op(self):
mids = self.lhses
cmp_ops = '<', '>'
for lhs, cmp1, mid, cmp2, rhs in product(self.lhses, cmp_ops,
mids, cmp_ops, self.rhses):
self.check_chained_cmp_op(lhs, cmp1, mid, cmp2, rhs)
def check_complex_cmp_op(self, lhs, cmp1, rhs, binop, cmp2):
skip_these = _scalar_skip
ex = '(lhs {cmp1} rhs) {binop} (lhs {cmp2} rhs)'.format(cmp1=cmp1,
binop=binop,
cmp2=cmp2)
scalar_with_in_notin = (lib.isscalar(rhs) and (cmp1 in skip_these or
cmp2 in skip_these))
if scalar_with_in_notin:
with tm.assertRaises(TypeError):
pd.eval(ex, engine=self.engine, parser=self.parser)
self.assertRaises(TypeError, pd.eval, ex, engine=self.engine,
parser=self.parser, local_dict={'lhs': lhs,
'rhs': rhs})
else:
lhs_new = _eval_single_bin(lhs, cmp1, rhs, self.engine)
rhs_new = _eval_single_bin(lhs, cmp2, rhs, self.engine)
if (isinstance(lhs_new, Series) and isinstance(rhs_new, DataFrame)
and binop in _series_frame_incompatible):
pass
# TODO: the code below should be added back when left and right
# hand side bool ops are fixed.
# try:
# self.assertRaises(Exception, pd.eval, ex,
#local_dict={'lhs': lhs, 'rhs': rhs},
# engine=self.engine, parser=self.parser)
# except AssertionError:
#import ipdb; ipdb.set_trace()
# raise
else:
expected = _eval_single_bin(
lhs_new, binop, rhs_new, self.engine)
result = pd.eval(ex, engine=self.engine, parser=self.parser)
tm.assert_numpy_array_equal(result, expected)
def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs):
skip_these = _scalar_skip
def check_operands(left, right, cmp_op):
return _eval_single_bin(left, cmp_op, right, self.engine)
lhs_new = check_operands(lhs, mid, cmp1)
rhs_new = check_operands(mid, rhs, cmp2)
if lhs_new is not None and rhs_new is not None:
ex1 = 'lhs {0} mid {1} rhs'.format(cmp1, cmp2)
ex2 = 'lhs {0} mid and mid {1} rhs'.format(cmp1, cmp2)
ex3 = '(lhs {0} mid) & (mid {1} rhs)'.format(cmp1, cmp2)
expected = _eval_single_bin(lhs_new, '&', rhs_new, self.engine)
for ex in (ex1, ex2, ex3):
result = pd.eval(ex, engine=self.engine,
parser=self.parser)
tm.assert_numpy_array_equal(result, expected)
def check_simple_cmp_op(self, lhs, cmp1, rhs):
ex = 'lhs {0} rhs'.format(cmp1)
if cmp1 in ('in', 'not in') and not com.is_list_like(rhs):
self.assertRaises(TypeError, pd.eval, ex, engine=self.engine,
parser=self.parser, local_dict={'lhs': lhs,
'rhs': rhs})
else:
expected = _eval_single_bin(lhs, cmp1, rhs, self.engine)
result = pd.eval(ex, engine=self.engine, parser=self.parser)
tm.assert_numpy_array_equal(result, expected)
def check_binary_arith_op(self, lhs, arith1, rhs):
ex = 'lhs {0} rhs'.format(arith1)
result = pd.eval(ex, engine=self.engine, parser=self.parser)
expected = _eval_single_bin(lhs, arith1, rhs, self.engine)
tm.assert_numpy_array_equal(result, expected)
ex = 'lhs {0} rhs {0} rhs'.format(arith1)
result = pd.eval(ex, engine=self.engine, parser=self.parser)
nlhs = _eval_single_bin(lhs, arith1, rhs,
self.engine)
self.check_alignment(result, nlhs, rhs, arith1)
def check_alignment(self, result, nlhs, ghs, op):
try:
nlhs, ghs = nlhs.align(ghs)
except (ValueError, TypeError, AttributeError):
# ValueError: series frame or frame series align
# TypeError, AttributeError: series or frame with scalar align
pass
else:
expected = self.ne.evaluate('nlhs {0} ghs'.format(op))
tm.assert_numpy_array_equal(result, expected)
# modulus, pow, and floor division require special casing
def check_modulus(self, lhs, arith1, rhs):
ex = 'lhs {0} rhs'.format(arith1)
result = pd.eval(ex, engine=self.engine, parser=self.parser)
expected = lhs % rhs
assert_allclose(result, expected)
expected = self.ne.evaluate('expected {0} rhs'.format(arith1))
assert_allclose(result, expected)
def check_floor_division(self, lhs, arith1, rhs):
ex = 'lhs {0} rhs'.format(arith1)
if self.engine == 'python':
res = pd.eval(ex, engine=self.engine, parser=self.parser)
expected = lhs // rhs
tm.assert_numpy_array_equal(res, expected)
else:
self.assertRaises(TypeError, pd.eval, ex, local_dict={'lhs': lhs,
'rhs': rhs},
engine=self.engine, parser=self.parser)
def get_expected_pow_result(self, lhs, rhs):
try:
expected = _eval_single_bin(lhs, '**', rhs, self.engine)
except ValueError as e:
msg = 'negative number cannot be raised to a fractional power'
try:
emsg = e.message
except AttributeError:
emsg = e
emsg = u(emsg)
if emsg == msg:
if self.engine == 'python':
raise nose.SkipTest(emsg)
else:
expected = np.nan
else:
raise
return expected
def check_pow(self, lhs, arith1, rhs):
ex = 'lhs {0} rhs'.format(arith1)
expected = self.get_expected_pow_result(lhs, rhs)
result = pd.eval(ex, engine=self.engine, parser=self.parser)
if (lib.isscalar(lhs) and lib.isscalar(rhs) and
_is_py3_complex_incompat(result, expected)):
self.assertRaises(AssertionError, tm.assert_numpy_array_equal,
result, expected)
else:
assert_allclose(result, expected)
ex = '(lhs {0} rhs) {0} rhs'.format(arith1)
result = pd.eval(ex, engine=self.engine, parser=self.parser)
expected = self.get_expected_pow_result(
self.get_expected_pow_result(lhs, rhs), rhs)
assert_allclose(result, expected)
def check_single_invert_op(self, lhs, cmp1, rhs):
# simple
for el in (lhs, rhs):
try:
elb = el.astype(bool)
except AttributeError:
elb = np.array([bool(el)])
expected = ~elb
result = pd.eval('~elb', engine=self.engine, parser=self.parser)
tm.assert_numpy_array_equal(expected, result)
for engine in self.current_engines:
tm.skip_if_no_ne(engine)
tm.assert_numpy_array_equal(result, pd.eval('~elb', engine=engine,
parser=self.parser))
def check_compound_invert_op(self, lhs, cmp1, rhs):
skip_these = 'in', 'not in'
ex = '~(lhs {0} rhs)'.format(cmp1)
if lib.isscalar(rhs) and cmp1 in skip_these:
self.assertRaises(TypeError, pd.eval, ex, engine=self.engine,
parser=self.parser, local_dict={'lhs': lhs,
'rhs': rhs})
else:
# compound
if lib.isscalar(lhs) and lib.isscalar(rhs):
lhs, rhs = map(lambda x: np.array([x]), (lhs, rhs))
expected = _eval_single_bin(lhs, cmp1, rhs, self.engine)
if lib.isscalar(expected):
expected = not expected
else:
expected = ~expected
result = pd.eval(ex, engine=self.engine, parser=self.parser)
tm.assert_numpy_array_equal(expected, result)
# make sure the other engines work the same as this one
for engine in self.current_engines:
tm.skip_if_no_ne(engine)
ev = pd.eval(ex, engine=self.engine, parser=self.parser)
tm.assert_numpy_array_equal(ev, result)
def ex(self, op, var_name='lhs'):
return '{0}{1}'.format(op, var_name)
def test_frame_invert(self):
expr = self.ex('~')
# ~ ##
# frame
# float always raises
lhs = DataFrame(randn(5, 2))
if self.engine == 'numexpr':
with tm.assertRaises(NotImplementedError):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
else:
with tm.assertRaises(TypeError):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
# int raises on numexpr
lhs = DataFrame(randint(5, size=(5, 2)))
if self.engine == 'numexpr':
with tm.assertRaises(NotImplementedError):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
else:
expect = ~lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
assert_frame_equal(expect, result)
# bool always works
lhs = DataFrame(rand(5, 2) > 0.5)
expect = ~lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
assert_frame_equal(expect, result)
# object raises
lhs = DataFrame({'b': ['a', 1, 2.0], 'c': rand(3) > 0.5})
if self.engine == 'numexpr':
with tm.assertRaises(ValueError):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
else:
with tm.assertRaises(TypeError):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
def test_series_invert(self):
# ~ ####
expr = self.ex('~')
# series
# float raises
lhs = Series(randn(5))
if self.engine == 'numexpr':
with tm.assertRaises(NotImplementedError):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
else:
with tm.assertRaises(TypeError):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
# int raises on numexpr
lhs = Series(randint(5, size=5))
if self.engine == 'numexpr':
with tm.assertRaises(NotImplementedError):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
else:
expect = ~lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
assert_series_equal(expect, result)
# bool
lhs = Series(rand(5) > 0.5)
expect = ~lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
assert_series_equal(expect, result)
# float
# int
# bool
# object
lhs = Series(['a', 1, 2.0])
if self.engine == 'numexpr':
with tm.assertRaises(ValueError):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
else:
with tm.assertRaises(TypeError):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
def test_frame_negate(self):
expr = self.ex('-')
# float
lhs = DataFrame(randn(5, 2))
expect = -lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
assert_frame_equal(expect, result)
# int
lhs = DataFrame(randint(5, size=(5, 2)))
expect = -lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
assert_frame_equal(expect, result)
# bool doesn't work with numexpr but works elsewhere
lhs = DataFrame(rand(5, 2) > 0.5)
if self.engine == 'numexpr':
with tm.assertRaises(NotImplementedError):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
else:
expect = -lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
assert_frame_equal(expect, result)
def test_series_negate(self):
expr = self.ex('-')
# float
lhs = Series(randn(5))
expect = -lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
assert_series_equal(expect, result)
# int
lhs = Series(randint(5, size=5))
expect = -lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
assert_series_equal(expect, result)
# bool doesn't work with numexpr but works elsewhere
lhs = Series(rand(5) > 0.5)
if self.engine == 'numexpr':
with tm.assertRaises(NotImplementedError):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
else:
expect = -lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
assert_series_equal(expect, result)
def test_frame_pos(self):
expr = self.ex('+')
# float
lhs = DataFrame(randn(5, 2))
if self.engine == 'python':
with tm.assertRaises(TypeError):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
else:
expect = lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
assert_frame_equal(expect, result)
# int
lhs = DataFrame(randint(5, size=(5, 2)))
if self.engine == 'python':
with tm.assertRaises(TypeError):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
else:
expect = lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
assert_frame_equal(expect, result)
# bool doesn't work with numexpr but works elsewhere
lhs = DataFrame(rand(5, 2) > 0.5)
if self.engine == 'python':
with tm.assertRaises(TypeError):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
else:
expect = lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
assert_frame_equal(expect, result)
def test_series_pos(self):
expr = self.ex('+')
# float
lhs = Series(randn(5))
if self.engine == 'python':
with tm.assertRaises(TypeError):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
else:
expect = lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
assert_series_equal(expect, result)
# int
lhs = Series(randint(5, size=5))
if self.engine == 'python':
with tm.assertRaises(TypeError):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
else:
expect = lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
assert_series_equal(expect, result)
# bool doesn't work with numexpr but works elsewhere
lhs = Series(rand(5) > 0.5)
if self.engine == 'python':
with tm.assertRaises(TypeError):
result = pd.eval(expr, engine=self.engine, parser=self.parser)
else:
expect = lhs
result = pd.eval(expr, engine=self.engine, parser=self.parser)
assert_series_equal(expect, result)
def test_scalar_unary(self):
with tm.assertRaises(TypeError):
pd.eval('~1.0', engine=self.engine, parser=self.parser)
self.assertEqual(
pd.eval('-1.0', parser=self.parser, engine=self.engine), -1.0)
self.assertEqual(
pd.eval('+1.0', parser=self.parser, engine=self.engine), +1.0)
self.assertEqual(
pd.eval('~1', parser=self.parser, engine=self.engine), ~1)
self.assertEqual(
pd.eval('-1', parser=self.parser, engine=self.engine), -1)
self.assertEqual(
pd.eval('+1', parser=self.parser, engine=self.engine), +1)
self.assertEqual(
pd.eval('~True', parser=self.parser, engine=self.engine), ~True)
self.assertEqual(
pd.eval('~False', parser=self.parser, engine=self.engine), ~False)
self.assertEqual(
pd.eval('-True', parser=self.parser, engine=self.engine), -True)
self.assertEqual(
pd.eval('-False', parser=self.parser, engine=self.engine), -False)
self.assertEqual(
pd.eval('+True', parser=self.parser, engine=self.engine), +True)
self.assertEqual(
pd.eval('+False', parser=self.parser, engine=self.engine), +False)
def test_unary_in_array(self):
# GH 11235
assert_numpy_array_equal(
pd.eval('[-True, True, ~True, +True,'
'-False, False, ~False, +False,'
'-37, 37, ~37, +37]'),
np.array([-True, True, ~True, +True,
-False, False, ~False, +False,
-37, 37, ~37, +37]))
def test_disallow_scalar_bool_ops(self):
exprs = '1 or 2', '1 and 2'
exprs += 'a and b', 'a or b'
exprs += '1 or 2 and (3 + 2) > 3',
exprs += '2 * x > 2 or 1 and 2',
exprs += '2 * df > 3 and 1 or a',
x, a, b, df = np.random.randn(3), 1, 2, DataFrame(randn(3, 2))
for ex in exprs:
with tm.assertRaises(NotImplementedError):
pd.eval(ex, engine=self.engine, parser=self.parser)
def test_identical(self):
# GH 10546
x = 1
result = pd.eval('x', engine=self.engine, parser=self.parser)
self.assertEqual(result, 1)
self.assertTrue(lib.isscalar(result))
x = 1.5
result = pd.eval('x', engine=self.engine, parser=self.parser)
self.assertEqual(result, 1.5)
self.assertTrue(lib.isscalar(result))
x = False
result = pd.eval('x', engine=self.engine, parser=self.parser)
self.assertEqual(result, False)
self.assertTrue(lib.isscalar(result))
x = np.array([1])
result = pd.eval('x', engine=self.engine, parser=self.parser)
tm.assert_numpy_array_equal(result, np.array([1]))
self.assertEqual(result.shape, (1, ))
x = np.array([1.5])
result = pd.eval('x', engine=self.engine, parser=self.parser)
tm.assert_numpy_array_equal(result, np.array([1.5]))
self.assertEqual(result.shape, (1, ))
x = np.array([False])
result = pd.eval('x', engine=self.engine, parser=self.parser)
tm.assert_numpy_array_equal(result, np.array([False]))
self.assertEqual(result.shape, (1, ))
def test_line_continuation(self):
# GH 11149
exp = """1 + 2 * \
5 - 1 + 2 """
result = pd.eval(exp, engine=self.engine, parser=self.parser)
self.assertEqual(result, 12)
class TestEvalNumexprPython(TestEvalNumexprPandas):
@classmethod
def setUpClass(cls):
super(TestEvalNumexprPython, cls).setUpClass()
tm.skip_if_no_ne()
import numexpr as ne
cls.ne = ne
cls.engine = 'numexpr'
cls.parser = 'python'
def setup_ops(self):
self.cmp_ops = list(filter(lambda x: x not in ('in', 'not in'),
expr._cmp_ops_syms))
self.cmp2_ops = self.cmp_ops[::-1]
self.bin_ops = [s for s in expr._bool_ops_syms
if s not in ('and', 'or')]
self.special_case_ops = _special_case_arith_ops_syms
self.arith_ops = _good_arith_ops
self.unary_ops = '+', '-', '~'
def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs):
ex1 = 'lhs {0} mid {1} rhs'.format(cmp1, cmp2)
with tm.assertRaises(NotImplementedError):
pd.eval(ex1, engine=self.engine, parser=self.parser)
class TestEvalPythonPython(TestEvalNumexprPython):
@classmethod
def setUpClass(cls):
super(TestEvalPythonPython, cls).setUpClass()
cls.engine = 'python'
cls.parser = 'python'
def check_modulus(self, lhs, arith1, rhs):
ex = 'lhs {0} rhs'.format(arith1)
result = pd.eval(ex, engine=self.engine, parser=self.parser)
expected = lhs % rhs
assert_allclose(result, expected)
expected = _eval_single_bin(expected, arith1, rhs, self.engine)
assert_allclose(result, expected)
def check_alignment(self, result, nlhs, ghs, op):
try:
nlhs, ghs = nlhs.align(ghs)
except (ValueError, TypeError, AttributeError):
# ValueError: series frame or frame series align
# TypeError, AttributeError: series or frame with scalar align
pass
else:
expected = eval('nlhs {0} ghs'.format(op))
tm.assert_numpy_array_equal(result, expected)
class TestEvalPythonPandas(TestEvalPythonPython):
@classmethod
def setUpClass(cls):
super(TestEvalPythonPandas, cls).setUpClass()
cls.engine = 'python'
cls.parser = 'pandas'
def check_chained_cmp_op(self, lhs, cmp1, mid, cmp2, rhs):
TestEvalNumexprPandas.check_chained_cmp_op(self, lhs, cmp1, mid, cmp2,
rhs)
f = lambda *args, **kwargs: np.random.randn()
ENGINES_PARSERS = list(product(_engines, expr._parsers))
#-------------------------------------
# basic and complex alignment
def _is_datetime(x):
return issubclass(x.dtype.type, np.datetime64)
def should_warn(*args):
not_mono = not any(map(operator.attrgetter('is_monotonic'), args))
only_one_dt = reduce(operator.xor, map(_is_datetime, args))
return not_mono and only_one_dt
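# Editorial sketch (not part of the original test suite): should_warn() is True
# only when no index passed in is monotonic and exactly one of them is
# datetime64-backed; the indexes below are made up for illustration.
def test_should_warn_editorial_example():
    idx_dt = pd.DatetimeIndex(['2000-01-02', '2000-01-01'])   # not monotonic, datetime
    idx_int = pd.Index([3, 1, 2])                             # not monotonic, integer
    assert should_warn(idx_dt, idx_int)
    assert not should_warn(idx_int, idx_int)                  # no datetime index involved
    assert not should_warn(pd.DatetimeIndex(['2000-01-01', '2000-01-02']), idx_int)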
class TestAlignment(object):
index_types = 'i', 'u', 'dt'
lhs_index_types = index_types + ('s',) # 'p'
def check_align_nested_unary_op(self, engine, parser):
tm.skip_if_no_ne(engine)
s = 'df * ~2'
df = mkdf(5, 3, data_gen_f=f)
res = pd.eval(s, engine=engine, parser=parser)
assert_frame_equal(res, df * ~2)
def test_align_nested_unary_op(self):
for engine, parser in ENGINES_PARSERS:
yield self.check_align_nested_unary_op, engine, parser
def check_basic_frame_alignment(self, engine, parser):
tm.skip_if_no_ne(engine)
args = product(self.lhs_index_types, self.index_types,
self.index_types)
with warnings.catch_warnings(record=True):
warnings.simplefilter('always', RuntimeWarning)
for lr_idx_type, rr_idx_type, c_idx_type in args:
df = mkdf(10, 10, data_gen_f=f, r_idx_type=lr_idx_type,
c_idx_type=c_idx_type)
df2 = mkdf(20, 10, data_gen_f=f, r_idx_type=rr_idx_type,
c_idx_type=c_idx_type)
# only warns if not monotonic and not sortable
if should_warn(df.index, df2.index):
with tm.assert_produces_warning(RuntimeWarning):
res = pd.eval('df + df2', engine=engine, parser=parser)
else:
res = pd.eval('df + df2', engine=engine, parser=parser)
assert_frame_equal(res, df + df2)
def test_basic_frame_alignment(self):
for engine, parser in ENGINES_PARSERS:
yield self.check_basic_frame_alignment, engine, parser
def check_frame_comparison(self, engine, parser):
tm.skip_if_no_ne(engine)
args = product(self.lhs_index_types, repeat=2)
for r_idx_type, c_idx_type in args:
df = mkdf(10, 10, data_gen_f=f, r_idx_type=r_idx_type,
c_idx_type=c_idx_type)
res = pd.eval('df < 2', engine=engine, parser=parser)
assert_frame_equal(res, df < 2)
df3 = DataFrame(randn(*df.shape), index=df.index,
columns=df.columns)
res = pd.eval('df < df3', engine=engine, parser=parser)
assert_frame_equal(res, df < df3)
def test_frame_comparison(self):
for engine, parser in ENGINES_PARSERS:
yield self.check_frame_comparison, engine, parser
def check_medium_complex_frame_alignment(self, engine, parser):
tm.skip_if_no_ne(engine)
args = product(self.lhs_index_types, self.index_types,
self.index_types, self.index_types)
with warnings.catch_warnings(record=True):
warnings.simplefilter('always', RuntimeWarning)
for r1, c1, r2, c2 in args:
df = mkdf(3, 2, data_gen_f=f, r_idx_type=r1, c_idx_type=c1)
df2 = mkdf(4, 2, data_gen_f=f, r_idx_type=r2, c_idx_type=c2)
df3 = mkdf(5, 2, data_gen_f=f, r_idx_type=r2, c_idx_type=c2)
if should_warn(df.index, df2.index, df3.index):
with tm.assert_produces_warning(RuntimeWarning):
res = pd.eval('df + df2 + df3', engine=engine,
parser=parser)
else:
res = pd.eval('df + df2 + df3',
engine=engine, parser=parser)
assert_frame_equal(res, df + df2 + df3)
@slow
def test_medium_complex_frame_alignment(self):
for engine, parser in ENGINES_PARSERS:
yield self.check_medium_complex_frame_alignment, engine, parser
def check_basic_frame_series_alignment(self, engine, parser):
tm.skip_if_no_ne(engine)
def testit(r_idx_type, c_idx_type, index_name):
df = mkdf(10, 10, data_gen_f=f, r_idx_type=r_idx_type,
c_idx_type=c_idx_type)
index = getattr(df, index_name)
s = Series(np.random.randn(5), index[:5])
if should_warn(df.index, s.index):
with tm.assert_produces_warning(RuntimeWarning):
res = pd.eval('df + s', engine=engine, parser=parser)
else:
res = pd.eval('df + s', engine=engine, parser=parser)
if r_idx_type == 'dt' or c_idx_type == 'dt':
expected = df.add(s) if engine == 'numexpr' else df + s
else:
expected = df + s
assert_frame_equal(res, expected)
args = product(self.lhs_index_types, self.index_types,
('index', 'columns'))
with warnings.catch_warnings(record=True):
warnings.simplefilter('always', RuntimeWarning)
for r_idx_type, c_idx_type, index_name in args:
testit(r_idx_type, c_idx_type, index_name)
def test_basic_frame_series_alignment(self):
for engine, parser in ENGINES_PARSERS:
yield self.check_basic_frame_series_alignment, engine, parser
def check_basic_series_frame_alignment(self, engine, parser):
tm.skip_if_no_ne(engine)
def testit(r_idx_type, c_idx_type, index_name):
df = mkdf(10, 7, data_gen_f=f, r_idx_type=r_idx_type,
c_idx_type=c_idx_type)
index = getattr(df, index_name)
s = Series(np.random.randn(5), index[:5])
if should_warn(s.index, df.index):
with tm.assert_produces_warning(RuntimeWarning):
res = pd.eval('s + df', engine=engine, parser=parser)
else:
res = pd.eval('s + df', engine=engine, parser=parser)
if r_idx_type == 'dt' or c_idx_type == 'dt':
expected = df.add(s) if engine == 'numexpr' else s + df
else:
expected = s + df
assert_frame_equal(res, expected)
# only test dt with dt, otherwise weird joins result
args = product(['i', 'u', 's'], ['i', 'u', 's'], ('index', 'columns'))
with warnings.catch_warnings(record=True):
for r_idx_type, c_idx_type, index_name in args:
testit(r_idx_type, c_idx_type, index_name)
# dt with dt
args = product(['dt'], ['dt'], ('index', 'columns'))
with warnings.catch_warnings(record=True):
for r_idx_type, c_idx_type, index_name in args:
testit(r_idx_type, c_idx_type, index_name)
def test_basic_series_frame_alignment(self):
for engine, parser in ENGINES_PARSERS:
yield self.check_basic_series_frame_alignment, engine, parser
def check_series_frame_commutativity(self, engine, parser):
tm.skip_if_no_ne(engine)
args = product(self.lhs_index_types, self.index_types, ('+', '*'),
('index', 'columns'))
with warnings.catch_warnings(record=True):
warnings.simplefilter('always', RuntimeWarning)
for r_idx_type, c_idx_type, op, index_name in args:
df = mkdf(10, 10, data_gen_f=f, r_idx_type=r_idx_type,
c_idx_type=c_idx_type)
index = getattr(df, index_name)
s = Series(np.random.randn(5), index[:5])
lhs = 's {0} df'.format(op)
rhs = 'df {0} s'.format(op)
if should_warn(df.index, s.index):
with tm.assert_produces_warning(RuntimeWarning):
a = pd.eval(lhs, engine=engine, parser=parser)
with tm.assert_produces_warning(RuntimeWarning):
b = pd.eval(rhs, engine=engine, parser=parser)
else:
a = pd.eval(lhs, engine=engine, parser=parser)
b = pd.eval(rhs, engine=engine, parser=parser)
if r_idx_type != 'dt' and c_idx_type != 'dt':
if engine == 'numexpr':
assert_frame_equal(a, b)
def test_series_frame_commutativity(self):
for engine, parser in ENGINES_PARSERS:
yield self.check_series_frame_commutativity, engine, parser
def check_complex_series_frame_alignment(self, engine, parser):
tm.skip_if_no_ne(engine)
import random
args = product(self.lhs_index_types, self.index_types,
self.index_types, self.index_types)
n = 3
m1 = 5
m2 = 2 * m1
with warnings.catch_warnings(record=True):
warnings.simplefilter('always', RuntimeWarning)
for r1, r2, c1, c2 in args:
index_name = random.choice(['index', 'columns'])
obj_name = random.choice(['df', 'df2'])
df = mkdf(m1, n, data_gen_f=f, r_idx_type=r1, c_idx_type=c1)
df2 = mkdf(m2, n, data_gen_f=f, r_idx_type=r2, c_idx_type=c2)
index = getattr(locals().get(obj_name), index_name)
s = Series(np.random.randn(n), index[:n])
if r2 == 'dt' or c2 == 'dt':
if engine == 'numexpr':
expected2 = df2.add(s)
else:
expected2 = df2 + s
else:
expected2 = df2 + s
if r1 == 'dt' or c1 == 'dt':
if engine == 'numexpr':
expected = expected2.add(df)
else:
expected = expected2 + df
else:
expected = expected2 + df
if should_warn(df2.index, s.index, df.index):
with tm.assert_produces_warning(RuntimeWarning):
res = pd.eval('df2 + s + df', engine=engine,
parser=parser)
else:
res = pd.eval('df2 + s + df', engine=engine, parser=parser)
tm.assert_equal(res.shape, expected.shape)
assert_frame_equal(res, expected)
@slow
def test_complex_series_frame_alignment(self):
for engine, parser in ENGINES_PARSERS:
yield self.check_complex_series_frame_alignment, engine, parser
def check_performance_warning_for_poor_alignment(self, engine, parser):
tm.skip_if_no_ne(engine)
df = DataFrame(randn(1000, 10))
s = Series(randn(10000))
if engine == 'numexpr':
seen = pd.core.common.PerformanceWarning
else:
seen = False
with assert_produces_warning(seen):
pd.eval('df + s', engine=engine, parser=parser)
s = Series(randn(1000))
with assert_produces_warning(False):
pd.eval('df + s', engine=engine, parser=parser)
df = DataFrame(randn(10, 10000))
s = Series(randn(10000))
with assert_produces_warning(False):
pd.eval('df + s', engine=engine, parser=parser)
df = DataFrame(randn(10, 10))
s = Series(randn(10000))
is_python_engine = engine == 'python'
if not is_python_engine:
wrn = pd.core.common.PerformanceWarning
else:
wrn = False
with assert_produces_warning(wrn) as w:
pd.eval('df + s', engine=engine, parser=parser)
if not is_python_engine:
tm.assert_equal(len(w), 1)
msg = str(w[0].message)
expected = ("Alignment difference on axis {0} is larger"
" than an order of magnitude on term {1!r}, "
"by more than {2:.4g}; performance may suffer"
"".format(1, 'df', np.log10(s.size - df.shape[1])))
tm.assert_equal(msg, expected)
def test_performance_warning_for_poor_alignment(self):
for engine, parser in ENGINES_PARSERS:
yield (self.check_performance_warning_for_poor_alignment, engine,
parser)
#------------------------------------
# slightly more complex ops
class TestOperationsNumExprPandas(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestOperationsNumExprPandas, cls).setUpClass()
tm.skip_if_no_ne()
cls.engine = 'numexpr'
cls.parser = 'pandas'
cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms
@classmethod
def tearDownClass(cls):
super(TestOperationsNumExprPandas, cls).tearDownClass()
del cls.engine, cls.parser
def eval(self, *args, **kwargs):
kwargs['engine'] = self.engine
kwargs['parser'] = self.parser
kwargs['level'] = kwargs.pop('level', 0) + 1
return pd.eval(*args, **kwargs)
def test_simple_arith_ops(self):
ops = self.arith_ops
for op in filter(lambda x: x != '//', ops):
ex = '1 {0} 1'.format(op)
ex2 = 'x {0} 1'.format(op)
ex3 = '1 {0} (x + 1)'.format(op)
if op in ('in', 'not in'):
self.assertRaises(TypeError, pd.eval, ex,
engine=self.engine, parser=self.parser)
else:
expec = _eval_single_bin(1, op, 1, self.engine)
x = self.eval(ex, engine=self.engine, parser=self.parser)
tm.assert_equal(x, expec)
expec = _eval_single_bin(x, op, 1, self.engine)
y = self.eval(ex2, local_dict={'x': x}, engine=self.engine,
parser=self.parser)
tm.assert_equal(y, expec)
expec = _eval_single_bin(1, op, x + 1, self.engine)
y = self.eval(ex3, local_dict={'x': x},
engine=self.engine, parser=self.parser)
tm.assert_equal(y, expec)
def test_simple_bool_ops(self):
for op, lhs, rhs in product(expr._bool_ops_syms, (True, False),
(True, False)):
ex = '{0} {1} {2}'.format(lhs, op, rhs)
res = self.eval(ex)
exp = eval(ex)
self.assertEqual(res, exp)
def test_bool_ops_with_constants(self):
for op, lhs, rhs in product(expr._bool_ops_syms, ('True', 'False'),
('True', 'False')):
ex = '{0} {1} {2}'.format(lhs, op, rhs)
res = self.eval(ex)
exp = eval(ex)
self.assertEqual(res, exp)
def test_panel_fails(self):
x = Panel(randn(3, 4, 5))
y = Series(randn(10))
assert_raises(NotImplementedError, self.eval, 'x + y',
local_dict={'x': x, 'y': y})
def test_4d_ndarray_fails(self):
x = randn(3, 4, 5, 6)
y = Series(randn(10))
assert_raises(NotImplementedError, self.eval, 'x + y',
local_dict={'x': x, 'y': y})
def test_constant(self):
x = self.eval('1')
tm.assert_equal(x, 1)
def test_single_variable(self):
df = DataFrame(randn(10, 2))
df2 = self.eval('df', local_dict={'df': df})
assert_frame_equal(df, df2)
def test_truediv(self):
s = np.array([1])
ex = 's / 1'
d = {'s': s}
if PY3:
res = self.eval(ex, truediv=False)
tm.assert_numpy_array_equal(res, np.array([1.0]))
res = self.eval(ex, truediv=True)
tm.assert_numpy_array_equal(res, np.array([1.0]))
res = self.eval('1 / 2', truediv=True)
expec = 0.5
self.assertEqual(res, expec)
res = self.eval('1 / 2', truediv=False)
expec = 0.5
self.assertEqual(res, expec)
res = self.eval('s / 2', truediv=False)
expec = 0.5
self.assertEqual(res, expec)
res = self.eval('s / 2', truediv=True)
expec = 0.5
self.assertEqual(res, expec)
else:
res = self.eval(ex, truediv=False)
tm.assert_numpy_array_equal(res, np.array([1]))
res = self.eval(ex, truediv=True)
tm.assert_numpy_array_equal(res, np.array([1.0]))
res = self.eval('1 / 2', truediv=True)
expec = 0.5
self.assertEqual(res, expec)
res = self.eval('1 / 2', truediv=False)
expec = 0
self.assertEqual(res, expec)
res = self.eval('s / 2', truediv=False)
expec = 0
self.assertEqual(res, expec)
res = self.eval('s / 2', truediv=True)
expec = 0.5
self.assertEqual(res, expec)
def test_failing_subscript_with_name_error(self):
df = DataFrame(np.random.randn(5, 3))
with tm.assertRaises(NameError):
self.eval('df[x > 2] > 2')
def test_lhs_expression_subscript(self):
df = DataFrame(np.random.randn(5, 3))
result = self.eval('(df + 1)[df > 2]', local_dict={'df': df})
expected = (df + 1)[df > 2]
assert_frame_equal(result, expected)
def test_attr_expression(self):
df = DataFrame(np.random.randn(5, 3), columns=list('abc'))
expr1 = 'df.a < df.b'
expec1 = df.a < df.b
expr2 = 'df.a + df.b + df.c'
expec2 = df.a + df.b + df.c
expr3 = 'df.a + df.b + df.c[df.b < 0]'
expec3 = df.a + df.b + df.c[df.b < 0]
exprs = expr1, expr2, expr3
expecs = expec1, expec2, expec3
for e, expec in zip(exprs, expecs):
assert_series_equal(expec, self.eval(e, local_dict={'df': df}))
def test_assignment_fails(self):
df = DataFrame(np.random.randn(5, 3), columns=list('abc'))
df2 = DataFrame(np.random.randn(5, 3))
expr1 = 'df = df2'
self.assertRaises(ValueError, self.eval, expr1,
local_dict={'df': df, 'df2': df2})
def test_assignment_column(self):
tm.skip_if_no_ne('numexpr')
df = DataFrame(np.random.randn(5, 2), columns=list('ab'))
orig_df = df.copy()
# multiple assignees
self.assertRaises(SyntaxError, df.eval, 'd c = a + b')
# invalid assignees
self.assertRaises(SyntaxError, df.eval, 'd,c = a + b')
self.assertRaises(
SyntaxError, df.eval, 'Timestamp("20131001") = a + b')
# single assignment - existing variable
expected = orig_df.copy()
expected['a'] = expected['a'] + expected['b']
df = orig_df.copy()
df.eval('a = a + b', inplace=True)
assert_frame_equal(df, expected)
# single assignment - new variable
expected = orig_df.copy()
expected['c'] = expected['a'] + expected['b']
df = orig_df.copy()
df.eval('c = a + b', inplace=True)
assert_frame_equal(df, expected)
# with a local name overlap
def f():
df = orig_df.copy()
a = 1 # noqa
df.eval('a = 1 + b', inplace=True)
return df
df = f()
expected = orig_df.copy()
expected['a'] = 1 + expected['b']
assert_frame_equal(df, expected)
df = orig_df.copy()
def f():
a = 1 # noqa
old_a = df.a.copy()
df.eval('a = a + b', inplace=True)
result = old_a + df.b
assert_series_equal(result, df.a, check_names=False)
self.assertTrue(result.name is None)
f()
# multiple assignment
df = orig_df.copy()
df.eval('c = a + b', inplace=True)
self.assertRaises(SyntaxError, df.eval, 'c = a = b')
# explicit targets
df = orig_df.copy()
self.eval('c = df.a + df.b', local_dict={'df': df},
target=df, inplace=True)
expected = orig_df.copy()
expected['c'] = expected['a'] + expected['b']
assert_frame_equal(df, expected)
def test_column_in(self):
# GH 11235
df = DataFrame({'a': [11], 'b': [-32]})
result = df.eval('a in [11, -32]')
expected = Series([True])
assert_series_equal(result, expected)
def assignment_not_inplace(self):
# GH 9297
tm.skip_if_no_ne('numexpr')
df = DataFrame(np.random.randn(5, 2), columns=list('ab'))
actual = df.eval('c = a + b', inplace=False)
self.assertIsNotNone(actual)
expected = df.copy()
expected['c'] = expected['a'] + expected['b']
assert_frame_equal(df, expected)
# default for inplace will change
with tm.assert_produces_warning(FutureWarning):
df.eval('c = a + b')
# but don't warn without assignment
with tm.assert_produces_warning(None):
df.eval('a + b')
def test_multi_line_expression(self):
# GH 11149
tm.skip_if_no_ne('numexpr')
df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
expected = df.copy()
expected['c'] = expected['a'] + expected['b']
expected['d'] = expected['c'] + expected['b']
ans = df.eval("""
c = a + b
d = c + b""", inplace=True)
assert_frame_equal(expected, df)
self.assertIsNone(ans)
expected['a'] = expected['a'] - 1
expected['e'] = expected['a'] + 2
ans = df.eval("""
a = a - 1
e = a + 2""", inplace=True)
assert_frame_equal(expected, df)
self.assertIsNone(ans)
# multi-line not valid if not all assignments
with tm.assertRaises(ValueError):
df.eval("""
a = b + 2
b - 2""", inplace=False)
def test_multi_line_expression_not_inplace(self):
# GH 11149
tm.skip_if_no_ne('numexpr')
df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
expected = df.copy()
expected['c'] = expected['a'] + expected['b']
expected['d'] = expected['c'] + expected['b']
df = df.eval("""
c = a + b
d = c + b""", inplace=False)
assert_frame_equal(expected, df)
expected['a'] = expected['a'] - 1
expected['e'] = expected['a'] + 2
df = df.eval("""
a = a - 1
e = a + 2""", inplace=False)
assert_frame_equal(expected, df)
def test_assignment_in_query(self):
# GH 8664
df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
df_orig = df.copy()
with tm.assertRaises(ValueError):
df.query('a = 1')
assert_frame_equal(df, df_orig)
def query_inplace(self):
# GH 11149
df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})
expected = df.copy()
expected = expected[expected['a'] == 2]
df.query('a == 2', inplace=True)
assert_frame_equal(expected, df)
def test_basic_period_index_boolean_expression(self):
df = mkdf(2, 2, data_gen_f=f, c_idx_type='p', r_idx_type='i')
e = df < 2
r = self.eval('df < 2', local_dict={'df': df})
x = df < 2
assert_frame_equal(r, e)
assert_frame_equal(x, e)
def test_basic_period_index_subscript_expression(self):
df = mkdf(2, 2, data_gen_f=f, c_idx_type='p', r_idx_type='i')
r = self.eval('df[df < 2 + 3]', local_dict={'df': df})
e = df[df < 2 + 3]
assert_frame_equal(r, e)
def test_nested_period_index_subscript_expression(self):
df = mkdf(2, 2, data_gen_f=f, c_idx_type='p', r_idx_type='i')
r = self.eval('df[df[df < 2] < 2] + df * 2', local_dict={'df': df})
e = df[df[df < 2] < 2] + df * 2
assert_frame_equal(r, e)
def test_date_boolean(self):
df = DataFrame(randn(5, 3))
df['dates1'] = date_range('1/1/2012', periods=5)
res = self.eval('df.dates1 < 20130101', local_dict={'df': df},
engine=self.engine, parser=self.parser)
expec = df.dates1 < '20130101'
assert_series_equal(res, expec, check_names=False)
def test_simple_in_ops(self):
if self.parser != 'python':
res = pd.eval('1 in [1, 2]', engine=self.engine,
parser=self.parser)
self.assertTrue(res)
res = pd.eval('2 in (1, 2)', engine=self.engine,
parser=self.parser)
self.assertTrue(res)
res = pd.eval('3 in (1, 2)', engine=self.engine,
parser=self.parser)
self.assertFalse(res)
res = pd.eval('3 not in (1, 2)', engine=self.engine,
parser=self.parser)
self.assertTrue(res)
res = pd.eval('[3] not in (1, 2)', engine=self.engine,
parser=self.parser)
self.assertTrue(res)
res = pd.eval('[3] in ([3], 2)', engine=self.engine,
parser=self.parser)
self.assertTrue(res)
res = pd.eval('[[3]] in [[[3]], 2]', engine=self.engine,
parser=self.parser)
self.assertTrue(res)
res = pd.eval('(3,) in [(3,), 2]', engine=self.engine,
parser=self.parser)
self.assertTrue(res)
res = pd.eval('(3,) not in [(3,), 2]', engine=self.engine,
parser=self.parser)
self.assertFalse(res)
res = pd.eval('[(3,)] in [[(3,)], 2]', engine=self.engine,
parser=self.parser)
self.assertTrue(res)
else:
with tm.assertRaises(NotImplementedError):
pd.eval('1 in [1, 2]', engine=self.engine, parser=self.parser)
with tm.assertRaises(NotImplementedError):
pd.eval('2 in (1, 2)', engine=self.engine, parser=self.parser)
with tm.assertRaises(NotImplementedError):
pd.eval('3 in (1, 2)', engine=self.engine, parser=self.parser)
with tm.assertRaises(NotImplementedError):
pd.eval('3 not in (1, 2)', engine=self.engine,
parser=self.parser)
with tm.assertRaises(NotImplementedError):
pd.eval('[(3,)] in (1, 2, [(3,)])', engine=self.engine,
parser=self.parser)
with tm.assertRaises(NotImplementedError):
pd.eval('[3] not in (1, 2, [[3]])', engine=self.engine,
parser=self.parser)
class TestOperationsNumExprPython(TestOperationsNumExprPandas):
@classmethod
def setUpClass(cls):
super(TestOperationsNumExprPython, cls).setUpClass()
cls.engine = 'numexpr'
cls.parser = 'python'
tm.skip_if_no_ne(cls.engine)
cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms
cls.arith_ops = filter(lambda x: x not in ('in', 'not in'),
cls.arith_ops)
def test_check_many_exprs(self):
a = 1
expr = ' * '.join('a' * 33)
expected = 1
res = pd.eval(expr, engine=self.engine, parser=self.parser)
tm.assert_equal(res, expected)
def test_fails_and(self):
df = DataFrame(np.random.randn(5, 3))
self.assertRaises(NotImplementedError, pd.eval, 'df > 2 and df > 3',
local_dict={'df': df}, parser=self.parser,
engine=self.engine)
def test_fails_or(self):
df = DataFrame(np.random.randn(5, 3))
self.assertRaises(NotImplementedError, pd.eval, 'df > 2 or df > 3',
local_dict={'df': df}, parser=self.parser,
engine=self.engine)
def test_fails_not(self):
df = DataFrame(np.random.randn(5, 3))
self.assertRaises(NotImplementedError, pd.eval, 'not df > 2',
local_dict={'df': df}, parser=self.parser,
engine=self.engine)
def test_fails_ampersand(self):
df = DataFrame(np.random.randn(5, 3))
ex = '(df + 2)[df > 1] > 0 & (df > 0)'
with tm.assertRaises(NotImplementedError):
pd.eval(ex, parser=self.parser, engine=self.engine)
def test_fails_pipe(self):
df = DataFrame(np.random.randn(5, 3))
ex = '(df + 2)[df > 1] > 0 | (df > 0)'
with tm.assertRaises(NotImplementedError):
pd.eval(ex, parser=self.parser, engine=self.engine)
def test_bool_ops_with_constants(self):
for op, lhs, rhs in product(expr._bool_ops_syms, ('True', 'False'),
('True', 'False')):
ex = '{0} {1} {2}'.format(lhs, op, rhs)
if op in ('and', 'or'):
with tm.assertRaises(NotImplementedError):
self.eval(ex)
else:
res = self.eval(ex)
exp = eval(ex)
self.assertEqual(res, exp)
def test_simple_bool_ops(self):
for op, lhs, rhs in product(expr._bool_ops_syms, (True, False),
(True, False)):
ex = 'lhs {0} rhs'.format(op)
if op in ('and', 'or'):
with tm.assertRaises(NotImplementedError):
pd.eval(ex, engine=self.engine, parser=self.parser)
else:
res = pd.eval(ex, engine=self.engine, parser=self.parser)
exp = eval(ex)
self.assertEqual(res, exp)
class TestOperationsPythonPython(TestOperationsNumExprPython):
@classmethod
def setUpClass(cls):
super(TestOperationsPythonPython, cls).setUpClass()
cls.engine = cls.parser = 'python'
cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms
cls.arith_ops = filter(lambda x: x not in ('in', 'not in'),
cls.arith_ops)
class TestOperationsPythonPandas(TestOperationsNumExprPandas):
@classmethod
def setUpClass(cls):
super(TestOperationsPythonPandas, cls).setUpClass()
cls.engine = 'python'
cls.parser = 'pandas'
cls.arith_ops = expr._arith_ops_syms + expr._cmp_ops_syms
class TestMathPythonPython(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestMathPythonPython, cls).setUpClass()
tm.skip_if_no_ne()
cls.engine = 'python'
cls.parser = 'pandas'
cls.unary_fns = _unary_math_ops
cls.binary_fns = _binary_math_ops
@classmethod
def tearDownClass(cls):
del cls.engine, cls.parser
def eval(self, *args, **kwargs):
kwargs['engine'] = self.engine
kwargs['parser'] = self.parser
kwargs['level'] = kwargs.pop('level', 0) + 1
return pd.eval(*args, **kwargs)
def test_unary_functions(self):
df = DataFrame({'a': np.random.randn(10)})
a = df.a
for fn in self.unary_fns:
expr = "{0}(a)".format(fn)
got = self.eval(expr)
expect = getattr(np, fn)(a)
tm.assert_series_equal(got, expect, check_names=False)
def test_binary_functions(self):
df = DataFrame({'a': np.random.randn(10),
'b': np.random.randn(10)})
a = df.a
b = df.b
for fn in self.binary_fns:
expr = "{0}(a, b)".format(fn)
got = self.eval(expr)
expect = getattr(np, fn)(a, b)
np.testing.assert_allclose(got, expect)
def test_df_use_case(self):
df = DataFrame({'a': np.random.randn(10),
'b': np.random.randn(10)})
df.eval("e = arctan2(sin(a), b)",
engine=self.engine,
parser=self.parser, inplace=True)
got = df.e
expect = np.arctan2(np.sin(df.a), df.b)
tm.assert_series_equal(got, expect, check_names=False)
def test_df_arithmetic_subexpression(self):
df = DataFrame({'a': np.random.randn(10),
'b': np.random.randn(10)})
df.eval("e = sin(a + b)",
engine=self.engine,
parser=self.parser, inplace=True)
got = df.e
expect = np.sin(df.a + df.b)
tm.assert_series_equal(got, expect, check_names=False)
def check_result_type(self, dtype, expect_dtype):
df = DataFrame({'a': np.random.randn(10).astype(dtype)})
self.assertEqual(df.a.dtype, dtype)
df.eval("b = sin(a)",
engine=self.engine,
parser=self.parser, inplace=True)
got = df.b
expect = np.sin(df.a)
self.assertEqual(expect.dtype, got.dtype)
self.assertEqual(expect_dtype, got.dtype)
tm.assert_series_equal(got, expect, check_names=False)
def test_result_types(self):
self.check_result_type(np.int32, np.float64)
self.check_result_type(np.int64, np.float64)
self.check_result_type(np.float32, np.float32)
self.check_result_type(np.float64, np.float64)
def test_result_types2(self):
# xref https://github.com/pydata/pandas/issues/12293
raise nose.SkipTest("unreliable tests on complex128")
# Did not test complex64 because DataFrame is converting it to
# complex128. Due to https://github.com/pydata/pandas/issues/10952
self.check_result_type(np.complex128, np.complex128)
def test_undefined_func(self):
df = DataFrame({'a': np.random.randn(10)})
with tm.assertRaisesRegexp(ValueError,
"\"mysin\" is not a supported function"):
df.eval("mysin(a)",
engine=self.engine,
parser=self.parser)
def test_keyword_arg(self):
df = DataFrame({'a': np.random.randn(10)})
with tm.assertRaisesRegexp(TypeError,
"Function \"sin\" does not support "
"keyword arguments"):
df.eval("sin(x=a)",
engine=self.engine,
parser=self.parser)
class TestMathPythonPandas(TestMathPythonPython):
@classmethod
def setUpClass(cls):
super(TestMathPythonPandas, cls).setUpClass()
cls.engine = 'python'
cls.parser = 'pandas'
class TestMathNumExprPandas(TestMathPythonPython):
@classmethod
def setUpClass(cls):
super(TestMathNumExprPandas, cls).setUpClass()
cls.engine = 'numexpr'
cls.parser = 'pandas'
class TestMathNumExprPython(TestMathPythonPython):
@classmethod
def setUpClass(cls):
super(TestMathNumExprPython, cls).setUpClass()
cls.engine = 'numexpr'
cls.parser = 'python'
_var_s = randn(10)
class TestScope(object):
def check_global_scope(self, e, engine, parser):
tm.skip_if_no_ne(engine)
tm.assert_numpy_array_equal(_var_s * 2, pd.eval(e, engine=engine,
parser=parser))
def test_global_scope(self):
e = '_var_s * 2'
for engine, parser in product(_engines, expr._parsers):
yield self.check_global_scope, e, engine, parser
def check_no_new_locals(self, engine, parser):
tm.skip_if_no_ne(engine)
x = 1
lcls = locals().copy()
pd.eval('x + 1', local_dict=lcls, engine=engine, parser=parser)
lcls2 = locals().copy()
lcls2.pop('lcls')
tm.assert_equal(lcls, lcls2)
def test_no_new_locals(self):
for engine, parser in product(_engines, expr._parsers):
yield self.check_no_new_locals, engine, parser
def check_no_new_globals(self, engine, parser):
tm.skip_if_no_ne(engine)
x = 1
gbls = globals().copy()
pd.eval('x + 1', engine=engine, parser=parser)
gbls2 = globals().copy()
tm.assert_equal(gbls, gbls2)
def test_no_new_globals(self):
for engine, parser in product(_engines, expr._parsers):
yield self.check_no_new_globals, engine, parser
def test_invalid_engine():
tm.skip_if_no_ne()
assertRaisesRegexp(KeyError, 'Invalid engine \'asdf\' passed',
pd.eval, 'x + y', local_dict={'x': 1, 'y': 2},
engine='asdf')
def test_invalid_parser():
tm.skip_if_no_ne()
assertRaisesRegexp(KeyError, 'Invalid parser \'asdf\' passed',
pd.eval, 'x + y', local_dict={'x': 1, 'y': 2},
parser='asdf')
_parsers = {'python': PythonExprVisitor, 'pytables': pytables.ExprVisitor,
'pandas': PandasExprVisitor}
def check_disallowed_nodes(engine, parser):
tm.skip_if_no_ne(engine)
VisitorClass = _parsers[parser]
uns_ops = VisitorClass.unsupported_nodes
inst = VisitorClass('x + 1', engine, parser)
for ops in uns_ops:
assert_raises(NotImplementedError, getattr(inst, ops))
def test_disallowed_nodes():
for engine, visitor in product(_parsers, repeat=2):
yield check_disallowed_nodes, engine, visitor
def check_syntax_error_exprs(engine, parser):
tm.skip_if_no_ne(engine)
e = 's +'
assert_raises(SyntaxError, pd.eval, e, engine=engine, parser=parser)
def test_syntax_error_exprs():
for engine, parser in ENGINES_PARSERS:
yield check_syntax_error_exprs, engine, parser
def check_name_error_exprs(engine, parser):
tm.skip_if_no_ne(engine)
e = 's + t'
with tm.assertRaises(NameError):
pd.eval(e, engine=engine, parser=parser)
def test_name_error_exprs():
for engine, parser in ENGINES_PARSERS:
yield check_name_error_exprs, engine, parser
def check_invalid_local_variable_reference(engine, parser):
tm.skip_if_no_ne(engine)
a, b = 1, 2
exprs = 'a + @b', '@a + b', '@a + @b'
for expr in exprs:
if parser != 'pandas':
with tm.assertRaisesRegexp(SyntaxError, "The '@' prefix is only"):
                pd.eval(expr, engine=engine, parser=parser)
else:
with tm.assertRaisesRegexp(SyntaxError, "The '@' prefix is not"):
                pd.eval(expr, engine=engine, parser=parser)
def test_invalid_local_variable_reference():
for engine, parser in ENGINES_PARSERS:
yield check_invalid_local_variable_reference, engine, parser
def check_numexpr_builtin_raises(engine, parser):
tm.skip_if_no_ne(engine)
sin, dotted_line = 1, 2
if engine == 'numexpr':
with tm.assertRaisesRegexp(NumExprClobberingError,
'Variables in expression .+'):
pd.eval('sin + dotted_line', engine=engine, parser=parser)
else:
res = pd.eval('sin + dotted_line', engine=engine, parser=parser)
tm.assert_equal(res, sin + dotted_line)
def test_numexpr_builtin_raises():
for engine, parser in ENGINES_PARSERS:
yield check_numexpr_builtin_raises, engine, parser
def check_bad_resolver_raises(engine, parser):
tm.skip_if_no_ne(engine)
cannot_resolve = 42, 3.0
with tm.assertRaisesRegexp(TypeError, 'Resolver of type .+'):
pd.eval('1 + 2', resolvers=cannot_resolve, engine=engine,
parser=parser)
def test_bad_resolver_raises():
for engine, parser in ENGINES_PARSERS:
yield check_bad_resolver_raises, engine, parser
def check_more_than_one_expression_raises(engine, parser):
tm.skip_if_no_ne(engine)
with tm.assertRaisesRegexp(SyntaxError,
'only a single expression is allowed'):
pd.eval('1 + 1; 2 + 2', engine=engine, parser=parser)
def test_more_than_one_expression_raises():
for engine, parser in ENGINES_PARSERS:
yield check_more_than_one_expression_raises, engine, parser
def check_bool_ops_fails_on_scalars(gen, lhs, cmp, rhs, engine, parser):
tm.skip_if_no_ne(engine)
mid = gen[type(lhs)]()
ex1 = 'lhs {0} mid {1} rhs'.format(cmp, cmp)
ex2 = 'lhs {0} mid and mid {1} rhs'.format(cmp, cmp)
ex3 = '(lhs {0} mid) & (mid {1} rhs)'.format(cmp, cmp)
for ex in (ex1, ex2, ex3):
with tm.assertRaises(NotImplementedError):
pd.eval(ex, engine=engine, parser=parser)
def test_bool_ops_fails_on_scalars():
_bool_ops_syms = 'and', 'or'
dtypes = int, float
gen = {int: lambda: np.random.randint(10), float: np.random.randn}
for engine, parser, dtype1, cmp, dtype2 in product(_engines, expr._parsers,
dtypes, _bool_ops_syms,
dtypes):
yield (check_bool_ops_fails_on_scalars, gen, gen[dtype1](), cmp,
gen[dtype2](), engine, parser)
def check_inf(engine, parser):
tm.skip_if_no_ne(engine)
s = 'inf + 1'
expected = np.inf
result = pd.eval(s, engine=engine, parser=parser)
tm.assert_equal(result, expected)
def test_inf():
for engine, parser in ENGINES_PARSERS:
yield check_inf, engine, parser
def check_negate_lt_eq_le(engine, parser):
tm.skip_if_no_ne(engine)
df = pd.DataFrame([[0, 10], [1, 20]], columns=['cat', 'count'])
expected = df[~(df.cat > 0)]
result = df.query('~(cat > 0)', engine=engine, parser=parser)
tm.assert_frame_equal(result, expected)
if parser == 'python':
with tm.assertRaises(NotImplementedError):
df.query('not (cat > 0)', engine=engine, parser=parser)
else:
result = df.query('not (cat > 0)', engine=engine, parser=parser)
tm.assert_frame_equal(result, expected)
def test_negate_lt_eq_le():
for engine, parser in product(_engines, expr._parsers):
yield check_negate_lt_eq_le, engine, parser
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
gpl-2.0
|
antoinearnoud/openfisca-france-indirect-taxation
|
openfisca_france_indirect_taxation/examples/utils_example.py
|
4
|
6597
|
# -*- coding: utf-8 -*-
from __future__ import division
from pandas import DataFrame
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import openfisca_france_indirect_taxation
from openfisca_france_indirect_taxation.surveys import get_input_data_frame
from openfisca_survey_manager.survey_collections import SurveyCollection
from openfisca_france_indirect_taxation.surveys import SurveyScenario
from openfisca_france_indirect_taxation.examples.calage_bdf_cn import \
build_df_calee_on_grospostes, build_df_calee_on_ticpe
def create_survey_scenario(year = None):
assert year is not None
input_data_frame = get_input_data_frame(year)
TaxBenefitSystem = openfisca_france_indirect_taxation.init_country()
tax_benefit_system = TaxBenefitSystem()
survey_scenario = SurveyScenario().init_from_data_frame(
input_data_frame = input_data_frame,
tax_benefit_system = tax_benefit_system,
year = year,
)
return survey_scenario
def simulate(simulated_variables, year):
'''
    Build the DataFrame from which the data analysis will be performed
'''
input_data_frame = get_input_data_frame(year)
TaxBenefitSystem = openfisca_france_indirect_taxation.init_country()
tax_benefit_system = TaxBenefitSystem()
survey_scenario = SurveyScenario().init_from_data_frame(
input_data_frame = input_data_frame,
tax_benefit_system = tax_benefit_system,
year = year,
)
simulation = survey_scenario.new_simulation()
return DataFrame(
dict([
(name, simulation.calculate(name)) for name in simulated_variables
])
)
def simulate_df_calee_by_grosposte(simulated_variables, year):
'''
    Build the DataFrame from which the data analysis will be performed
'''
input_data_frame = get_input_data_frame(year)
input_data_frame_calee = build_df_calee_on_grospostes(input_data_frame, year, year)
TaxBenefitSystem = openfisca_france_indirect_taxation.init_country()
tax_benefit_system = TaxBenefitSystem()
survey_scenario = SurveyScenario().init_from_data_frame(
input_data_frame = input_data_frame_calee,
tax_benefit_system = tax_benefit_system,
year = year,
)
simulation = survey_scenario.new_simulation()
return DataFrame(
dict([
(name, simulation.calculate(name)) for name in simulated_variables
])
)
def simulate_df_calee_on_ticpe(simulated_variables, year):
'''
    Build the DataFrame from which the data analysis will be performed
'''
input_data_frame = get_input_data_frame(year)
input_data_frame_calee = build_df_calee_on_ticpe(input_data_frame, year, year)
TaxBenefitSystem = openfisca_france_indirect_taxation.init_country()
tax_benefit_system = TaxBenefitSystem()
survey_scenario = SurveyScenario().init_from_data_frame(
input_data_frame = input_data_frame_calee,
tax_benefit_system = tax_benefit_system,
year = year,
)
simulation = survey_scenario.new_simulation()
return DataFrame(
dict([
(name, simulation.calculate(name)) for name in simulated_variables
])
)
def wavg(groupe, var):
'''
    Compute the weighted mean of a variable within each group
'''
d = groupe[var]
w = groupe['pondmen']
return (d * w).sum() / w.sum()
def collapse(dataframe, groupe, var):
'''
    For a given variable, compute the weighted mean within each group.
'''
grouped = dataframe.groupby([groupe])
var_weighted_grouped = grouped.apply(lambda x: wavg(groupe = x, var = var))
return var_weighted_grouped
def df_weighted_average_grouped(dataframe, groupe, varlist):
'''
    Aggregate the weighted group means computed by collapse() into a single DataFrame for the variables in 'varlist'.
'''
return DataFrame(
dict([
(var, collapse(dataframe, groupe, var)) for var in varlist
])
)
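# Editor's illustrative sketch (not part of the original module): how wavg(),
# collapse() and df_weighted_average_grouped() fit together. The column names
# 'decile' and 'depenses_tot' are invented for the example; only 'pondmen' is
# required, since wavg() hard-codes it as the weight column.
def _exemple_moyenne_ponderee():
    exemple = DataFrame({
        'decile': [1, 1, 2, 2],
        'pondmen': [100, 300, 200, 200],
        'depenses_tot': [10.0, 20.0, 30.0, 50.0],
        })
    # decile 1 -> (10 * 100 + 20 * 300) / 400 = 17.5
    # decile 2 -> (30 * 200 + 50 * 200) / 400 = 40.0
    return df_weighted_average_grouped(exemple, 'decile', ['depenses_tot'])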
# To choose colors when plotting, a list of colors could be passed as an argument
def graph_builder_bar(graph):
axes = graph.plot(
kind = 'bar',
stacked = True,
)
plt.axhline(0, color = 'k')
axes.yaxis.set_major_formatter(ticker.FuncFormatter(percent_formatter))
axes.legend(
bbox_to_anchor = (1.5, 1.05),
)
return plt.show()
def graph_builder_bar_list(graph, a, b):
axes = graph.plot(
kind = 'bar',
stacked = True,
color = ['#FF0000']
)
plt.axhline(0, color = 'k')
axes.legend(
bbox_to_anchor = (a, b),
)
return plt.show()
def graph_builder_line_percent(graph, a, b):
axes = graph.plot(
)
plt.axhline(0, color = 'k')
axes.yaxis.set_major_formatter(ticker.FuncFormatter(percent_formatter))
axes.legend(
bbox_to_anchor = (a, b),
)
return plt.show()
def graph_builder_line(graph):
axes = graph.plot(
)
plt.axhline(0, color = 'k')
axes.legend(
bbox_to_anchor = (1, 0.25),
)
return plt.show()
def graph_builder_carburants(data_frame, name, legend1, legend2, color1, color2, color3, color4):
axes = data_frame.plot(
color = [color1, color2, color3, color4])
fig = axes.get_figure()
plt.axhline(0, color = 'k')
# axes.xaxis(data_frame['annee'])
axes.legend(
bbox_to_anchor = (legend1, legend2),
)
return plt.show(), fig.savefig('C:/Users/thomas.douenne/Documents/data/graphs_transports/{}.png'.format(name))
def graph_builder_carburants_no_color(data_frame, name, legend1, legend2):
axes = data_frame.plot()
fig = axes.get_figure()
plt.axhline(0, color = 'k')
# axes.xaxis(data_frame['annee'])
axes.legend(
bbox_to_anchor = (legend1, legend2),
)
return plt.show(), fig.savefig('C:/Users/thomas.douenne/Documents/data/graphs_transports/{}.png'.format(name))
def percent_formatter(x, pos = 0):
return '%1.0f%%' % (100 * x)
def save_dataframe_to_graph(dataframe, file_name):
return dataframe.to_csv('C:/Users/thomas.douenne/Documents/data/Stats_rapport/' + file_name, sep = ';')
# assets_directory = os.path.join(
# pkg_resources.get_distribution('openfisca_france_indirect_taxation').location
# )
# return dataframe.to_csv(os.path.join(assets_directory, 'openfisca_france_indirect_taxation', 'assets',
# file_name), sep = ';')
|
agpl-3.0
|
othercriteria/StochasticBlockmodel
|
old/rasch_normal_bayes.py
|
1
|
7147
|
#!/usr/bin/env python
import numpy as np
import numexpr as ne
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from sklearn.linear_model import LogisticRegression
# Parameters
params = { 'N': 200,
'edge_precision': 1.0,
'prior_precision': 0.01,
'alpha_sd': 2.0,
'beta_shank': 3.0,
'num_shank': 8,
'beta_self': 4.0,
'kappa': -0.5,
'N_subs': [10, 25, 40, 55, 70],
'num_fits': 10,
'logistic_fit_alpha': True,
'plot_heatmap': False }
# Set random seed for reproducible output
np.random.seed(137)
# Calculate edge means from parameters and covariates
def edge_means(alpha, beta, kappa, x):
N = x.shape[0]
mu = np.zeros((N,N))
for i in range(N):
mu[i,:] += alpha[0,i]
for j in range(N):
        mu[:,j] += alpha[1,j]
mu += np.dot(x, beta)
mu += kappa
return mu
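# Editor's note: edge_means evaluates the Rasch-style linear predictor
#
#     mu[i, j] = alpha_out[i] + alpha_in[j] + x[i, j, :] . beta + kappa
#
# which is used below both as the mean of a Gaussian edge model (A_n) and, through
# the inverse-logit sigma(), as the success probability of a Bernoulli edge model (A_l).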
# Inverse-logit
def sigma(x):
return 1.0 / (1.0 + np.exp(-x))
# Procedure to find posterior mean and covariance via Bayesian inference
def infer_normal(A, x):
N = A.shape[0]
B = x.shape[2]
t = A.reshape((N*N,))
    Phi = np.zeros((N*N,(B + 1 + 2 * N)))
Phi_trans = np.transpose(Phi)
for b in range(B):
        Phi[:,b] = x[:,:,b].reshape((N*N,))
Phi[:,B] = 1.0
for i in range(N):
phi_row = np.zeros((N,N))
phi_row[i,:] = 1.0
Phi[:,B + 1 + i] = phi_row.reshape((N*N,))
    for j in range(N):
phi_col = np.zeros((N,N))
phi_col[:,j] = 1.0
Phi[:,B + 1 + N + j] = phi_col.reshape((N*N,))
S_N_inv = (params['prior_precision'] * np.eye(B + 1 + 2 * N) +
params['edge_precision'] * np.dot(Phi_trans, Phi))
S_N = np.linalg.inv(S_N_inv)
m_N = params['edge_precision'] * np.dot(S_N, np.dot(Phi_trans, t))
return m_N, S_N
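# Editor's note: the update above is the standard conjugate posterior for Bayesian
# linear regression with Gaussian noise of precision beta = params['edge_precision']
# and an isotropic zero-mean prior of precision alpha = params['prior_precision']:
#
#     S_N^{-1} = alpha * I + beta * Phi^T Phi
#     m_N      = beta * S_N Phi^T t
#
# Phi stacks the B edge covariates, an intercept column, and one indicator column
# per sender and per receiver.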
# Procedure to find MLE via logistic regression
def infer_logistic(A, x, fit_alpha = False):
N = A.shape[0]
B = x.shape[2]
lr = LogisticRegression(fit_intercept = True,
C = 1.0 / params['prior_precision'], penalty = 'l2')
y = A.reshape((N*N,))
if fit_alpha:
Phi = np.zeros((N*N,(B + 2*N)))
else:
Phi = np.zeros((N*N,B))
Phi[:,0] = 1.0
for b in range(B):
Phi[:,b] = x[:,:,b].reshape((N*N,))
if fit_alpha:
for i in range(N):
phi_row = np.zeros((N,N))
phi_row[i,:] = 1.0
Phi[:,B + i] = phi_row.reshape((N*N,))
for j in range(N):
phi_col = np.zeros((N,N))
phi_col[:,j] = 1.0
Phi[:,B + N + j] = phi_col.reshape((N*N,))
lr.fit(Phi, y)
coefs = lr.coef_[0]
intercept = lr.intercept_[0]
alpha = np.zeros((2,N))
out = {'alpha': alpha, 'beta': coefs[0:B], 'kappa': intercept}
if fit_alpha:
out['alpha'][0] = coefs[B:(B + N)]
out['alpha'][1] = coefs[(B + N):(B + 2*N)]
# Compute posterior covariance via Laplace approximation
if fit_alpha:
S_0_inv = params['prior_precision'] * np.eye(B + 1 + 2*N)
Phi_kappa = np.empty((N*N,(B + 1 + 2*N)))
Phi_kappa[:,(B + 1):(B + 1 + 2*N)] = Phi[:,B:(B + 2*N)]
w = np.empty(B + 1 + 2*N)
w[(B + 1):(B + 1 + 2*N)] = coefs[B:(B + 2*N)]
else:
S_0_inv = params['prior_precision'] * np.eye(B + 1)
Phi_kappa = np.empty((N*N,(B + 1)))
w = np.empty(B + 1)
Phi_kappa[:,0:B] = Phi[:,0:B]
Phi_kappa[:,B] = 1.0
w[0:B] = coefs[0:B]
w[B] = intercept
C = 0.0
for i in range(N*N):
y = sigma(np.dot(w, Phi_kappa[i,:]))
C += y * (1.0 - y) * (np.outer(Phi_kappa[i,:], Phi_kappa[i,:]))
S_N = np.linalg.inv(S_0_inv + C)
out['S_N'] = S_N
return out
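# Editor's note: the covariance S_N computed above is the Laplace approximation to
# the logistic-regression posterior,
#
#     S_N^{-1} = S_0^{-1} + sum_i y_i * (1 - y_i) * phi_i phi_i^T,
#
# where y_i = sigma(w . phi_i) is the fitted edge probability for dyad i.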
# Generate random network, using randomly generated latent parameters
if params['alpha_sd'] > 0.0:
alpha = np.random.normal(0, params['alpha_sd'], (2,params['N']))
alpha[0] -= np.mean(alpha[0])
alpha[1] -= np.mean(alpha[1])
else:
alpha = np.zeros((2,params['N']))
beta = np.array([params['beta_shank'], params['beta_self']])
shank = np.random.randint(0, params['num_shank'], params['N'])
x = np.empty((params['N'],params['N'],2))
for i in range(params['N']):
for j in range(params['N']):
x[i,j,0] = (shank[i] == shank[j])
x[i,j,1] = (i == j)
kappa = params['kappa']
mu = edge_means(alpha, beta, kappa, x)
A_n = np.random.normal(mu, np.sqrt(1.0 / params['edge_precision']))
A_l = np.random.random((params['N'],params['N'])) < sigma(mu)
# Show heatmap of the underlying network
if params['plot_heatmap']:
plt.figure()
plt.subplot(1,2,1)
plt.imshow(A_n)
plt.subplot(1,2,2)
plt.imshow(A_l)
plt.title('Unordered')
plt.figure()
o = np.argsort(shank)
plt.subplot(1,2,1)
plt.imshow(A_n[o][:,o])
plt.subplot(1,2,2)
plt.imshow(A_l[o][:,o])
plt.title('Grouped by shank')
plt.figure()
o = np.argsort(alpha[0])
plt.subplot(1,2,1)
plt.imshow(A_n[o][:,o])
plt.subplot(1,2,2)
plt.imshow(A_l[o][:,o])
plt.title('Ordered by alpha_out')
plt.figure()
o = np.argsort(alpha[1])
plt.subplot(1,2,1)
plt.imshow(A_n[o][:,o])
plt.subplot(1,2,2)
plt.imshow(A_l[o][:,o])
plt.title('Ordered by alpha_in')
plt.figure()
o = np.argsort(np.sum(alpha, axis = 0))
plt.subplot(1,2,1)
plt.imshow(A_n[o][:,o])
plt.subplot(1,2,2)
plt.imshow(A_l[o][:,o])
plt.title('Ordered by alpha_total')
# Convenience functions for plotting
#
# Finding the right settings for Ellipse is surprisingly tricky so I follow:
# http://scikit-learn.org/stable/auto_examples/plot_lda_qda.html
def make_axis(f, n, title):
ax = f.add_subplot(2, len(params['N_subs']), (n+1), aspect = 'equal')
ax.set_xlim(beta[0] - 2.0, beta[0] + 2.0)
ax.set_ylim(beta[1] - 2.0, beta[1] + 2.0)
ax.set_xlabel('beta_shank')
ax.set_ylabel('beta_self')
ax.set_title(title)
return ax
def draw_ellipse(a, m, S):
v, w = np.linalg.eigh(S)
u = w[0] / np.linalg.norm(w[0])
angle = (180.0 / np.pi) * np.arctan(u[1] / u[0])
e = Ellipse(m, 2.0 * np.sqrt(v[0]), 2.0 * np.sqrt(v[1]),
180.0 + angle, color = 'k')
a.add_artist(e)
    e.set_clip_box(a.bbox)
e.set_alpha(0.5)
# Fit model to subset of data, displaying beta posterior
fig = plt.figure()
inds = np.arange(params['N'])
for n, N_sub in enumerate(params['N_subs']):
for num_fit in range(params['num_fits']):
np.random.shuffle(inds)
sub = inds[0:N_sub]
# Sample subnetwork
A_n_sub = A_n[sub][:,sub]
A_l_sub = A_l[sub][:,sub]
x_sub = x[sub][:,sub]
# Fit normal model
m_N, S_N = infer_normal(A_n_sub, x_sub)
ax = make_axis(fig, n, 'Normal (N_sub = %d)' % N_sub)
draw_ellipse(ax, m_N[0:2], S_N[0:2,0:2])
# Fit logistic model
fit = infer_logistic(A_l_sub, x_sub, params['logistic_fit_alpha'])
ax = make_axis(fig, len(params['N_subs']) + n, 'Logistic')
draw_ellipse(ax, fit['beta'], fit['S_N'][0:2,0:2])
# Display all pending graphs
plt.show()
|
mit
|
sandias42/mlware
|
models/SVM.py
|
1
|
1693
|
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import SGDClassifier
#from sklearn.model_selection import cross_val_score
from scipy.io import mmread
import numpy as np
malware_classes = ["Agent", "AutoRun", "FraudLoad", "FraudPack", "Hupigon", "Krap",
"Lipler", "Magania", "None", "Poison", "Swizzor", "Tdss",
"VB", "Virut", "Zbot"]
# a function for writing predictions in the required format
def write_predictions(predictions, ids, outfile):
"""
assumes len(predictions) == len(ids), and that predictions[i] is the
    index of the predicted class within the malware_classes list above for
the executable corresponding to ids[i].
outfile will be overwritten
"""
with open(outfile,"w+") as f:
# write header
f.write("Id,Prediction\n")
for i, history_id in enumerate(ids):
f.write("%s,%d\n" % (history_id, predictions[i]))
def classes_to_Y(classes):
output = []
for cls in classes:
output.append(malware_classes.index(cls))
return np.array(output)
# load training classes
classes = np.load("../data/features/train_classes.npy")
# convert csr to a numpy array
sparse = np.load("/n/regal/scrb152/Students/sandias42/cs181/bow.npy")
# pull out training examples
X = sparse[:classes.shape[0],:]
X_test = sparse[classes.shape[0]:,:]
print(X_test.shape)
Y = classes_to_Y(classes)
model = SGDClassifier(n_jobs=-1, n_iter=100, verbose=1, loss="modified_huber")
model.fit(X,Y)
test_pred = model.predict(X_test)
print(test_pred)
test_ids = np.load("../data/features/test_ids.npy")
print(test_ids)
write_predictions(test_pred, test_ids, "../predictions/sgd_bow.csv")
|
mit
|
kelseyoo14/Wander
|
venv_2_7/lib/python2.7/site-packages/pandas/tests/test_expressions.py
|
9
|
16557
|
# -*- coding: utf-8 -*-
from __future__ import print_function
# pylint: disable-msg=W0612,E1101
import nose
import re
from numpy.random import randn
import operator
import numpy as np
from pandas.core.api import DataFrame, Panel
from pandas.computation import expressions as expr
from pandas import compat
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal, assert_panel_equal,
assert_panel4d_equal)
import pandas.util.testing as tm
import pandas.core.common as com
from numpy.testing.decorators import slow
if not expr._USE_NUMEXPR:
try:
import numexpr
except ImportError:
msg = "don't have"
else:
msg = "not using"
raise nose.SkipTest("{0} numexpr".format(msg))
_frame = DataFrame(randn(10000, 4), columns=list('ABCD'), dtype='float64')
_frame2 = DataFrame(randn(100, 4), columns = list('ABCD'), dtype='float64')
_mixed = DataFrame({ 'A' : _frame['A'].copy(), 'B' : _frame['B'].astype('float32'), 'C' : _frame['C'].astype('int64'), 'D' : _frame['D'].astype('int32') })
_mixed2 = DataFrame({ 'A' : _frame2['A'].copy(), 'B' : _frame2['B'].astype('float32'), 'C' : _frame2['C'].astype('int64'), 'D' : _frame2['D'].astype('int32') })
_integer = DataFrame(np.random.randint(1, 100, size=(10001, 4)), columns = list('ABCD'), dtype='int64')
_integer2 = DataFrame(np.random.randint(1, 100, size=(101, 4)),
columns=list('ABCD'), dtype='int64')
_frame_panel = Panel(dict(ItemA=_frame.copy(), ItemB=(_frame.copy() + 3), ItemC=_frame.copy(), ItemD=_frame.copy()))
_frame2_panel = Panel(dict(ItemA=_frame2.copy(), ItemB=(_frame2.copy() + 3),
ItemC=_frame2.copy(), ItemD=_frame2.copy()))
_integer_panel = Panel(dict(ItemA=_integer,
ItemB=(_integer + 34).astype('int64')))
_integer2_panel = Panel(dict(ItemA=_integer2,
ItemB=(_integer2 + 34).astype('int64')))
_mixed_panel = Panel(dict(ItemA=_mixed, ItemB=(_mixed + 3)))
_mixed2_panel = Panel(dict(ItemA=_mixed2, ItemB=(_mixed2 + 3)))
class TestExpressions(tm.TestCase):
_multiprocess_can_split_ = False
def setUp(self):
self.frame = _frame.copy()
self.frame2 = _frame2.copy()
self.mixed = _mixed.copy()
self.mixed2 = _mixed2.copy()
self.integer = _integer.copy()
self._MIN_ELEMENTS = expr._MIN_ELEMENTS
def tearDown(self):
expr._MIN_ELEMENTS = self._MIN_ELEMENTS
@nose.tools.nottest
def run_arithmetic_test(self, df, other, assert_func, check_dtype=False,
test_flex=True):
expr._MIN_ELEMENTS = 0
operations = ['add', 'sub', 'mul', 'mod', 'truediv', 'floordiv', 'pow']
if not compat.PY3:
operations.append('div')
for arith in operations:
operator_name = arith
if arith == 'div':
operator_name = 'truediv'
if test_flex:
op = lambda x, y: getattr(df, arith)(y)
op.__name__ = arith
else:
op = getattr(operator, operator_name)
expr.set_use_numexpr(False)
expected = op(df, other)
expr.set_use_numexpr(True)
result = op(df, other)
try:
if check_dtype:
if arith == 'truediv':
assert expected.dtype.kind == 'f'
assert_func(expected, result)
except Exception:
com.pprint_thing("Failed test with operator %r" % op.__name__)
raise
def test_integer_arithmetic(self):
self.run_arithmetic_test(self.integer, self.integer,
assert_frame_equal)
self.run_arithmetic_test(self.integer.iloc[:,0], self.integer.iloc[:, 0],
assert_series_equal, check_dtype=True)
@nose.tools.nottest
def run_binary_test(self, df, other, assert_func,
test_flex=False, numexpr_ops=set(['gt', 'lt', 'ge',
'le', 'eq', 'ne'])):
"""
tests solely that the result is the same whether or not numexpr is
enabled. Need to test whether the function does the correct thing
elsewhere.
"""
expr._MIN_ELEMENTS = 0
expr.set_test_mode(True)
operations = ['gt', 'lt', 'ge', 'le', 'eq', 'ne']
for arith in operations:
if test_flex:
op = lambda x, y: getattr(df, arith)(y)
op.__name__ = arith
else:
op = getattr(operator, arith)
expr.set_use_numexpr(False)
expected = op(df, other)
expr.set_use_numexpr(True)
expr.get_test_result()
result = op(df, other)
used_numexpr = expr.get_test_result()
try:
if arith in numexpr_ops:
assert used_numexpr, "Did not use numexpr as expected."
else:
assert not used_numexpr, "Used numexpr unexpectedly."
assert_func(expected, result)
except Exception:
com.pprint_thing("Failed test with operation %r" % arith)
com.pprint_thing("test_flex was %r" % test_flex)
raise
def run_frame(self, df, other, binary_comp=None, run_binary=True,
**kwargs):
self.run_arithmetic_test(df, other, assert_frame_equal,
test_flex=False, **kwargs)
self.run_arithmetic_test(df, other, assert_frame_equal, test_flex=True,
**kwargs)
if run_binary:
if binary_comp is None:
expr.set_use_numexpr(False)
binary_comp = other + 1
expr.set_use_numexpr(True)
self.run_binary_test(df, binary_comp, assert_frame_equal,
test_flex=False, **kwargs)
self.run_binary_test(df, binary_comp, assert_frame_equal,
test_flex=True, **kwargs)
def run_series(self, ser, other, binary_comp=None, **kwargs):
self.run_arithmetic_test(ser, other, assert_series_equal,
test_flex=False, **kwargs)
self.run_arithmetic_test(ser, other, assert_almost_equal,
test_flex=True, **kwargs)
        # series uses vec_compare instead of numexpr for comparisons, so the
        # binary tests are skipped here...
# if binary_comp is None:
# binary_comp = other + 1
# self.run_binary_test(ser, binary_comp, assert_frame_equal, test_flex=False,
# **kwargs)
# self.run_binary_test(ser, binary_comp, assert_frame_equal, test_flex=True,
# **kwargs)
def run_panel(self, panel, other, binary_comp=None, run_binary=True,
assert_func=assert_panel_equal, **kwargs):
self.run_arithmetic_test(panel, other, assert_func, test_flex=False,
**kwargs)
self.run_arithmetic_test(panel, other, assert_func, test_flex=True,
**kwargs)
if run_binary:
if binary_comp is None:
binary_comp = other + 1
self.run_binary_test(panel, binary_comp, assert_func,
test_flex=False, **kwargs)
self.run_binary_test(panel, binary_comp, assert_func,
test_flex=True, **kwargs)
def test_integer_arithmetic_frame(self):
self.run_frame(self.integer, self.integer)
def test_integer_arithmetic_series(self):
self.run_series(self.integer.iloc[:, 0], self.integer.iloc[:, 0])
@slow
def test_integer_panel(self):
self.run_panel(_integer2_panel, np.random.randint(1, 100))
def test_float_arithemtic_frame(self):
self.run_frame(self.frame2, self.frame2)
def test_float_arithmetic_series(self):
self.run_series(self.frame2.iloc[:, 0], self.frame2.iloc[:, 0])
@slow
def test_float_panel(self):
self.run_panel(_frame2_panel, np.random.randn() + 0.1, binary_comp=0.8)
@slow
def test_panel4d(self):
self.run_panel(tm.makePanel4D(), np.random.randn() + 0.5,
assert_func=assert_panel4d_equal, binary_comp=3)
def test_mixed_arithmetic_frame(self):
# TODO: FIGURE OUT HOW TO GET IT TO WORK...
# can't do arithmetic because comparison methods try to do *entire*
# frame instead of by-column
self.run_frame(self.mixed2, self.mixed2, run_binary=False)
def test_mixed_arithmetic_series(self):
for col in self.mixed2.columns:
self.run_series(self.mixed2[col], self.mixed2[col], binary_comp=4)
@slow
def test_mixed_panel(self):
self.run_panel(_mixed2_panel, np.random.randint(1, 100),
binary_comp=-2)
def test_float_arithemtic(self):
self.run_arithmetic_test(self.frame, self.frame, assert_frame_equal)
self.run_arithmetic_test(self.frame.iloc[:, 0], self.frame.iloc[:, 0],
assert_series_equal, check_dtype=True)
def test_mixed_arithmetic(self):
self.run_arithmetic_test(self.mixed, self.mixed, assert_frame_equal)
for col in self.mixed.columns:
self.run_arithmetic_test(self.mixed[col], self.mixed[col],
assert_series_equal)
def test_integer_with_zeros(self):
self.integer *= np.random.randint(0, 2, size=np.shape(self.integer))
self.run_arithmetic_test(self.integer, self.integer, assert_frame_equal)
self.run_arithmetic_test(self.integer.iloc[:, 0], self.integer.iloc[:, 0],
assert_series_equal)
def test_invalid(self):
# no op
result = expr._can_use_numexpr(operator.add, None, self.frame, self.frame, 'evaluate')
self.assertFalse(result)
# mixed
result = expr._can_use_numexpr(operator.add, '+', self.mixed, self.frame, 'evaluate')
self.assertFalse(result)
# min elements
result = expr._can_use_numexpr(operator.add, '+', self.frame2, self.frame2, 'evaluate')
self.assertFalse(result)
# ok, we only check on first part of expression
result = expr._can_use_numexpr(operator.add, '+', self.frame, self.frame2, 'evaluate')
self.assertTrue(result)
def test_binary_ops(self):
def testit():
for f, f2 in [ (self.frame, self.frame2), (self.mixed, self.mixed2) ]:
for op, op_str in [('add','+'),('sub','-'),('mul','*'),('div','/'),('pow','**')]:
if op == 'div':
op = getattr(operator, 'truediv', None)
else:
op = getattr(operator, op, None)
if op is not None:
result = expr._can_use_numexpr(op, op_str, f, f, 'evaluate')
self.assertNotEqual(result, f._is_mixed_type)
result = expr.evaluate(op, op_str, f, f, use_numexpr=True)
expected = expr.evaluate(op, op_str, f, f, use_numexpr=False)
tm.assert_numpy_array_equal(result,expected.values)
result = expr._can_use_numexpr(op, op_str, f2, f2, 'evaluate')
self.assertFalse(result)
expr.set_use_numexpr(False)
testit()
expr.set_use_numexpr(True)
expr.set_numexpr_threads(1)
testit()
expr.set_numexpr_threads()
testit()
def test_boolean_ops(self):
def testit():
for f, f2 in [ (self.frame, self.frame2), (self.mixed, self.mixed2) ]:
f11 = f
f12 = f + 1
f21 = f2
f22 = f2 + 1
for op, op_str in [('gt','>'),('lt','<'),('ge','>='),('le','<='),('eq','=='),('ne','!=')]:
op = getattr(operator,op)
result = expr._can_use_numexpr(op, op_str, f11, f12, 'evaluate')
self.assertNotEqual(result, f11._is_mixed_type)
result = expr.evaluate(op, op_str, f11, f12, use_numexpr=True)
expected = expr.evaluate(op, op_str, f11, f12, use_numexpr=False)
tm.assert_numpy_array_equal(result,expected.values)
result = expr._can_use_numexpr(op, op_str, f21, f22, 'evaluate')
self.assertFalse(result)
expr.set_use_numexpr(False)
testit()
expr.set_use_numexpr(True)
expr.set_numexpr_threads(1)
testit()
expr.set_numexpr_threads()
testit()
def test_where(self):
def testit():
for f in [ self.frame, self.frame2, self.mixed, self.mixed2 ]:
for cond in [ True, False ]:
c = np.empty(f.shape,dtype=np.bool_)
c.fill(cond)
result = expr.where(c, f.values, f.values+1)
expected = np.where(c, f.values, f.values+1)
tm.assert_numpy_array_equal(result,expected)
expr.set_use_numexpr(False)
testit()
expr.set_use_numexpr(True)
expr.set_numexpr_threads(1)
testit()
expr.set_numexpr_threads()
testit()
def test_bool_ops_raise_on_arithmetic(self):
df = DataFrame({'a': np.random.rand(10) > 0.5,
'b': np.random.rand(10) > 0.5})
names = 'div', 'truediv', 'floordiv', 'pow'
ops = '/', '/', '//', '**'
msg = 'operator %r not implemented for bool dtypes'
for op, name in zip(ops, names):
if not compat.PY3 or name != 'div':
f = getattr(operator, name)
err_msg = re.escape(msg % op)
with tm.assertRaisesRegexp(NotImplementedError, err_msg):
f(df, df)
with tm.assertRaisesRegexp(NotImplementedError, err_msg):
f(df.a, df.b)
with tm.assertRaisesRegexp(NotImplementedError, err_msg):
f(df.a, True)
with tm.assertRaisesRegexp(NotImplementedError, err_msg):
f(False, df.a)
with tm.assertRaisesRegexp(TypeError, err_msg):
f(False, df)
with tm.assertRaisesRegexp(TypeError, err_msg):
f(df, True)
def test_bool_ops_warn_on_arithmetic(self):
n = 10
df = DataFrame({'a': np.random.rand(n) > 0.5,
'b': np.random.rand(n) > 0.5})
names = 'add', 'mul', 'sub'
ops = '+', '*', '-'
subs = {'+': '|', '*': '&', '-': '^'}
sub_funcs = {'|': 'or_', '&': 'and_', '^': 'xor'}
for op, name in zip(ops, names):
f = getattr(operator, name)
fe = getattr(operator, sub_funcs[subs[op]])
with tm.use_numexpr(True, min_elements=5):
with tm.assert_produces_warning(check_stacklevel=False):
r = f(df, df)
e = fe(df, df)
tm.assert_frame_equal(r, e)
with tm.assert_produces_warning(check_stacklevel=False):
r = f(df.a, df.b)
e = fe(df.a, df.b)
tm.assert_series_equal(r, e)
with tm.assert_produces_warning(check_stacklevel=False):
r = f(df.a, True)
e = fe(df.a, True)
tm.assert_series_equal(r, e)
with tm.assert_produces_warning(check_stacklevel=False):
r = f(False, df.a)
e = fe(False, df.a)
tm.assert_series_equal(r, e)
with tm.assert_produces_warning(check_stacklevel=False):
r = f(False, df)
e = fe(False, df)
tm.assert_frame_equal(r, e)
with tm.assert_produces_warning(check_stacklevel=False):
r = f(df, True)
e = fe(df, True)
tm.assert_frame_equal(r, e)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
artistic-2.0
|
EmbodiedCognition/pagoda
|
pagoda/cooper.py
|
1
|
27980
|
# -*- coding: utf-8 -*-
'''This module contains a Python implementation of a forward-dynamics solver.
For detailed information about the solver, its raison d'être, and how it works,
please see the documentation for the :class:`World` class.
Further comments and documentation are available in this source file. Eventually
I hope to integrate these comments into some sort of online documentation for
the package as a whole.
'''
from __future__ import division, print_function, absolute_import
import logging
import numpy as np
import ode
import re
from . import physics
from . import skeleton
class Markers:
'''
'''
DEFAULT_CFM = 1e-6
DEFAULT_ERP = 0.3
def __init__(self, world):
self.world = world
self.jointgroup = ode.JointGroup()
self.bodies = {}
self.joints = {}
self.targets = {}
self.offsets = {}
self.channels = {}
self.data = None
self.cfms = None
self.erp = Markers.DEFAULT_ERP
# these arrays are derived from the data array.
self.visibility = None
self.positions = None
self.velocities = None
self._frame_no = -1
@property
def num_frames(self):
'''Return the number of frames of marker data.'''
return self.data.shape[0]
@property
def num_markers(self):
'''Return the number of markers in each frame of data.'''
return self.data.shape[1]
@property
def labels(self):
'''Return the names of our marker labels in canonical order.'''
return sorted(self.channels, key=lambda c: self.channels[c])
def __iter__(self):
return iter(self.data)
def __getitem__(self, idx):
return self.data[idx]
def _map_labels_to_channels(self, labels):
if isinstance(labels, str):
labels = labels.strip().split()
if isinstance(labels, (tuple, list)):
return dict((c, i) for i, c in enumerate(labels))
return labels or {}
def load_csv(self, filename, start_frame=10, max_frames=int(1e300)):
'''Load marker data from a CSV file.
The file will be imported using Pandas, which must be installed to use
this method. (``pip install pandas``)
The first line of the CSV file will be used for header information. The
"time" column will be used as the index for the data frame. There must
        be columns named 'markerNN-foo-x', 'markerNN-foo-y', 'markerNN-foo-z', and
        'markerNN-foo-c' (where NN is a two-digit marker index) for marker 'foo'
        to be included in the model.
Parameters
----------
filename : str
Name of the CSV file to load.
'''
import pandas as pd
compression = None
if filename.endswith('.gz'):
compression = 'gzip'
df = pd.read_csv(filename, compression=compression).set_index('time').fillna(-1)
# make sure the data frame's time index matches our world.
assert self.world.dt == pd.Series(df.index).diff().mean()
markers = []
for c in df.columns:
m = re.match(r'^marker\d\d-(.*)-c$', c)
if m:
markers.append(m.group(1))
self.channels = self._map_labels_to_channels(markers)
cols = [c for c in df.columns if re.match(r'^marker\d\d-.*-[xyzc]$', c)]
self.data = df[cols].values.reshape((len(df), len(markers), 4))[start_frame:]
self.data[:, :, [1, 2]] = self.data[:, :, [2, 1]]
logging.info('%s: loaded marker data %s', filename, self.data.shape)
self.process_data()
self.create_bodies()
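    # Editor's sketch of the header layout load_csv expects, for two hypothetical
    # markers 'foo' and 'bar' (the trailing '-c' column is the confidence channel;
    # missing samples become -1 and are treated as dropouts):
    #
    #     time,marker00-foo-x,marker00-foo-y,marker00-foo-z,marker00-foo-c,marker01-bar-x,...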
def load_c3d(self, filename, start_frame=0, max_frames=int(1e300)):
'''Load marker data from a C3D file.
The file will be imported using the c3d module, which must be installed
to use this method. (``pip install c3d``)
Parameters
----------
filename : str
Name of the C3D file to load.
start_frame : int, optional
Discard the first N frames. Defaults to 0.
max_frames : int, optional
Maximum number of frames to load. Defaults to loading all frames.
'''
import c3d
with open(filename, 'rb') as handle:
reader = c3d.Reader(handle)
logging.info('world frame rate %s, marker frame rate %s',
1 / self.world.dt, reader.point_rate)
# set up a map from marker label to index in the data stream.
self.channels = self._map_labels_to_channels([
s.strip() for s in reader.point_labels])
# read the actual c3d data into a numpy array.
data = []
for i, (_, frame, _) in enumerate(reader.read_frames()):
if i >= start_frame:
data.append(frame[:, [0, 1, 2, 4]])
if len(data) > max_frames:
break
self.data = np.array(data)
# scale the data to meters -- mm is a very common C3D unit.
if reader.get('POINT:UNITS').string_value.strip().lower() == 'mm':
logging.info('scaling point data from mm to m')
self.data[:, :, :3] /= 1000.
logging.info('%s: loaded marker data %s', filename, self.data.shape)
self.process_data()
self.create_bodies()
def process_data(self):
'''Process data to produce velocity and dropout information.'''
self.visibility = self.data[:, :, 3]
self.positions = self.data[:, :, :3]
self.velocities = np.zeros_like(self.positions) + 1000
for frame_no in range(1, len(self.data) - 1):
prev = self.data[frame_no - 1]
next = self.data[frame_no + 1]
for c in range(self.num_markers):
if -1 < prev[c, 3] < 100 and -1 < next[c, 3] < 100:
self.velocities[frame_no, c] = (
next[c, :3] - prev[c, :3]) / (2 * self.world.dt)
self.cfms = np.zeros_like(self.visibility) + self.DEFAULT_CFM
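    # Editor's note: velocities are central differences,
    #
    #     v[t] = (p[t + 1] - p[t - 1]) / (2 * dt),
    #
    # computed only where the marker is visible in both neighbouring frames; the
    # sentinel value 1000 left elsewhere makes attach() skip those frames.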
def create_bodies(self):
'''Create physics bodies corresponding to each marker in our data.'''
self.bodies = {}
for label in self.channels:
body = self.world.create_body(
'sphere', name='marker:{}'.format(label), radius=0.02)
body.is_kinematic = True
body.color = 0.9, 0.1, 0.1, 0.5
self.bodies[label] = body
def load_attachments(self, source, skeleton):
'''Load attachment configuration from the given text source.
The attachment configuration file has a simple format. After discarding
Unix-style comments (any part of a line that starts with the pound (#)
character), each line in the file is then expected to have the following
format::
marker-name body-name X Y Z
The marker name must correspond to an existing "channel" in our marker
data. The body name must correspond to a rigid body in the skeleton. The
X, Y, and Z coordinates specify the body-relative offsets where the
marker should be attached: 0 corresponds to the center of the body along
the given axis, while -1 and 1 correspond to the minimal (maximal,
respectively) extent of the body's bounding box along the corresponding
dimension.
Parameters
----------
source : str or file-like
A filename or file-like object that we can use to obtain text
configuration that describes how markers are attached to skeleton
bodies.
skeleton : :class:`pagoda.skeleton.Skeleton`
The skeleton to attach our marker data to.
'''
self.targets = {}
self.offsets = {}
filename = source
if isinstance(source, str):
source = open(source)
else:
filename = '(file-{})'.format(id(source))
for i, line in enumerate(source):
tokens = line.split('#')[0].strip().split()
if not tokens:
continue
label = tokens.pop(0)
if label not in self.channels:
logging.info('%s:%d: unknown marker %s', filename, i, label)
continue
if not tokens:
continue
name = tokens.pop(0)
bodies = [b for b in skeleton.bodies if b.name == name]
if len(bodies) != 1:
logging.info('%s:%d: %d skeleton bodies match %s',
filename, i, len(bodies), name)
continue
b = self.targets[label] = bodies[0]
o = self.offsets[label] = \
np.array(list(map(float, tokens))) * b.dimensions / 2
logging.info('%s <--> %s, offset %s', label, b.name, o)
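    # Editor's sketch of an attachment configuration accepted by load_attachments,
    # with hypothetical marker and body names (offsets use the [-1, 1] body-relative
    # units described above):
    #
    #     # marker        body       X     Y    Z
    #     r-shoulder      ru-arm     0.0   0.5  1.0
    #     l-knee          ll-leg    -1.0   0.0  0.0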
def detach(self):
'''Detach all marker bodies from their associated skeleton bodies.'''
self.jointgroup.empty()
self.joints = {}
def attach(self, frame_no):
'''Attach marker bodies to the corresponding skeleton bodies.
Attachments are only made for markers that are not in a dropout state in
the given frame.
Parameters
----------
frame_no : int
The frame of data we will use for attaching marker bodies.
'''
assert not self.joints
for label, j in self.channels.items():
target = self.targets.get(label)
if target is None:
continue
if self.visibility[frame_no, j] < 0:
continue
if np.linalg.norm(self.velocities[frame_no, j]) > 10:
continue
joint = ode.BallJoint(self.world.ode_world, self.jointgroup)
joint.attach(self.bodies[label].ode_body, target.ode_body)
joint.setAnchor1Rel([0, 0, 0])
joint.setAnchor2Rel(self.offsets[label])
joint.setParam(ode.ParamCFM, self.cfms[frame_no, j])
joint.setParam(ode.ParamERP, self.erp)
joint.name = label
self.joints[label] = joint
self._frame_no = frame_no
def reposition(self, frame_no):
'''Reposition markers to a specific frame of data.
Parameters
----------
frame_no : int
The frame of data where we should reposition marker bodies. Markers
will be positioned in the appropriate places in world coordinates.
In addition, linear velocities of the markers will be set according
to the data as long as there are no dropouts in neighboring frames.
'''
for label, j in self.channels.items():
body = self.bodies[label]
body.position = self.positions[frame_no, j]
body.linear_velocity = self.velocities[frame_no, j]
def distances(self):
'''Get a list of the distances between markers and their attachments.
Returns
-------
distances : ndarray of shape (num-markers, 3)
Array of distances for each marker joint in our attachment setup. If
a marker does not currently have an associated joint (e.g. because
it is not currently visible) this will contain NaN for that row.
'''
distances = []
for label in self.labels:
joint = self.joints.get(label)
distances.append([np.nan, np.nan, np.nan] if joint is None else
np.array(joint.getAnchor()) - joint.getAnchor2())
return np.array(distances)
def forces(self, dx_tm1=None):
'''Return an array of the forces exerted by marker springs.
Notes
-----
The forces exerted by the marker springs can be approximated by::
F = kp * dx
where ``dx`` is the current array of marker distances. An even more
accurate value is computed by approximating the velocity of the spring
displacement::
F = kp * dx + kd * (dx - dx_tm1) / dt
where ``dx_tm1`` is an array of distances from the previous time step.
Parameters
----------
dx_tm1 : ndarray
An array of distances from markers to their attachment targets,
measured at the previous time step.
Returns
-------
F : ndarray
An array of forces that the markers are exerting on the skeleton.
'''
cfm = self.cfms[self._frame_no][:, None]
kp = self.erp / (cfm * self.world.dt)
kd = (1 - self.erp) / cfm
dx = self.distances()
F = kp * dx
if dx_tm1 is not None:
bad = np.isnan(dx) | np.isnan(dx_tm1)
F[~bad] += (kd * (dx - dx_tm1) / self.world.dt)[~bad]
return F
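    # Editor's note: kp and kd above come from the usual ODE identification of a
    # constraint's ERP/CFM parameters with an implicit spring-damper,
    #
    #     kp = ERP / (CFM * dt),    kd = (1 - ERP) / CFM,
    #
    # so with the defaults ERP = 0.3 and CFM = 1e-6 at, say, dt = 1/120 s, each
    # marker spring has kp = 3.6e7 and kd = 7e5.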
class World(physics.World):
'''Simulate a physics world that includes an articulated skeleton model.
The "cooper" method, originally described by Cooper & Ballard (2012 Proc.
Motion in Games), uses a forward physics simulator (here, the Open Dynamics
Engine; ODE) to compute inverse motion quantities like angles and torques
using motion-capture data and a structured, articulated model of the human
skeleton. The prerequisites for this method are:
- Record some motion-capture data from a human. This is expected to result
in the locations, in world coordinates, of several motion-capture markers
at regularly-spaced intervals over time.
- Construct a simulated skeleton that matches the size and shape of the
human to some reasonable degree of accuracy. The more accurate the
skeleton, the more accurate the resulting measurements.
In broad strokes, the cooper method proceeds in two stages:
1. :func:`Inverse Kinematics <inverse_kinematics>`. The motion-capture data
are attached to the simulated skeleton using ball joints. These ball
joints are configured so that their constraints (namely, placing both
anchor points of the joint at the same location in space) are allowed to
slip; ODE implements this slippage using a spring dynamics, which
provides a natural mechanism for the articulated skeleton to interpolate
the marker data as well as possible.
At each frame during the first pass, the motion-capture markers are
placed at the appropriate location in space, and the attached articulated
skeleton "snaps" toward the markers using its inertia (from the motion in
preceding frames) as well as the spring constraints provided by the
marker joint slippage.
At each frame of this process, the articulated skeleton can be queried to
obtain joint angles for each degree of freedom. In addition, the markers
can be queried to find their constraint slippage.
2. :func:`Inverse Dynamics <inverse_dynamics>`. The marker constraints are
removed, and the joint angles computed in the first pass are used to
constrain the skeleton's movements.
At each frame during the second pass, the joints in the skeleton attempt
to follow the angles computed in the first pass; a PID controller is used
to convert the angular error value into a target angular velocity for
each joint.
The torques that ODE computes to solve this forward angle-following
problem are returned as a result of the second pass.
In general, the cooper model is a useful way of getting a physics simulator,
a model of a human skeleton, and some motion-capture data to interact
smoothly. Particularly useful for almost any simulations of human motion are
the :func:`settle_to_markers` and :func:`follow_markers` methods.
'''
def load_skeleton(self, filename, pid_params=None):
'''Create and configure a skeleton in our model.
Parameters
----------
filename : str
The name of a file containing skeleton configuration data.
pid_params : dict, optional
If given, use this dictionary to set the PID controller
parameters on each joint in the skeleton. See
:func:`pagoda.skeleton.pid` for more information.
'''
self.skeleton = skeleton.Skeleton(self)
self.skeleton.load(filename, color=(0.3, 0.5, 0.9, 0.8))
if pid_params:
self.skeleton.set_pid_params(**pid_params)
self.skeleton.erp = 0.1
self.skeleton.cfm = 0
def load_markers(self, filename, attachments, max_frames=1e100):
'''Load marker data and attachment preferences into the model.
Parameters
----------
filename : str
The name of a file containing marker data. This currently needs to
be either a .C3D or a .CSV file. CSV files must adhere to a fairly
strict column naming convention; see :func:`Markers.load_csv` for
more information.
attachments : str
The name of a text file specifying how markers are attached to
skeleton bodies.
max_frames : number, optional
Only read in this many frames of marker data. By default, the entire
data file is read into memory.
Returns
-------
markers : :class:`Markers`
Returns a markers object containing loaded marker data as well as
skeleton attachment configuration.
'''
self.markers = Markers(self)
fn = filename.lower()
if fn.endswith('.c3d'):
self.markers.load_c3d(filename, max_frames=max_frames)
elif fn.endswith('.csv') or fn.endswith('.csv.gz'):
self.markers.load_csv(filename, max_frames=max_frames)
else:
logging.fatal('%s: not sure how to load markers!', filename)
self.markers.load_attachments(attachments, self.skeleton)
def step(self, substeps=2):
'''Advance the physics world by one step.
Typically this is called as part of a :class:`pagoda.viewer.Viewer`, but
it can also be called manually (or some other stepping mechanism
entirely can be used).
'''
# by default we step by following our loaded marker data.
self.frame_no += 1
try:
next(self.follower)
except (AttributeError, StopIteration) as err:
self.reset()
def reset(self):
'''Reset the automatic process that gets called by :func:`step`.
By default this follows whatever marker data is loaded into our model.
Provide an override for this method to customize the default behavior of
the :func:`step` method.
'''
self.follower = self.follow_markers()
def settle_to_markers(self, frame_no=0, max_distance=0.05, max_iters=300,
states=None):
'''Settle the skeleton to our marker data at a specific frame.
Parameters
----------
frame_no : int, optional
Settle the skeleton to marker data at this frame. Defaults to 0.
max_distance : float, optional
The settling process will stop when the mean marker distance falls
            below this threshold. Defaults to 0.05m (5cm). Setting this too
small prevents the settling process from finishing (it will loop
indefinitely), and setting it too large prevents the skeleton from
settling to a stable state near the markers.
max_iters : int, optional
            Attempt to settle markers for at most this many iterations. Defaults
            to 300.
states : list of body states, optional
If given, set the bodies in our skeleton to these kinematic states
before starting the settling process.
'''
if states is not None:
self.skeleton.set_body_states(states)
dist = None
for _ in range(max_iters):
for _ in self._step_to_marker_frame(frame_no):
pass
dist = np.nanmean(abs(self.markers.distances()))
logging.info('settling to frame %d: marker distance %.3f', frame_no, dist)
if dist < max_distance:
return self.skeleton.get_body_states()
for b in self.skeleton.bodies:
b.linear_velocity = 0, 0, 0
b.angular_velocity = 0, 0, 0
return states
def follow_markers(self, start=0, end=1e100, states=None):
'''Iterate over a set of marker data, dragging its skeleton along.
Parameters
----------
start : int, optional
Start following marker data after this frame. Defaults to 0.
end : int, optional
Stop following marker data after this frame. Defaults to the end of
the marker data.
states : list of body states, optional
If given, set the states of the skeleton bodies to these values
before starting to follow the marker data.
'''
if states is not None:
self.skeleton.set_body_states(states)
for frame_no, frame in enumerate(self.markers):
if frame_no < start:
continue
if frame_no >= end:
break
for states in self._step_to_marker_frame(frame_no):
yield states
def _step_to_marker_frame(self, frame_no, dt=None):
'''Update the simulator to a specific frame of marker data.
This method returns a generator of body states for the skeleton! This
generator must be exhausted (e.g., by consuming this call in a for loop)
for the simulator to work properly.
This process involves the following steps:
- Move the markers to their new location:
- Detach from the skeleton
- Update marker locations
- Reattach to the skeleton
- Detect ODE collisions
- Yield the states of the bodies in the skeleton
- Advance the ODE world one step
Parameters
----------
frame_no : int
Step to this frame of marker data.
dt : float, optional
Step with this time duration. Defaults to ``self.dt``.
Returns
-------
states : sequence of state tuples
A generator of a sequence of one body state for the skeleton. This
generator must be exhausted for the simulation to work properly.
'''
# update the positions and velocities of the markers.
self.markers.detach()
self.markers.reposition(frame_no)
self.markers.attach(frame_no)
# detect collisions.
self.ode_space.collide(None, self.on_collision)
# record the state of each skeleton body.
states = self.skeleton.get_body_states()
self.skeleton.set_body_states(states)
# yield the current simulation state to our caller.
yield states
# update the ode world.
self.ode_world.step(dt or self.dt)
# clear out contact joints to prepare for the next frame.
self.ode_contactgroup.empty()
def inverse_kinematics(self, start=0, end=1e100, states=None, max_force=20):
'''Follow a set of marker data, yielding kinematic joint angles.
Parameters
----------
start : int, optional
Start following marker data after this frame. Defaults to 0.
end : int, optional
Stop following marker data after this frame. Defaults to the end of
the marker data.
states : list of body states, optional
If given, set the states of the skeleton bodies to these values
before starting to follow the marker data.
max_force : float, optional
Allow each degree of freedom in the skeleton to exert at most this
force when attempting to maintain its equilibrium position. This
defaults to 20N. Set this value higher to simulate a stiff skeleton
while following marker data.
Returns
-------
angles : sequence of angle frames
Returns a generator of joint angle data for the skeleton. One set of
joint angles will be generated for each frame of marker data between
`start` and `end`.
'''
zeros = None
if max_force > 0:
self.skeleton.enable_motors(max_force)
zeros = np.zeros(self.skeleton.num_dofs)
for _ in self.follow_markers(start, end, states):
if zeros is not None:
self.skeleton.set_target_angles(zeros)
yield self.skeleton.joint_angles
def inverse_dynamics(self, angles, start=0, end=1e100, states=None, max_force=100):
'''Follow a set of angle data, yielding dynamic joint torques.
Parameters
----------
angles : ndarray (num-frames x num-dofs)
Follow angle data provided by this array of angle values.
start : int, optional
Start following angle data after this frame. Defaults to the start
of the angle data.
end : int, optional
Stop following angle data after this frame. Defaults to the end of
the angle data.
states : list of body states, optional
If given, set the states of the skeleton bodies to these values
before starting to follow the marker data.
max_force : float, optional
Allow each degree of freedom in the skeleton to exert at most this
force when attempting to follow the given joint angles. Defaults to
100N. Setting this value to be large results in more accurate
following but can cause oscillations in the PID controllers,
resulting in noisy torques.
Returns
-------
torques : sequence of torque frames
Returns a generator of joint torque data for the skeleton. One set
of joint torques will be generated for each frame of angle data
between `start` and `end`.
'''
if states is not None:
self.skeleton.set_body_states(states)
for frame_no, frame in enumerate(angles):
if frame_no < start:
continue
if frame_no >= end:
break
self.ode_space.collide(None, self.on_collision)
states = self.skeleton.get_body_states()
self.skeleton.set_body_states(states)
# joseph's stability fix: step to compute torques, then reset the
# skeleton to the start of the step, and then step using computed
# torques. thus any numerical errors between the body states after
# stepping using angle constraints will be removed, because we
# will be stepping the model using the computed torques.
self.skeleton.enable_motors(max_force)
self.skeleton.set_target_angles(angles[frame_no])
self.ode_world.step(self.dt)
torques = self.skeleton.joint_torques
self.skeleton.disable_motors()
self.skeleton.set_body_states(states)
self.skeleton.add_torques(torques)
yield torques
self.ode_world.step(self.dt)
self.ode_contactgroup.empty()
    def forward_dynamics(self, torques, start=0, end=1e100, states=None):
'''Move the body according to a set of torque data.'''
if states is not None:
self.skeleton.set_body_states(states)
for frame_no, torque in enumerate(torques):
if frame_no < start:
continue
if frame_no >= end:
break
self.ode_space.collide(None, self.on_collision)
self.skeleton.add_torques(torque)
self.ode_world.step(self.dt)
yield
self.ode_contactgroup.empty()
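# Editor's illustrative sketch (not part of the original module): the typical
# two-pass "cooper" workflow using the API defined above. ``world`` is assumed to
# be an already-constructed World; the file arguments are placeholders.
def _example_cooper_workflow(world, skeleton_file, marker_file, attachment_file):
    world.load_skeleton(skeleton_file)
    world.load_markers(marker_file, attachment_file)
    # Pass 1: inverse kinematics -- drag the skeleton along the marker data and
    # record the joint angles it adopts in each frame.
    angles = np.asarray(list(world.inverse_kinematics(max_force=20)))
    # Pass 2: inverse dynamics -- follow those angles and record the torques
    # needed to reproduce the motion.
    torques = np.asarray(list(world.inverse_dynamics(angles, max_force=100)))
    return angles, torques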
|
mit
|
maciekcc/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/multioutput_test.py
|
136
|
1696
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-output tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.estimators._sklearn import mean_squared_error
from tensorflow.python.platform import test
class MultiOutputTest(test.TestCase):
"""Multi-output tests."""
def testMultiRegression(self):
random.seed(42)
rng = np.random.RandomState(1)
x = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(x).ravel(), np.pi * np.cos(x).ravel()]).T
regressor = learn.LinearRegressor(
feature_columns=learn.infer_real_valued_columns_from_input(x),
label_dimension=2)
regressor.fit(x, y, steps=100)
score = mean_squared_error(np.array(list(regressor.predict_scores(x))), y)
self.assertLess(score, 10, "Failed with score = {0}".format(score))
if __name__ == "__main__":
test.main()
|
apache-2.0
|
mgahsan/QuantEcon.py
|
examples/preim1.py
|
7
|
1294
|
"""
QE by Tom Sargent and John Stachurski.
Illustrates preimages of functions
"""
import matplotlib.pyplot as plt
import numpy as np
def f(x):
return 0.6 * np.cos(4 * x) + 1.4
xmin, xmax = -1, 1
x = np.linspace(xmin, xmax, 160)
y = f(x)
ya, yb = np.min(y), np.max(y)
fig, axes = plt.subplots(2, 1, figsize=(8, 8))
for ax in axes:
# Set the axes through the origin
for spine in ['left', 'bottom']:
ax.spines[spine].set_position('zero')
for spine in ['right', 'top']:
ax.spines[spine].set_color('none')
ax.set_ylim(-0.6, 3.2)
ax.set_xlim(xmin, xmax)
ax.set_yticks(())
ax.set_xticks(())
ax.plot(x, y, 'k-', lw=2, label=r'$f$')
ax.fill_between(x, ya, yb, facecolor='blue', alpha=0.05)
ax.vlines([0], ya, yb, lw=3, color='blue', label=r'range of $f$')
ax.text(0.04, -0.3, '$0$', fontsize=16)
ax = axes[0]
ax.legend(loc='upper right', frameon=False)
ybar = 1.5
ax.plot(x, x * 0 + ybar, 'k--', alpha=0.5)
ax.text(0.05, 0.8 * ybar, r'$y$', fontsize=16)
for i, z in enumerate((-0.35, 0.35)):
ax.vlines(z, 0, f(z), linestyle='--', alpha=0.5)
ax.text(z, -0.2, r'$x_{}$'.format(i), fontsize=16)
ax = axes[1]
ybar = 2.6
ax.plot(x, x * 0 + ybar, 'k--', alpha=0.5)
ax.text(0.04, 0.91 * ybar, r'$y$', fontsize=16)
plt.show()
|
bsd-3-clause
|
MaxParsons/amo-physics
|
liexperiment/raman/coherent_population_transfer_sims.py
|
1
|
4059
|
'''
Created on Feb 19, 2015
@author: Max
'''
import liexperiment.raman.coherent_population_transfer as cpt
import numpy as np
import matplotlib.pyplot as plt
import os
import os.path
from itertools import product
def export_figure_numerical_index(filename, fig):
head, tail = os.path.split(filename)
fig_nums = [int(fname[-8:-4]) for fname in os.listdir(head) if fname.split('_', 1)[0] == tail]
if not fig_nums:
next_num = 0
else:
next_num = np.max(np.array(fig_nums)) + 1
newname = tail + "_" + "{:0>4d}".format(next_num)
fig.savefig(os.path.join(head, newname + ".svg"))
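# Editor's note (illustrative, hypothetical paths): export_figure_numerical_index
# appends the next free 4-digit index to the base name. If the target directory
# already holds spectrum_0000.svg and spectrum_0001.svg, then
#
#     export_figure_numerical_index(os.path.join(fig_directory, "spectrum"), fig)
#
# writes spectrum_0002.svg.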
def spectrum_constant_pulse():
fig_directory = "C:\\Users\\Max\\amo-physics\\liexperiment\\raman\\coherent_population_transfer\\constant_detuning_rabi"
subname = "spectrum"
raman = cpt.RamanTransition()
detunings = np.linspace(-2.0e6, 2.0e6, 100)
four_pops = np.zeros_like(detunings)
nbars = np.zeros_like(detunings)
raman.n_vibrational = 5
raman.initial_state = np.zeros(2 * raman.n_vibrational, dtype="complex64")
raman.constant_rabi = 300.0e3
raman.anharmonicity = 26.0e3
raman.simulation_duration = 10.0e-6
raman.simulation_nsteps = 50
raman.trap_frequency = 1.0e6
raman.lamb_dicke = 0.28
raman.initial_state[0] = np.sqrt(0.7)
raman.initial_state[1] = np.sqrt(0.3)
fig, ax = plt.subplots(1, 1)
fig.name = "spectrum"
ax.set_title("simulated raman spectrum\n ")
ax.set_xlabel("detuning (kHz)")
ax.set_ylabel("population in |4> (blue)\n nbar (black)")
for idx, detuning in enumerate(detunings):
print "idx = " + str(idx)
raman.constant_detuning = detuning
raman.compute_dynamics()
four_pops[idx] = raman.pops_excited[-1]
nbars[idx] = raman.nbars[-1]
ax.plot(detunings / 1.0e3, four_pops, color="b", marker="o")
ax.plot(detunings / 1.0e3, nbars, color="k", marker="o")
export_figure_numerical_index(os.path.join(fig_directory, fig.name), fig)
plt.show()
def rabi_flopping():
fig_directory = "C:\\Users\\Max\\amo-physics\\liexperiment\\raman\\coherent_population_transfer\\constant_detuning_rabi"
subname = "spectrum"
raman = cpt.RamanTransition()
raman.constant_detuning = -1.00e6
raman.n_vibrational = 5
raman.initial_state = np.zeros(2 * raman.n_vibrational, dtype="complex64")
raman.constant_rabi = 100.0e3
raman.anharmonicity = 0.0e3
raman.simulation_duration = 100.0e-6
raman.simulation_nsteps = 100
raman.trap_frequency = 1.0e6
raman.lamb_dicke = 0.28
raman.initial_state[0] = np.sqrt(0.7)
raman.initial_state[1] = np.sqrt(0.3)
raman.compute_dynamics()
fig, ax = plt.subplots(1, 1)
ax.set_title("populations")
ax.set_xlabel("time")
ax.set_ylabel("populations")
plt.plot(raman.times, raman.pops_excited)
plt.show()
def test():
fig_directory = "C:\\Users\\Max\\amo-physics\\liexperiment\\raman\\coherent_population_transfer\\constant_detuning_rabi"
subname = "spectrum"
raman = cpt.RamanTransition()
detunings = np.linspace(-2.0e6, 2.0e6, 30)
four_pops = np.zeros_like(detunings)
nbars = np.zeros_like(detunings)
raman.n_vibrational = 3
raman.initial_state = np.zeros(2 * raman.n_vibrational, dtype="complex64")
raman.constant_rabi = 100.0e3
raman.anharmonicity = 26.0e3
raman.simulation_duration = 10.0e-6
raman.simulation_nsteps = 50
raman.trap_frequency = 1.0e6
raman.lamb_dicke = 0.28
raman.initial_state[0] = np.sqrt(1.0)
raman.initial_state[1] = np.sqrt(0.0)
fig, ax = plt.subplots(1, 1)
fig.name = "spectrum"
ax.set_title("simulated raman spectrum\n ")
ax.set_xlabel("detuning (kHz)")
ax.set_ylabel("population in |4> (blue)\n nbar (black)")
raman.constant_detuning = 1.0e6
raman.compute_quantum_numbers()
print(raman.hamiltonian(2.2e-6))
if __name__ == "__main__":
# test()
spectrum_constant_pulse()
# rabi_flopping()
|
mit
|
wohlert/agnosia
|
classifiers/dnn.py
|
2
|
5199
|
"""
dnn.py
Provides different network models.
"""
import numpy as np
np.random.seed(1337)
from keras.callbacks import Callback
from keras.models import Sequential, Model
from keras.layers import Activation, Dense, Dropout, Merge, Reshape, Input, merge
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers.wrappers import TimeDistributed
from keras.layers.recurrent import LSTM
from keras.optimizers import SGD
from keras.callbacks import ModelCheckpoint, EarlyStopping
from sklearn.metrics import accuracy_score
def create_single_frame(input_shape):
"""
Creates a CNN for a single image frame.
"""
model = Sequential()
# 3 32*3*3 convolution layers
model.add(Convolution2D(32, 3, 3, border_mode="valid", input_shape=input_shape))
model.add(Activation('relu'))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# 2 64*3*3 convolution layers
model.add(Convolution2D(64, 3, 3, border_mode="valid"))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# 1 128*3*3 convolution layer
model.add(Convolution2D(128, 3, 3, border_mode="valid"))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
return model
def create_multi_frame(cnn_shape, frames):
"""
Create 7 parallel CNNs that converge into a recurrent
LSTM layer to make a prediction.
"""
model = Sequential()
# Create 7 CNNs and merge the outputs
convnets = [create_single_frame(cnn_shape) for _ in range(frames)]
model.add(Merge(convnets, mode="concat"))
model.add(Reshape((128, frames)))
# LSTM layer - only keep last prediction
model.add(LSTM(128, input_dim=frames, input_length=128, return_sequences=False))
model.add(Activation("tanh"))
# Fully connected layer
model.add(Dropout(0.5))
model.add(Dense(512))
model.add(Activation("relu"))
# Prediction layer
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation("sigmoid"))
return model
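# Editor's sketch (assumption, not in the original file): create_multi_frame
# takes its input as a list of `frames` arrays, one per parallel CNN, each
# shaped (n_samples,) + cnn_shape -- the same layout the script builds for
# X_train further below. A hypothetical call for 7 frames of 3-channel
# 32x32 images:
#
#     model = create_multi_frame((3, 32, 32), frames=7)
#     model.compile(loss="binary_crossentropy", optimizer="adam")
#     # model.fit([frame_0, ..., frame_6], labels)  # list of 7 arrays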
def functional_model(image_shape, frames):
"""
Creates a neural network using the functional API for Keras.
"""
conv_input = Input(shape=image_shape)
# 3 32*3*3 convolution layers
conv1 = Convolution2D(32, 3, 3, border_mode="valid", activation="relu")(conv_input)
conv1 = Convolution2D(32, 3, 3, activation="relu")(conv1)
conv1 = Convolution2D(32, 3, 3, activation="relu")(conv1)
max1 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv1)
# 2 64*3*3 convolution layers
conv2 = Convolution2D(64, 3, 3, border_mode="valid", activation="relu")(max1)
conv2 = Convolution2D(64, 3, 3, activation="relu")(conv2)
max2 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv2)
# 1 128*3*3 convolution layer
conv3 = Convolution2D(128, 3, 3, border_mode="valid", activation="relu")(max2)
max3 = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(conv3)
# Model for convolutional network
convnet = Model(input=conv_input, output=max3)
# 7 input layers for convnets
inputs = [Input(shape=image_shape) for _ in range(frames)]
# 7 convnets
convnets = [convnet(input) for input in inputs]
merge_nets = merge(convnets, mode="concat")
reshape = Reshape((128, frames))(merge_nets)
lstm = LSTM(128, input_dim=frames, input_length=128, return_sequences=False, activation="tanh")(reshape)
# dropout1 = Dropout(0.5)(lstm)
dense1 = Dense(512, activation="relu")(lstm)
# dropout2 = Dropout(0.5)(dense1)
prediction = Dense(1, activation="sigmoid")(dense1)
return Model(input=inputs, output=prediction)
# Load data
from utils import random_split
X_train, X_test, y_train, y_test = random_split("images/", 32, 7)
_, frames, channels, width, height = np.shape(X_train)
# Reshape to match CNN shapes
X_train = list(X_train.reshape(frames, -1, channels, width, height))
X_test = list(X_test.reshape(frames, -1, channels, width, height))
image_shape = (channels, width, height)
# Create model
model = functional_model(image_shape, frames)
model.compile(loss='binary_crossentropy',
metrics=['accuracy'],
optimizer="adam")
#SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
# Create callbacks
checkpoint = ModelCheckpoint("weights.{epoch:02d}-{val_loss:.2f}.hdf5")
early_stop = EarlyStopping(patience=2)
class LossHistory(Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_batch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
callbacks = [
# checkpoint,
# early_stop,
LossHistory()]
# Fit model
batch_size = 32
nb_epochs = 10
history = model.fit(X_train, y_train.ravel(), batch_size=batch_size,
nb_epoch=nb_epochs, callbacks=callbacks)
# Evaluate model
prediction = model.predict(X_test, batch_size=batch_size)
# threshold the sigmoid outputs at 0.5 to get class labels for accuracy_score
predicted_classes = (prediction.ravel() > 0.5).astype(int)
accuracy = accuracy_score(y_test.ravel(), predicted_classes)
print(accuracy)
|
apache-2.0
|
ilyes14/scikit-learn
|
sklearn/utils/tests/test_class_weight.py
|
90
|
12846
|
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
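# Editor's sketch (illustrative, not part of scikit-learn): the "balanced"
# heuristic exercised above weights class c by n_samples / (n_classes *
# count(c)), so rarer classes get larger weights and np.dot(cw, class_counts)
# recovers n_samples. The helper below re-derives the expected values with
# plain numpy; the function name is hypothetical.
def _demo_balanced_class_weights(y):
    y = np.asarray(y)
    classes, counts = np.unique(y, return_counts=True)
    # n_samples / (n_classes * per-class count), one weight per label in `classes`
    return len(y) / (len(classes) * counts.astype(float))
# e.g. _demo_balanced_class_weights([2, 2, 2, 3, 3, 4]) -> [0.667, 1.0, 2.0]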
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
def test_compute_class_weight_dict():
classes = np.arange(3)
class_weights = {0: 1.0, 1: 2.0, 2: 3.0}
y = np.asarray([0, 0, 1, 2])
cw = compute_class_weight(class_weights, classes, y)
# When the user specifies class weights, compute_class_weight should just
# return them.
assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
# When a class weight is specified that isn't in classes, a ValueError
# should get raised
msg = 'Class label 4 not present.'
class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
msg = 'Class label -1 not present.'
class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
# duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
|
bsd-3-clause
|
ehogan/iris
|
lib/iris/tests/test_grib_load.py
|
8
|
28851
|
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import datetime
from distutils.version import StrictVersion
import cf_units
import numpy as np
import iris
import iris.exceptions
from iris.tests import mock
import iris.tests.stock
import iris.util
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import iris.plot as iplt
import iris.quickplot as qplt
if tests.GRIB_AVAILABLE:
import gribapi
import iris.fileformats.grib
def _mock_gribapi_fetch(message, key):
"""
Fake the gribapi key-fetch.
Fetch key-value from the fake message (dictionary).
If the key is not present, raise the diagnostic exception.
"""
if key in message:
return message[key]
else:
raise _mock_gribapi.GribInternalError
def _mock_gribapi__grib_is_missing(grib_message, keyname):
"""
Fake the gribapi key-existence enquiry.
Return whether the key exists in the fake message (dictionary).
"""
return (keyname not in grib_message)
def _mock_gribapi__grib_get_native_type(grib_message, keyname):
"""
Fake the gribapi type-discovery operation.
Return type of key-value in the fake message (dictionary).
If the key is not present, raise the diagnostic exception.
"""
if keyname in grib_message:
return type(grib_message[keyname])
raise _mock_gribapi.GribInternalError(keyname)
if tests.GRIB_AVAILABLE:
# Construct a mock object to mimic the gribapi for GribWrapper testing.
_mock_gribapi = mock.Mock(spec=gribapi)
_mock_gribapi.GribInternalError = Exception
_mock_gribapi.grib_get_long = mock.Mock(side_effect=_mock_gribapi_fetch)
_mock_gribapi.grib_get_string = mock.Mock(side_effect=_mock_gribapi_fetch)
_mock_gribapi.grib_get_double = mock.Mock(side_effect=_mock_gribapi_fetch)
_mock_gribapi.grib_get_double_array = mock.Mock(
side_effect=_mock_gribapi_fetch)
_mock_gribapi.grib_is_missing = mock.Mock(
side_effect=_mock_gribapi__grib_is_missing)
_mock_gribapi.grib_get_native_type = mock.Mock(
side_effect=_mock_gribapi__grib_get_native_type)
# define seconds in an hour, for general test usage
_hour_secs = 3600.0
class FakeGribMessage(dict):
"""
A 'fake grib message' object, for testing GribWrapper construction.
Behaves as a dictionary, containing key-values for message keys.
"""
def __init__(self, **kwargs):
"""
Create a fake message object.
General keys can be set/add as required via **kwargs.
The keys 'edition' and 'time_code' are specially managed.
"""
# Start with a bare dictionary
dict.__init__(self)
# Extract specially-recognised keys.
edition = kwargs.pop('edition', 1)
time_code = kwargs.pop('time_code', None)
# Set the minimally required keys.
self._init_minimal_message(edition=edition)
# Also set a time-code, if given.
if time_code is not None:
self.set_timeunit_code(time_code)
# Finally, add any remaining passed key-values.
self.update(**kwargs)
def _init_minimal_message(self, edition=1):
# Set values for all the required keys.
# 'edition' controls the edition-specific keys.
self.update({
'Ni': 1,
'Nj': 1,
'numberOfValues': 1,
'alternativeRowScanning': 0,
'centre': 'ecmf',
'year': 2007,
'month': 3,
'day': 23,
'hour': 12,
'minute': 0,
'indicatorOfUnitOfTimeRange': 1,
'shapeOfTheEarth': 6,
'gridType': 'rotated_ll',
'angleOfRotation': 0.0,
'iDirectionIncrementInDegrees': 0.036,
'jDirectionIncrementInDegrees': 0.036,
'iScansNegatively': 0,
'jScansPositively': 1,
'longitudeOfFirstGridPointInDegrees': -5.70,
'latitudeOfFirstGridPointInDegrees': -4.452,
'jPointsAreConsecutive': 0,
'values': np.array([[1.0]]),
'indicatorOfParameter': 9999,
'parameterNumber': 9999,
})
# Add edition-dependent settings.
self['edition'] = edition
if edition == 1:
self.update({
'startStep': 24,
'timeRangeIndicator': 1,
'P1': 2, 'P2': 0,
# time unit - needed AS WELL as 'indicatorOfUnitOfTimeRange'
'unitOfTime': 1,
'table2Version': 9999,
})
if edition == 2:
self.update({
'iDirectionIncrementGiven': 1,
'jDirectionIncrementGiven': 1,
'uvRelativeToGrid': 0,
'forecastTime': 24,
'productDefinitionTemplateNumber': 0,
'stepRange': 24,
'discipline': 9999,
'parameterCategory': 9999,
'tablesVersion': 4
})
def set_timeunit_code(self, timecode):
# Do timecode setting (somewhat edition-dependent).
self['indicatorOfUnitOfTimeRange'] = timecode
if self['edition'] == 1:
# for some odd reason, GRIB1 code uses *both* of these
# NOTE kludge -- the 2 keys are really the same thing
self['unitOfTime'] = timecode
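# Editor's sketch (illustrative only): how the fake message and the mocked
# gribapi defined above cooperate. FakeGribMessage is just a dict, and the
# mock's side_effect functions answer key queries from it:
#
#     msg = FakeGribMessage(edition=1, time_code=1)       # GRIB1, hours
#     _mock_gribapi.grib_get_long(msg, 'edition')         # -> 1
#     _mock_gribapi.grib_is_missing(msg, 'no_such_key')   # -> True
#     _mock_gribapi.grib_get_native_type(msg, 'centre')   # -> str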
@tests.skip_data
@tests.skip_grib
class TestGribLoad(tests.GraphicsTest):
def setUp(self):
iris.fileformats.grib.hindcast_workaround = True
def tearDown(self):
iris.fileformats.grib.hindcast_workaround = False
def test_load(self):
cubes = iris.load(tests.get_data_path(('GRIB', 'rotated_uk',
"uk_wrongparam.grib1")))
self.assertCML(cubes, ("grib_load", "rotated.cml"))
cubes = iris.load(tests.get_data_path(('GRIB', "time_processed",
"time_bound.grib1")))
self.assertCML(cubes, ("grib_load", "time_bound_grib1.cml"))
cubes = iris.load(tests.get_data_path(('GRIB', "time_processed",
"time_bound.grib2")))
self.assertCML(cubes, ("grib_load", "time_bound_grib2.cml"))
cubes = iris.load(tests.get_data_path(('GRIB', "3_layer_viz",
"3_layer.grib2")))
cubes = iris.cube.CubeList([cubes[1], cubes[0], cubes[2]])
self.assertCML(cubes, ("grib_load", "3_layer.cml"))
def test_load_masked(self):
gribfile = tests.get_data_path(
('GRIB', 'missing_values', 'missing_values.grib2'))
cubes = iris.load(gribfile)
self.assertCML(cubes, ('grib_load', 'missing_values_grib2.cml'))
@tests.skip_plot
def test_y_fastest(self):
cubes = iris.load(tests.get_data_path(("GRIB", "y_fastest",
"y_fast.grib2")))
self.assertCML(cubes, ("grib_load", "y_fastest.cml"))
iplt.contourf(cubes[0])
plt.gca().coastlines()
plt.title("y changes fastest")
self.check_graphic()
@tests.skip_plot
def test_ij_directions(self):
def old_compat_load(name):
cube = iris.load(tests.get_data_path(('GRIB', 'ij_directions',
name)))[0]
return [cube]
cubes = old_compat_load("ipos_jpos.grib2")
self.assertCML(cubes, ("grib_load", "ipos_jpos.cml"))
iplt.contourf(cubes[0])
plt.gca().coastlines()
plt.title("ipos_jpos cube")
self.check_graphic()
cubes = old_compat_load("ipos_jneg.grib2")
self.assertCML(cubes, ("grib_load", "ipos_jneg.cml"))
iplt.contourf(cubes[0])
plt.gca().coastlines()
plt.title("ipos_jneg cube")
self.check_graphic()
cubes = old_compat_load("ineg_jneg.grib2")
self.assertCML(cubes, ("grib_load", "ineg_jneg.cml"))
iplt.contourf(cubes[0])
plt.gca().coastlines()
plt.title("ineg_jneg cube")
self.check_graphic()
cubes = old_compat_load("ineg_jpos.grib2")
self.assertCML(cubes, ("grib_load", "ineg_jpos.cml"))
iplt.contourf(cubes[0])
plt.gca().coastlines()
plt.title("ineg_jpos cube")
self.check_graphic()
def test_shape_of_earth(self):
def old_compat_load(name):
cube = iris.load(tests.get_data_path(('GRIB', 'shape_of_earth',
name)))[0]
return cube
# pre-defined sphere
cube = old_compat_load("0.grib2")
self.assertCML(cube, ("grib_load", "earth_shape_0.cml"))
# custom sphere
cube = old_compat_load("1.grib2")
self.assertCML(cube, ("grib_load", "earth_shape_1.cml"))
# IAU65 oblate sphere
cube = old_compat_load("2.grib2")
self.assertCML(cube, ("grib_load", "earth_shape_2.cml"))
# custom oblate spheroid (km)
cube = old_compat_load("3.grib2")
self.assertCML(cube, ("grib_load", "earth_shape_3.cml"))
# IAG-GRS80 oblate spheroid
cube = old_compat_load("4.grib2")
self.assertCML(cube, ("grib_load", "earth_shape_4.cml"))
# WGS84
cube = old_compat_load("5.grib2")
self.assertCML(cube, ("grib_load", "earth_shape_5.cml"))
# pre-defined sphere
cube = old_compat_load("6.grib2")
self.assertCML(cube, ("grib_load", "earth_shape_6.cml"))
# custom oblate spheroid (m)
cube = old_compat_load("7.grib2")
self.assertCML(cube, ("grib_load", "earth_shape_7.cml"))
# grib1 - same as grib2 shape 6, above
cube = old_compat_load("global.grib1")
self.assertCML(cube, ("grib_load", "earth_shape_grib1.cml"))
@tests.skip_plot
def test_polar_stereo_grib1(self):
cube = iris.load_cube(tests.get_data_path(
("GRIB", "polar_stereo", "ST4.2013052210.01h")))
self.assertCML(cube, ("grib_load", "polar_stereo_grib1.cml"))
qplt.contourf(cube, norm=LogNorm())
plt.gca().coastlines()
plt.gca().gridlines()
plt.title("polar stereo grib1")
self.check_graphic()
@tests.skip_plot
def test_polar_stereo_grib2(self):
cube = iris.load_cube(tests.get_data_path(
("GRIB", "polar_stereo",
"CMC_glb_TMP_ISBL_1015_ps30km_2013052000_P006.grib2")))
self.assertCML(cube, ("grib_load", "polar_stereo_grib2.cml"))
qplt.contourf(cube)
plt.gca().coastlines()
plt.gca().gridlines()
plt.title("polar stereo grib2")
self.check_graphic()
@tests.skip_plot
def test_lambert_grib1(self):
cube = iris.load_cube(tests.get_data_path(
("GRIB", "lambert", "lambert.grib1")))
self.assertCML(cube, ("grib_load", "lambert_grib1.cml"))
qplt.contourf(cube)
plt.gca().coastlines()
plt.gca().gridlines()
plt.title("lambert grib1")
self.check_graphic()
@tests.skip_plot
def test_lambert_grib2(self):
cube = iris.load_cube(tests.get_data_path(
("GRIB", "lambert", "lambert.grib2")))
self.assertCML(cube, ("grib_load", "lambert_grib2.cml"))
qplt.contourf(cube)
plt.gca().coastlines()
plt.gca().gridlines()
plt.title("lambert grib2")
self.check_graphic()
def test_regular_gg_grib1(self):
cube = iris.load_cube(tests.get_data_path(
('GRIB', 'gaussian', 'regular_gg.grib1')))
self.assertCML(cube, ('grib_load', 'regular_gg_grib1.cml'))
def test_regular_gg_grib2(self):
cube = iris.load_cube(tests.get_data_path(
('GRIB', 'gaussian', 'regular_gg.grib2')))
self.assertCML(cube, ('grib_load', 'regular_gg_grib2.cml'))
def test_reduced_ll(self):
cube = iris.load_cube(tests.get_data_path(
("GRIB", "reduced", "reduced_ll.grib1")))
self.assertCML(cube, ("grib_load", "reduced_ll_grib1.cml"))
def test_reduced_gg(self):
cube = iris.load_cube(tests.get_data_path(
("GRIB", "reduced", "reduced_gg.grib2")))
self.assertCML(cube, ("grib_load", "reduced_gg_grib2.cml"))
def test_reduced_missing(self):
cube = iris.load_cube(tests.get_data_path(
("GRIB", "reduced", "reduced_ll_missing.grib1")))
self.assertCML(cube, ("grib_load", "reduced_ll_missing_grib1.cml"))
@tests.skip_grib
class TestGribTimecodes(tests.IrisTest):
def _run_timetests(self, test_set):
# Check the unit-handling for given units-codes and editions.
# Operates on lists of cases for various time-units and grib-editions.
# Format: (edition, code, expected-exception,
# equivalent-seconds, description-string)
with mock.patch('iris.fileformats.grib.gribapi', _mock_gribapi):
for test_controls in test_set:
(
grib_edition, timeunit_codenum,
expected_error,
timeunit_secs, timeunit_str
) = test_controls
# Construct a suitable fake test message.
message = FakeGribMessage(
edition=grib_edition,
time_code=timeunit_codenum
)
if expected_error:
# Expect GribWrapper construction to fail.
with self.assertRaises(type(expected_error)) as ar_context:
msg = iris.fileformats.grib.GribWrapper(message)
self.assertEqual(
ar_context.exception.args,
expected_error.args)
continue
# 'ELSE'...
# Expect the wrapper construction to work.
# Make a GribWrapper object and test it.
wrapped_msg = iris.fileformats.grib.GribWrapper(message)
# Check the units string.
forecast_timeunit = wrapped_msg._forecastTimeUnit
self.assertEqual(
forecast_timeunit, timeunit_str,
'Bad unit string for edition={ed:01d}, '
'unitcode={code:01d} : '
'expected="{wanted}" GOT="{got}"'.format(
ed=grib_edition,
code=timeunit_codenum,
wanted=timeunit_str,
got=forecast_timeunit
)
)
# Check the data-starttime calculation.
interval_start_to_end = (
wrapped_msg._phenomenonDateTime
- wrapped_msg._referenceDateTime
)
if grib_edition == 1:
interval_from_units = wrapped_msg.P1
else:
interval_from_units = wrapped_msg.forecastTime
interval_from_units *= datetime.timedelta(0, timeunit_secs)
self.assertEqual(
interval_start_to_end, interval_from_units,
'Inconsistent start time offset for edition={ed:01d}, '
'unitcode={code:01d} : '
'from-unit="{unit_str}" '
'from-phenom-minus-ref="{e2e_str}"'.format(
ed=grib_edition,
code=timeunit_codenum,
unit_str=interval_from_units,
e2e_str=interval_start_to_end
)
)
# Test groups of testcases for various time-units and grib-editions.
# Format: (edition, code, expected-exception,
# equivalent-seconds, description-string)
def test_timeunits_common(self):
tests = (
(1, 0, None, 60.0, 'minutes'),
(1, 1, None, _hour_secs, 'hours'),
(1, 2, None, 24.0 * _hour_secs, 'days'),
(1, 10, None, 3.0 * _hour_secs, '3 hours'),
(1, 11, None, 6.0 * _hour_secs, '6 hours'),
(1, 12, None, 12.0 * _hour_secs, '12 hours'),
)
TestGribTimecodes._run_timetests(self, tests)
@staticmethod
def _err_bad_timeunit(code):
return iris.exceptions.NotYetImplementedError(
'Unhandled time unit for forecast '
'indicatorOfUnitOfTimeRange : {code}'.format(code=code)
)
def test_timeunits_grib1_specific(self):
tests = (
(1, 13, None, 0.25 * _hour_secs, '15 minutes'),
(1, 14, None, 0.5 * _hour_secs, '30 minutes'),
(1, 254, None, 1.0, 'seconds'),
(1, 111, TestGribTimecodes._err_bad_timeunit(111), 1.0, '??'),
)
TestGribTimecodes._run_timetests(self, tests)
def test_timeunits_grib2_specific(self):
tests = (
(2, 13, None, 1.0, 'seconds'),
# check the extra grib1 keys FAIL
(2, 14, TestGribTimecodes._err_bad_timeunit(14), 0.0, '??'),
(2, 254, TestGribTimecodes._err_bad_timeunit(254), 0.0, '??'),
)
TestGribTimecodes._run_timetests(self, tests)
def test_timeunits_calendar(self):
tests = (
(1, 3, TestGribTimecodes._err_bad_timeunit(3), 0.0, 'months'),
(1, 4, TestGribTimecodes._err_bad_timeunit(4), 0.0, 'years'),
(1, 5, TestGribTimecodes._err_bad_timeunit(5), 0.0, 'decades'),
(1, 6, TestGribTimecodes._err_bad_timeunit(6), 0.0, '30 years'),
(1, 7, TestGribTimecodes._err_bad_timeunit(7), 0.0, 'centuries'),
)
TestGribTimecodes._run_timetests(self, tests)
def test_timeunits_invalid(self):
tests = (
(1, 111, TestGribTimecodes._err_bad_timeunit(111), 1.0, '??'),
(2, 27, TestGribTimecodes._err_bad_timeunit(27), 1.0, '??'),
)
TestGribTimecodes._run_timetests(self, tests)
def test_load_probability_forecast(self):
# Test GribWrapper interpretation of PDT 4.9 data.
# NOTE:
# Currently Iris has only partial support for PDT 4.9.
# Though it can load the data, key metadata (thresholds) is lost.
# At present, we are not testing for this.
# Make a testing grib message in memory, with gribapi.
grib_message = gribapi.grib_new_from_samples('GRIB2')
gribapi.grib_set_long(grib_message, 'productDefinitionTemplateNumber',
9)
gribapi.grib_set_string(grib_message, 'stepRange', '10-55')
grib_wrapper = iris.fileformats.grib.GribWrapper(grib_message)
# Define two expected datetimes for _periodEndDateTime as
# gribapi v1.9.16 mis-calculates this.
# See https://software.ecmwf.int/wiki/display/GRIB/\
# GRIB+API+version+1.9.18+released
try:
# gribapi v1.9.16 has no __version__ attribute.
gribapi_ver = gribapi.__version__
except AttributeError:
gribapi_ver = gribapi.grib_get_api_version()
if StrictVersion(gribapi_ver) < StrictVersion('1.9.18'):
exp_end_date = datetime.datetime(year=2007, month=3, day=25,
hour=12, minute=0, second=0)
else:
exp_end_date = datetime.datetime(year=2007, month=3, day=25,
hour=19, minute=0, second=0)
# Check that it captures the statistics time period info.
# (And for now, nothing else)
self.assertEqual(
grib_wrapper._referenceDateTime,
datetime.datetime(year=2007, month=3, day=23,
hour=12, minute=0, second=0)
)
self.assertEqual(
grib_wrapper._periodStartDateTime,
datetime.datetime(year=2007, month=3, day=23,
hour=22, minute=0, second=0)
)
self.assertEqual(grib_wrapper._periodEndDateTime, exp_end_date)
def test_warn_unknown_pdts(self):
# Test loading of an unrecognised GRIB Product Definition Template.
# Get a temporary file by name (deleted afterward by context).
with self.temp_filename() as temp_gribfile_path:
# Write a test grib message to the temporary file.
with open(temp_gribfile_path, 'wb') as temp_gribfile:
grib_message = gribapi.grib_new_from_samples('GRIB2')
# Set the PDT to something unexpected.
gribapi.grib_set_long(
grib_message, 'productDefinitionTemplateNumber', 5)
gribapi.grib_write(grib_message, temp_gribfile)
# Load the message from the file as a cube.
cube_generator = iris.fileformats.grib.load_cubes(
temp_gribfile_path)
cube = next(cube_generator)
# Check the cube has an extra "warning" attribute.
self.assertEqual(
cube.attributes['GRIB_LOAD_WARNING'],
'unsupported GRIB2 ProductDefinitionTemplate: #4.5'
)
@tests.skip_grib
class TestGribSimple(tests.IrisTest):
# A testing class that does not need the test data.
def mock_grib(self):
# A mock grib message, with attributes that can't be Mocks themselves.
grib = mock.Mock()
grib.startStep = 0
grib.phenomenon_points = lambda unit: 3
grib._forecastTimeUnit = "hours"
grib.productDefinitionTemplateNumber = 0
# define a level type (NB these 2 are effectively the same)
grib.levelType = 1
grib.typeOfFirstFixedSurface = 1
grib.typeOfSecondFixedSurface = 1
return grib
def cube_from_message(self, grib):
# Parameter translation now uses the GribWrapper, so we must convert
# the Mock-based fake message to a FakeGribMessage.
with mock.patch('iris.fileformats.grib.gribapi', _mock_gribapi):
grib_message = FakeGribMessage(**grib.__dict__)
wrapped_msg = iris.fileformats.grib.GribWrapper(grib_message)
cube, _, _ = iris.fileformats.rules._make_cube(
wrapped_msg, iris.fileformats.grib.load_rules.convert)
return cube
@tests.skip_grib
class TestGrib1LoadPhenomenon(TestGribSimple):
# Test recognition of grib phenomenon types.
def mock_grib(self):
grib = super(TestGrib1LoadPhenomenon, self).mock_grib()
grib.edition = 1
return grib
def test_grib1_unknownparam(self):
grib = self.mock_grib()
grib.table2Version = 0
grib.indicatorOfParameter = 9999
cube = self.cube_from_message(grib)
self.assertEqual(cube.standard_name, None)
self.assertEqual(cube.long_name, None)
self.assertEqual(cube.units, cf_units.Unit("???"))
def test_grib1_unknown_local_param(self):
grib = self.mock_grib()
grib.table2Version = 128
grib.indicatorOfParameter = 999
cube = self.cube_from_message(grib)
self.assertEqual(cube.standard_name, None)
self.assertEqual(cube.long_name, 'UNKNOWN LOCAL PARAM 999.128')
self.assertEqual(cube.units, cf_units.Unit("???"))
def test_grib1_unknown_standard_param(self):
grib = self.mock_grib()
grib.table2Version = 1
grib.indicatorOfParameter = 975
cube = self.cube_from_message(grib)
self.assertEqual(cube.standard_name, None)
self.assertEqual(cube.long_name, 'UNKNOWN LOCAL PARAM 975.1')
self.assertEqual(cube.units, cf_units.Unit("???"))
def known_grib1(self, param, standard_str, units_str):
grib = self.mock_grib()
grib.table2Version = 1
grib.indicatorOfParameter = param
cube = self.cube_from_message(grib)
self.assertEqual(cube.standard_name, standard_str)
self.assertEqual(cube.long_name, None)
self.assertEqual(cube.units, cf_units.Unit(units_str))
def test_grib1_known_standard_params(self):
# at present, there are just a very few of these
self.known_grib1(11, 'air_temperature', 'kelvin')
self.known_grib1(33, 'x_wind', 'm s-1')
self.known_grib1(34, 'y_wind', 'm s-1')
@tests.skip_grib
class TestGrib2LoadPhenomenon(TestGribSimple):
# Test recognition of grib phenomenon types.
def mock_grib(self):
grib = super(TestGrib2LoadPhenomenon, self).mock_grib()
grib.edition = 2
grib._forecastTimeUnit = 'hours'
grib._forecastTime = 0.0
grib.phenomenon_points = lambda unit: [0.0]
return grib
def known_grib2(self, discipline, category, param,
standard_name, long_name, units_str):
grib = self.mock_grib()
grib.discipline = discipline
grib.parameterCategory = category
grib.parameterNumber = param
cube = self.cube_from_message(grib)
try:
_cf_units = cf_units.Unit(units_str)
except ValueError:
_cf_units = cf_units.Unit('???')
self.assertEqual(cube.standard_name, standard_name)
self.assertEqual(cube.long_name, long_name)
self.assertEqual(cube.units, _cf_units)
def test_grib2_unknownparam(self):
grib = self.mock_grib()
grib.discipline = 999
grib.parameterCategory = 999
grib.parameterNumber = 9999
cube = self.cube_from_message(grib)
self.assertEqual(cube.standard_name, None)
self.assertEqual(cube.long_name, None)
self.assertEqual(cube.units, cf_units.Unit("???"))
def test_grib2_known_standard_params(self):
# check we know how to translate at least these params
# I.E. all the ones the older scheme provided.
full_set = [
(0, 0, 0, "air_temperature", None, "kelvin"),
(0, 0, 2, "air_potential_temperature", None, "K"),
(0, 1, 0, "specific_humidity", None, "kg kg-1"),
(0, 1, 1, "relative_humidity", None, "%"),
(0, 1, 3, None, "precipitable_water", "kg m-2"),
(0, 1, 22, None, "cloud_mixing_ratio", "kg kg-1"),
(0, 1, 13, "liquid_water_content_of_surface_snow", None, "kg m-2"),
(0, 2, 1, "wind_speed", None, "m s-1"),
(0, 2, 2, "x_wind", None, "m s-1"),
(0, 2, 3, "y_wind", None, "m s-1"),
(0, 2, 8, "lagrangian_tendency_of_air_pressure", None, "Pa s-1"),
(0, 2, 10, "atmosphere_absolute_vorticity", None, "s-1"),
(0, 3, 0, "air_pressure", None, "Pa"),
(0, 3, 1, "air_pressure_at_sea_level", None, "Pa"),
(0, 3, 3, None, "icao_standard_atmosphere_reference_height", "m"),
(0, 3, 5, "geopotential_height", None, "m"),
(0, 3, 9, "geopotential_height_anomaly", None, "m"),
(0, 6, 1, "cloud_area_fraction", None, "%"),
(0, 6, 6, "atmosphere_mass_content_of_cloud_liquid_water", None,
"kg m-2"),
(0, 7, 6,
"atmosphere_specific_convective_available_potential_energy",
None, "J kg-1"),
(0, 7, 7, None, "convective_inhibition", "J kg-1"),
(0, 7, 8, None, "storm_relative_helicity", "J kg-1"),
(0, 14, 0, "atmosphere_mole_content_of_ozone", None, "Dobson"),
(2, 0, 0, "land_area_fraction", None, "1"),
(10, 2, 0, "sea_ice_area_fraction", None, "1")]
for (discipline, category, number,
standard_name, long_name, units) in full_set:
self.known_grib2(discipline, category, number,
standard_name, long_name, units)
if __name__ == "__main__":
tests.main()
|
lgpl-3.0
|
jaeilepp/mne-python
|
examples/realtime/plot_compute_rt_average.py
|
7
|
1912
|
"""
========================================================
Compute real-time evoked responses using moving averages
========================================================
This example demonstrates how to connect to an MNE Real-time server
using the RtClient and use it together with RtEpochs to compute
evoked responses using moving averages.
Note: The MNE Real-time server (mne_rt_server), which is part of mne-cpp,
has to be running on the same computer.
"""
# Authors: Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.realtime import RtEpochs, MockRtClient
print(__doc__)
# Fiff file to simulate the realtime client
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
# select gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=True, exclude=raw.info['bads'])
# select the left-auditory condition
event_id, tmin, tmax = 1, -0.2, 0.5
# create the mock-client object
rt_client = MockRtClient(raw)
# create the real-time epochs object
rt_epochs = RtEpochs(rt_client, event_id, tmin, tmax, picks=picks,
decim=1, reject=dict(grad=4000e-13, eog=150e-6))
# start the acquisition
rt_epochs.start()
# send raw buffers
rt_client.send_data(rt_epochs, picks, tmin=0, tmax=150, buffer_size=1000)
for ii, ev in enumerate(rt_epochs.iter_evoked()):
print("Just got epoch %d" % (ii + 1))
ev.pick_types(meg=True, eog=False) # leave out the eog channel
if ii == 0:
evoked = ev
else:
evoked = mne.combine_evoked([evoked, ev], weights='nave')
plt.clf() # clear canvas
evoked.plot(axes=plt.gca()) # plot on current figure
plt.pause(0.05)
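# Editor's note (illustrative): combine_evoked(..., weights='nave') weights
# each input in proportion to its number of averages, which (per the MNE
# docs) is equivalent to averaging the underlying epochs together. So after
# k epochs the running `evoked` above is the plain mean of all k epochs:
# evoked_new = (k_prev * evoked_prev + ev) / (k_prev + 1).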
|
bsd-3-clause
|
notsambeck/siftsite
|
siftsite/sift/sift_app.py
|
1
|
8941
|
# GPU info from desktop
# Hardware Class: graphics card
# Model: "nVidia GF119 [GeForce GT 620 OEM]"
# Vendor: pci 0x10de "nVidia Corporation"
# Device: pci 0x1049 "GF119 [GeForce GT 620 OEM]"
# SubVendor: pci 0x10de "nVidia Corporation"
import numpy as np
import os
import datetime
import time
import pickle
# dataset is a sift module that imports CIFAR and provides
# image transform functions and access to saved datasets/etc.
import dataset
import sift_keras
from sift_keras import model
twitter_mode = True
if twitter_mode:
from google.cloud import vision
vision_client = vision.Client()
import tweepy
from secret import consumerSecret, consumerKey
from secret import accessToken, accessTokenSecret
# secret.py is in .gitignore, stores twitter login keys as str
auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
auth.set_access_token(accessToken, accessTokenSecret)
try:
api = tweepy.API(auth)
print('twitter connected')
# print(api.me())
except:
print('twitter connect failed')
twitter_mode = False
# optional functions for network visualization, debug
'''
import import_batch
import matplotlib.pyplot as plt
'''
# do you want to train the network? load a dataset:
# import pickle
# x, xt, y, yt = dataset.loadDataset('data/full_cifar_plus_161026.pkl')
model.load_weights(sift_keras.savefile)
batch_size = 1000 # over 2000 kills desktop
scale = 127.5 # scale factor for +/- 1
bad_wd = ['computer wallpaper',
'pattern',
'texture',
'font',
'text',
'line',
'atmosphere',
'close up',
'closeup',
'atmosphere of earth',
'grass family',
'black',
'blue',
'purple',
'green',
'material',
'phenomenon',
'grass']
boring = ['#green', '#blue', '#black', '#grass',
'#purple', '#pink', '#light', '#sky',
'#white', '#phenomenon', '#tree', '#water',
'#plant', '#tree', '#macrophotography',
'#cloud', '#plantstem', '#leaf', '#skin',
'#flora', '#photography', '#mouth']
bonus = ['#art', '#contemporaryart', '#painting', '#notart',
'#abstract', '#abstractart', '#contemporaryphotography',
'#conceptualartist', '#snapchat', '#sift']
def image_generator(increment, counter):
to_net = np.empty((batch_size, 32, 32, 3), 'float32')
for i in range(batch_size):
tr = dataset.get_transform(counter)
to_net[i] = dataset.idct(tr) # ycc format
counter = np.mod(np.add(counter, increment), dataset.quantization)
to_net = np.divide(np.subtract(to_net, scale), scale)
# print('batch stats: max={}, min={}'.format(to_net.max(), to_net.min()))
return to_net, counter
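# Editor's note (illustrative): the division above rescales values as
# (x - 127.5) / 127.5, mapping the 0-255 pixel range onto [-1, 1]
# (0 -> -1.0, 255 -> +1.0). A hypothetical call:
#
#     batch, counter = image_generator(11999, np.zeros((32, 32, 3), 'float32'))
#     # batch.shape == (batch_size, 32, 32, 3), values roughly in [-1, 1]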
def Sift(increment=11999, restart=False):
'''Non-visualized Sift program. Generates images indefinitely, stepping the
image counter by `increment` for each image. The net scores every batch;
flagged candidates are saved as
found_images/<today's date>_increment-<increment>/<count>_<score>.png
and, when twitter_mode is on, tweeted.'''
last = 0
if not restart:
print('Loading saved state...')
try:
f = open('save.file', 'rb')
counter = pickle.load(f)
images_found = pickle.load(f)
processed = pickle.load(f)
tweeted = pickle.load(f)
print('{} images found of {} processed; tweeted {}.'
.format(images_found, processed*batch_size, tweeted))
f.close()
except FileNotFoundError:
print('save.file does not exist. RESTARTING')
counter = np.zeros((32, 32, 3), dtype='float32')
images_found = 0
processed = 0
tweeted = 0
else:
print('Warning: Restarting, will save over progress')
counter = np.zeros((32, 32, 3), dtype='float32')
images_found = 0
processed = 0
tweeted = 0
# make dir found_images
if not os.path.exists('found_images'):
os.makedirs('found_images')
directory = "".join(['found_images/', str(datetime.date.today()),
'_increment-', str(increment)])
if not os.path.exists(directory):
os.makedirs(directory)
print('saving to', directory)
# MAIN LOOP
# for rep in range(1):
while True:
if processed % 10 == 0:
print('processed {} batches of {}'.format(processed, batch_size))
processed += 1
data, counter = image_generator(increment, counter)
ps = model.predict_on_batch(data)
for i in range(batch_size):
if ps[i, 1] > ps[i, 0]:
images_found += 1
now = time.time()
print('Image found: no.', images_found, ' at ', now)
# s = Image.fromarray(dataset.orderPIL(images[im]))
s = dataset.net2pil(data[i])
f = ''.join([str(images_found), '_', str(ps[i, 1]), '.png'])
s.save(os.path.join(directory, f))
if now - last > 30: # only tweet after > 30 seconds
# s.resize((512, 512)).save('twitter.png')
arr = dataset.make_arr(s)
x = dataset.expand(arr)
xim = dataset.make_pil(x, input_format='RGB',
output_format='RGB')
xim.resize((512, 512)).save('twitter.png')
# twitter module
if twitter_mode:
with open(os.path.join(directory, f), 'rb') as tw:
content = tw.read()
try:
goog = vision_client.image(content=content)
labels = goog.detect_labels()
labels = [label for label in labels
if label.description not in bad_wd]
# num = labels[0].score
# word = labels[0].description
# print(word, num)
ds = ['#'+label.description.replace(' ', '')
for label in labels]
except:
print('Google api failed, not tweeting')
continue # or tweet without this continue
tweet = '''#DEFINITELY #FOUND #AN #IMAGE.
#painting #notapainting #art #notart'''
# skip boring images
if all([d in boring for d in ds]) or \
(ds[0] in boring and labels[0].score < .98):
print('boring image, not tweeting it.')
print('_'.join(ds))
continue
# different kinds of tweets
bot = i % 100
if bot <= 5:
ds.append('@pixelsorter')
elif 5 < bot <= 10:
ds.append('@WordPadBot')
elif bot == 99:
# spam mode
my_fs = api.followers()
u = my_fs[np.random.randint(0, len(my_fs))]
u_fs = api.followers(u.screen_name)
usr = u_fs[np.random.randint(0, len(u_fs))]
at = usr.screen_name
ds = ['@{} IS THIS YOUR IMAGE?'
.format(at)] + ds
else:
for _ in range(3):
r = np.random.randint(0, len(bonus))
ds.append(bonus[r])
# make tweet, cap length
tweet = '''IMAGE FOUND. #{}
{}'''.format(str(images_found), ' '.join(ds))
if len(tweet) > 130:
tweet = tweet[:110]
try:
print('tweeting:', tweet)
api.update_with_media('twitter.png', tweet)
last = now
tweeted += 1
except:
print('Tweet failed')
# save progress
if processed % 100 == 0:
print('saving progress to save.file')
f = open('save.file', 'wb')
pickle.dump(counter, f)
pickle.dump(images_found, f)
pickle.dump(processed, f)
pickle.dump(tweeted, f)
f.close()
if __name__ == '__main__':
print()
print('SIFTnonvisual loaded. Twitter={}. For visuals, run sift.py.'
.format(twitter_mode))
print()
Sift()
|
mit
|
ioam/lancet
|
lancet/core.py
|
1
|
39453
|
#
# Lancet core
#
import os, itertools, copy
import re, glob, string
import json
import param
try:
import numpy as np
np_ftypes = np.sctypes['float']
except:
np, np_ftypes = None, []
try: from pandas import DataFrame
except: DataFrame = None # pyflakes:ignore (try/except import)
try: from holoviews import Table
except: Table = None # pyflakes:ignore (try/except import)
from collections import defaultdict, OrderedDict
float_types = [float] + np_ftypes
def identityfn(x): return x
def fp_repr(x): return str(x) if (type(x) in float_types) else repr(x)
def set_fp_precision(value):
"""
Function to set the floating precision across lancet.
"""
Arguments.set_default('fp_precision', value)
def to_table(args, vdims=[]):
"Helper function to convet an Args object to a HoloViews Table"
if not Table:
return "HoloViews Table not available"
kdims = [dim for dim in args.constant_keys + args.varying_keys
if dim not in vdims]
items = [tuple([spec[k] for k in kdims+vdims])
for spec in args.specs]
return Table(items, kdims=kdims, vdims=vdims)
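# Editor's sketch (hypothetical data, illustrative only): for an Args-like
# object whose specs are [{'a': 1, 'b': 10}, {'a': 2, 'b': 10}], with
# constant_keys == ['b'] and varying_keys == ['a'], to_table(args) builds
# Table([(10, 1), (10, 2)], kdims=['b', 'a'], vdims=[]); passing
# vdims=['a'] instead moves 'a' into the value dimensions.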
#=====================#
# Argument Specifiers #
#=====================#
class PrettyPrinted(object):
"""
A mixin class for generating pretty-printed representations.
"""
def pprint_args(self, pos_args, keyword_args, infix_operator=None, extra_params={}):
"""
Method to define the positional arguments and keyword order
for pretty printing.
"""
if infix_operator and not (len(pos_args)==2 and keyword_args==[]):
raise Exception('Infix format requires exactly two'
' positional arguments and no keywords')
(kwargs,_,_,_) = self._pprint_args
self._pprint_args = (keyword_args + kwargs, pos_args, infix_operator, extra_params)
def _pprint(self, cycle=False, flat=False, annotate=False, onlychanged=True, level=1, tab = ' '):
"""
Pretty printer that prints only the modified keywords and
generates flat representations (for repr) and optionally
annotates the top of the repr with a comment.
"""
(kwargs, pos_args, infix_operator, extra_params) = self._pprint_args
(br, indent) = ('' if flat else '\n', '' if flat else tab * level)
prettify = lambda x: isinstance(x, PrettyPrinted) and not flat
pretty = lambda x: x._pprint(flat=flat, level=level+1) if prettify(x) else repr(x)
params = dict(self.get_param_values())
show_lexsort = getattr(self, '_lexorder', None) is not None
modified = [k for (k,v) in self.get_param_values(onlychanged=onlychanged)]
pkwargs = [(k, params[k]) for k in kwargs if (k in modified)] + list(extra_params.items())
arg_list = [(k,params[k]) for k in pos_args] + pkwargs
lines = []
if annotate: # Optional annotating comment
len_ckeys, len_vkeys = len(self.constant_keys), len(self.varying_keys)
info_triple = (len(self),
', %d constant key(s)' % len_ckeys if len_ckeys else '',
', %d varying key(s)' % len_vkeys if len_vkeys else '')
annotation = '# == %d items%s%s ==\n' % info_triple
lines = [annotation]
if show_lexsort: lines.append('(')
if cycle:
lines.append('%s(...)' % self.__class__.__name__)
elif infix_operator:
level = level - 1
triple = (pretty(params[pos_args[0]]), infix_operator, pretty(params[pos_args[1]]))
lines.append('%s %s %s' % triple)
else:
lines.append('%s(' % self.__class__.__name__)
for (k,v) in arg_list:
lines.append('%s%s=%s' % (br+indent, k, pretty(v)))
lines.append(',')
lines = lines[:-1] +[br+(tab*(level-1))+')'] # Remove trailing comma
if show_lexsort:
lines.append(').lexsort(%s)' % ', '.join(repr(el) for el in self._lexorder))
return ''.join(lines)
def __repr__(self):
return self._pprint(flat=True, onlychanged=False)
def __str__(self):
return self._pprint()
class Arguments(PrettyPrinted, param.Parameterized):
"""
The abstract, base class that defines the core interface and
methods for all members of the Arguments family of classes,
including either the simple, static members of Args below, or the
sophisticated parameter exploration algorithms subclassing from
DynamicArgs defined in dynamic.py.
The Args subclass may be used directly and forms the root of one
family of classes that have statically defined or precomputed
argument sets (defined below). The second subfamily are the
DynamicArgs, designed to allow more sophisticated, online
parameter space exploration techniques such as hill climbing,
bisection search, genetic algorithms and so on.
"""
fp_precision = param.Integer(default=4, constant=True, doc='''
The floating point precision to use for floating point values.
Unlike other basic Python types, floats need care with their
representation as you only want to display up to the precision
actually specified. A floating point precision of 0 casts
number to integers before representing them.''')
def __init__(self, **params):
self._pprint_args = ([],[],None,{})
self.pprint_args([],['fp_precision', 'dynamic'])
super(Arguments,self).__init__(**params)
# Some types cannot be sorted easily (e.g. numpy arrays)
self.unsortable_keys = []
def __iter__(self): return self
def __contains__(self, value):
return value in (self.constant_keys + self.varying_keys)
@classmethod
def spec_formatter(cls, spec):
" Formats the elements of an argument set appropriately"
return type(spec)((k, str(v)) for (k,v) in spec.items())
@property
def constant_keys(self):
"""
Returns the list of parameter names whose values are constant
as the argument specifier is iterated. Note that the union of
constant and varying_keys should partition the entire set of
keys in the case where there are no unsortable keys.
"""
raise NotImplementedError
@property
def constant_items(self):
"""
Returns the set of constant items as a list of tuples. This
allows easy conversion to dictionary format. Note, the items
should be supplied in the same key ordering as for
constant_keys for consistency.
"""
raise NotImplementedError
@property
def varying_keys(self):
"""
Returns the list of parameters whose values vary as the
argument specifier is iterated. Whenever it is possible, keys
should be sorted from slowest to fastest varying and
sorted alphanumerically within groups that vary at the same
rate.
"""
raise NotImplementedError
def round_floats(self, specs, fp_precision):
_round_float = lambda v, fp: np.round(v, fp) if (type(v) in np_ftypes) else round(v, fp)
_round = (lambda v, fp: int(v)) if fp_precision==0 else _round_float
return (dict((k, _round(v, fp_precision) if (type(v) in float_types) else v)
for (k,v) in spec.items()) for spec in specs)
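# Editorial sketch (not part of the original module): with the default
# fp_precision of 4, float values are rounded before being stored in a spec
# while non-float values pass through untouched, e.g.
#
#     list(Args().round_floats([{'x': 0.123456, 'n': 3}], 4))
#     # -> [{'x': 0.1235, 'n': 3}]
#
# With fp_precision=0 floats are cast to int instead (e.g. 0.9 -> 0).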
def __next__(self):
"""
Called to get a list of specifications: dictionaries with
parameter name keys and string values.
"""
raise StopIteration
next = __next__
def copy(self):
"""
Convenience method to avoid using the specifier without
exhausting it.
"""
return copy.copy(self)
def _collect_by_key(self,specs):
"""
Returns a dictionary like object with the lists of values
collapsed by their respective key. Useful to find varying vs
constant keys and to find how fast keys vary.
"""
# Collect (key, value) tuples as list of lists, flatten with chain
allkeys = itertools.chain.from_iterable(
[[(k, run[k]) for k in run] for run in specs])
collection = defaultdict(list)
for (k,v) in allkeys: collection[k].append(v)
return collection
def _operator(self, operator, other):
identities = [isinstance(el, Identity) for el in [self, other]]
if not any(identities): return operator(self,other)
if all(identities): return Identity()
elif identities[1]: return self
else: return other
def __add__(self, other):
"""
Concatenates two argument specifiers.
"""
return self._operator(Concatenate, other)
def __mul__(self, other):
"""
Takes the Cartesian product of two argument specifiers.
"""
return self._operator(CartesianProduct, other)
def _cartesian_product(self, first_specs, second_specs):
"""
Takes the Cartesian product of the specifications. Result will
contain N specifications where N = len(first_specs) *
len(second_specs) and keys are merged.
Example: [{'a':1},{'b':2}] * [{'c':3},{'d':4}] =
[{'a':1,'c':3},{'a':1,'d':4},{'b':2,'c':3},{'b':2,'d':4}]
"""
return [ dict(zip(
list(s1.keys()) + list(s2.keys()),
list(s1.values()) + list(s2.values())
))
for s1 in first_specs for s2 in second_specs ]
def summary(self):
"""
A succinct summary of the argument specifier. Unlike the repr,
a summary does not have to be complete but must supply the
most relevant information about the object to the user.
"""
print("Items: %s" % len(self))
varying_keys = ', '.join('%r' % k for k in self.varying_keys)
print("Varying Keys: %s" % varying_keys)
items = ', '.join(['%s=%r' % (k,v)
for (k,v) in self.constant_items])
if self.constant_items:
print("Constant Items: %s" % items)
class Identity(Arguments):
"""
The identity element for any Arguments object 'args' under the *
operator (CartesianProduct) and + operator (Concatenate). The
following identities hold:
args is (Identity() * args)
args is (args * Identity())
args is (Identity() + args)
args is (args + Identity())
Note that the empty Args() object can also fulfill the role of
Identity under the addition operator.
"""
fp_precision = param.Integer(default=None, allow_None=True,
precedence=(-1), constant=True, doc='''
fp_precision is disabled as Identity() never contains any
arguments.''')
def __eq__(self, other): return isinstance(other, Identity)
def __repr__(self): return "Identity()"
def __str__(self): return repr(self)
def __nonzero__(self): raise ValueError("The boolean value of Identity is undefined")
def __bool__(self): raise ValueError("The boolean value of Identity is undefined")
class Args(Arguments):
"""
An Arguments class that supports statically specified or
precomputed argument sets. It may be used directly to specify
argument values but also forms the base class for a family of more
specific static Argument classes. Each subclass is less flexible
and general but allows arguments to be easily and succinctly
specified. For instance, the Range subclass allows parameter
ranges to be easily declared.
The constructor of Args accepts argument definitions in two
different formats. The keyword format allows constant arguments to
be specified directly and easily. For instance:
>>> v1 = Args(a=2, b=3)
>>> v1
Args(fp_precision=4,a=2,b=3)
The alternative input format takes an explicit list of the
argument specifications:
>>> v2 = Args([{'a':2, 'b':3}]) # Equivalent behaviour to above
>>> v1.specs == v2.specs
True
This latter format is completely flexible and general, allowing
any arbitrary list of arguments to be specified as desired. This
is not generally recommended however as the structure of a
parameter space is often expressed more clearly by composing
together simpler, more succinct Args objects with the
CartesianProduct (*) or Concatenation (+) operators.
"""
specs = param.List(default=[], constant=True, doc='''
The static list of specifications (ie. dictionaries) to be
returned by the specifier. Float values are rounded according
to fp_precision.''')
def __init__(self, specs=None, fp_precision=None, **params):
if fp_precision is None: fp_precision = Arguments.fp_precision
raw_specs, params, explicit = self._build_specs(specs, params, fp_precision)
super(Args, self).__init__(fp_precision=fp_precision, specs=raw_specs, **params)
self._lexorder = None
if explicit:
self.pprint_args(['specs'],[])
else: # Present in kwarg format
self.pprint_args([], self.constant_keys, None,
OrderedDict(sorted(self.constant_items)))
def _build_specs(self, specs, kwargs, fp_precision):
"""
Returns the specs, the remaining kwargs and whether or not the
constructor was called with kwarg or explicit specs.
"""
if specs is None:
overrides = param.ParamOverrides(self, kwargs,
allow_extra_keywords=True)
extra_kwargs = overrides.extra_keywords()
kwargs = dict([(k,v) for (k,v) in kwargs.items()
if k not in extra_kwargs])
rounded_specs = list(self.round_floats([extra_kwargs],
fp_precision))
if extra_kwargs=={}: return [], kwargs, True
else: return rounded_specs, kwargs, False
return list(self.round_floats(specs, fp_precision)), kwargs, True
def __iter__(self):
self._exhausted = False
return self
def __next__(self):
if self._exhausted:
raise StopIteration
else:
self._exhausted=True
return self.specs
next = __next__
def _unique(self, sequence, idfun=repr):
"""
Note: repr() must be implemented properly on all objects. This
is implicitly assumed by Lancet when Python objects need to be
formatted to string representation.
"""
seen = {}
return [seen.setdefault(idfun(e),e) for e in sequence
if idfun(e) not in seen]
def show(self, exclude=[]):
"""
Convenience method to inspect the available argument values in
human-readable format. The ordering of keys is determined by
how quickly they vary.
The exclude list allows specific keys to be excluded for
readability (e.g. to hide long, absolute filenames).
"""
ordering = self.constant_keys + self.varying_keys
spec_lines = [', '.join(['%s=%s' % (k, s[k]) for k in ordering
if (k in s) and (k not in exclude)])
for s in self.specs]
print('\n'.join(['%d: %s' % (i,l) for (i,l) in enumerate(spec_lines)]))
def lexsort(self, *order):
"""
The lexical sort order is specified by a list of string
arguments. Each string is a key name prefixed by '+' or '-'
for ascending and descending sort respectively. If the key is
not found in the operand's set of varying keys, it is ignored.
"""
if not order:
raise Exception("Please specify the keys for sorting, using a "
"'+' prefix for ascending and a "
"'-' prefix for descending order.")
if not set(el[1:] for el in order).issubset(set(self.varying_keys)):
raise Exception("Key(s) specified not in the set of varying keys.")
sorted_args = copy.deepcopy(self)
specs_param = sorted_args.params('specs')
specs_param.constant = False
sorted_args.specs = self._lexsorted_specs(order)
specs_param.constant = True
sorted_args._lexorder = order
return sorted_args
def _lexsorted_specs(self, order):
"""
A lexsort is specified using normal key string prefixed by '+'
(for ascending) or '-' for (for descending).
Note that in Python 2, if a key is missing, None is returned
(smallest Python value). In Python 3, an Exception will be
raised regarding comparison of heterogeneous types.
"""
specs = self.specs[:]
if not all(el[0] in ['+', '-'] for el in order):
raise Exception("Please specify the keys for sorting, use"
"'+' prefix for ascending,"
"'-' for descending.)")
sort_cycles = [(el[1:], True if el[0]=='+' else False)
for el in reversed(order)
if el[1:] in self.varying_keys]
for (key, ascending) in sort_cycles:
specs = sorted(specs, key=lambda s: s.get(key, None),
reverse=(not ascending))
return specs
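# Worked example (editorial sketch, not in the original source): because each
# sorting pass above is a stable sort over the previous ordering, the first
# key named in the order is the primary sort key and later keys break ties.
# For instance:
#
#     specs = [{'a': 2, 'b': 1}, {'a': 1, 'b': 1}, {'a': 1, 'b': 2}]
#     Args(specs).lexsort('+a', '-b').specs
#     # -> [{'a': 1, 'b': 2}, {'a': 1, 'b': 1}, {'a': 2, 'b': 1}]
#
# i.e. ascending in 'a', and descending in 'b' within equal 'a' values.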
@property
def constant_keys(self):
collection = self._collect_by_key(self.specs)
return [k for k in sorted(collection) if
(len(self._unique(collection[k])) == 1)]
@property
def constant_items(self):
collection = self._collect_by_key(self.specs)
return [(k,collection[k][0]) for k in self.constant_keys]
@property
def varying_keys(self):
collection = self._collect_by_key(self.specs)
constant_set = set(self.constant_keys)
unordered_varying = set(collection.keys()).difference(constant_set)
# Finding out how fast keys are varying
grouplens = [(len([len(list(y)) for (_,y)
in itertools.groupby(collection[k])]),k)
for k in collection
if (k not in self.unsortable_keys)]
varying_counts = [(n,k) for (n,k) in sorted(grouplens) if (k in unordered_varying)]
# Grouping keys with common frequency alphanumerically (desired behaviour).
ddict = defaultdict(list)
for (n,k) in varying_counts: ddict[n].append(k)
alphagroups = [sorted(ddict[k]) for k in sorted(ddict)]
return [el for group in alphagroups for el in group] + sorted(self.unsortable_keys)
@property
def dframe(self):
return DataFrame(self.specs) if DataFrame else "Pandas not available"
@property
def table(self):
return to_table(self)
def __len__(self): return len(self.specs)
class Concatenate(Args):
"""
Concatenate is the sequential composition of two specifiers. The
specifier created by the composition (first + second) generates
the arguments in first followed by the arguments in second.
"""
first = param.ClassSelector(default=None, class_=Args, allow_None=True, constant=True, doc='''
The first specifier in the concatenation.''')
second = param.ClassSelector(default=None, class_=Args, allow_None=True, constant=True, doc='''
The second specifier in the concatenation.''')
def __init__(self, first, second):
max_precision = max(first.fp_precision, second.fp_precision)
specs = first.specs + second.specs
super(Concatenate, self).__init__(specs, fp_precision=max_precision,
first=first, second=second)
self.pprint_args(['first', 'second'],[], infix_operator='+')
class CartesianProduct(Args):
"""
CartesianProduct is the Cartesian product of two specifiers. The
specifier created by the composition (first * second) generates
the Cartesian product of the arguments in first with the
arguments in second. Note that len(first * second) =
len(first)*len(second)
"""
first = param.ClassSelector(default=None, class_=Args, allow_None=True,
constant=True, doc='''The first specifier in the Cartesian product.''')
second = param.ClassSelector(default=None, class_=Args, allow_None=True,
constant=True, doc='''The second specifier in the Cartesian product.''')
def __init__(self, first, second):
max_precision = max(first.fp_precision, second.fp_precision)
specs = self._cartesian_product(first.specs, second.specs)
overlap = (set(first.varying_keys + first.constant_keys)
& set(second.varying_keys + second.constant_keys))
assert overlap == set(), ('Sets of keys cannot overlap '
'between argument specifiers '
'in Cartesian product.')
super(CartesianProduct, self).__init__(specs, fp_precision=max_precision,
first=first, second=second)
self.pprint_args(['first', 'second'],[], infix_operator='*')
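# Editorial sketch of the two composition operators defined above (not part
# of the original source):
#
#     (Args(a=1) + Args(a=2)) * Args(b=3)
#     # specs -> [{'a': 1, 'b': 3}, {'a': 2, 'b': 3}]
#
# Concatenation chains the 'a' specs, and the Cartesian product then merges
# each of them with the 'b' spec; the assertion above means the operands of
# '*' may not share any key names.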
class Range(Args):
"""
Range generates an argument from a numerically interpolated range
which is linear by default. An optional function can be specified
to sample a numeric range with regular intervals.
"""
key = param.String(default='', constant=True, doc='''
The key assigned to the values computed over the numeric range.''')
start_value = param.Number(default=None, allow_None=True, constant=True, doc='''
The starting numeric value of the range.''')
end_value = param.Number(default=None, allow_None=True, constant=True, doc='''
The ending numeric value of the range (inclusive).''')
steps = param.Integer(default=2, constant=True, bounds=(1,None), doc='''
The number of steps to interpolate over. Default is 2 which
returns the start and end values without interpolation.''')
# Can't this be a lambda?
mapfn = param.Callable(default=identityfn, constant=True, doc='''
The function to be mapped across the linear range. The
identity function is used by default.''')
def __init__(self, key, start_value, end_value, steps=2, mapfn=identityfn, **params):
values = self.linspace(start_value, end_value, steps)
specs = [{key:mapfn(val)} for val in values ]
super(Range, self).__init__(specs, key=key, start_value=start_value,
end_value=end_value, steps=steps,
mapfn=mapfn, **params)
self.pprint_args(['key', 'start_value'], ['end_value', 'steps'])
def linspace(self, start, stop, n):
""" Simple replacement for numpy linspace"""
if n == 1: return [start]
L = [0.0] * n
nm1 = n - 1
nm1inv = 1.0 / nm1
for i in range(n):
L[i] = nm1inv * (start*(nm1 - i) + stop*i)
return L
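# Editorial sketch (not in the original source): five evenly spaced values
# between 0 and 1 under the key 'alpha':
#
#     Range('alpha', 0, 1, steps=5).specs
#     # -> [{'alpha': 0.0}, {'alpha': 0.25}, {'alpha': 0.5},
#     #     {'alpha': 0.75}, {'alpha': 1.0}]
#
# Supplying mapfn (e.g. mapfn=lambda x: 10**x) applies a function to each
# interpolated value before it is stored in the specs.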
class List(Args):
"""
An argument specifier that takes its values from a given list.
"""
values = param.List(default=[], constant=True, doc='''
The list values that are to be returned by the specifier''')
key = param.String(default='default', constant=True, doc='''
The key assigned to the elements of the supplied list.''')
def __init__(self, key, values, **params):
specs = [{key:val} for val in values]
super(List, self).__init__(specs, key=key, values=values, **params)
self.pprint_args(['key', 'values'], [])
class Log(Args):
"""
Specifier that loads arguments from a log file in task id (tid)
order. This wrapper class allows a concise representation of file
logs with the option of adding the task id to the loaded
specifications.
For full control over the arguments, you can use this class to
create a fully specified Args object as follows:
Args(Log.extract_log(<log_file>).values()),
"""
log_path = param.String(default=None, allow_None=True, constant=True, doc='''
The relative or absolute path to the log file. If a relative
path is given, the absolute path is computed relative to
os.getcwd().''')
tid_key = param.String(default='tid', constant=True, allow_None=True, doc='''
If not None, the key given to the tid values included in the
loaded specifications. If None, the tid number is ignored.''')
@staticmethod
def extract_log(log_path, dict_type=dict):
"""
Parses the log file generated by a launcher and returns
dictionary with tid keys and specification values.
Ordering can be maintained by setting dict_type to the
appropriate constructor (i.e. OrderedDict). Keys are converted
from unicode to strings for kwarg use.
"""
log_path = (log_path if os.path.isfile(log_path)
else os.path.join(os.getcwd(), log_path))
with open(log_path,'r') as log:
splits = (line.split() for line in log)
uzipped = ((int(split[0]), json.loads(" ".join(split[1:]))) for split in splits)
szipped = [(i, dict((str(k),v) for (k,v) in d.items())) for (i,d) in uzipped]
return dict_type(szipped)
@staticmethod
def write_log(log_path, data, allow_append=True):
"""
Writes the supplied specifications to the log path. The data
may be supplied either as an Args object or as a list of
dictionaries.
By default, specifications will be appropriately appended to
an existing log file. This can be disabled by setting
allow_append to False.
"""
append = os.path.isfile(log_path)
islist = isinstance(data, list)
if append and not allow_append:
raise Exception('Appending has been disabled'
' and file %s exists' % log_path)
if not (islist or isinstance(data, Args)):
raise Exception('Can only write Args objects or dictionary'
' lists to log file.')
specs = data if islist else data.specs
if not all(isinstance(el,dict) for el in specs):
raise Exception('List elements must be dictionaries.')
log_file = open(log_path, 'r+') if append else open(log_path, 'w')
start = int(log_file.readlines()[-1].split()[0])+1 if append else 0
ascending_indices = range(start, start+len(data))
log_str = '\n'.join(['%d %s' % (tid, json.dumps(el))
for (tid, el) in zip(ascending_indices,specs)])
log_file.write("\n"+log_str if append else log_str)
log_file.close()
def __init__(self, log_path, tid_key='tid', **params):
log_items = sorted(Log.extract_log(log_path).items())
if tid_key is None:
log_specs = [spec for (_, spec) in log_items]
else:
log_specs = [dict(list(spec.items())+[(tid_key,idx)])
for (idx, spec) in log_items]
super(Log, self).__init__(log_specs,
log_path=log_path,
tid_key=tid_key,
**params)
self.pprint_args(['log_path'], ['tid_key'])
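# Editorial sketch of the log format used by extract_log/write_log above:
# each line holds an integer tid followed by a JSON dictionary, e.g. a file
# containing the two lines
#
#     0 {"a": 1}
#     1 {"a": 2}
#
# loads as Log('<log_file>').specs == [{'a': 1, 'tid': 0}, {'a': 2, 'tid': 1}];
# with tid_key=None the tid numbers are omitted from the specs.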
class FilePattern(Args):
"""
A FilePattern specifier allows files to be matched and information
encoded in filenames to be extracted via an extended form of
globbing. This object may be used to specify filename arguments to
CommandTemplates when launching jobs but it also very useful for
collating files for analysis.
For instance, you can find the absolute filenames of all npz files
in a 'data' subdirectory (relative to the root) that start with
'timeseries' using the pattern 'data/timeseries*.npz'.
In addition to globbing supported by the glob module, patterns can
extract metadata encoded in filenames using a subset of the Python
format specification syntax. To illustrate, you can use
'data/timeseries-{date}.npz' to record the date strings associated
with matched files. Note that a particular named field can only
be used once in a given pattern.
By default metadata is extracted as strings but format types are
supported in the usual manner
eg. 'data/timeseries-{day:d}-{month:d}.npz' will extract the day
and month from the filename as integer values. Only field names
and types are recognised with other format specification syntax
ignored. Type codes supported: 'd', 'b', 'o', 'x', 'e','E','f',
'F', 'g', 'G', 'n' (if omitted, the result is a string by default).
Note that ordering is determined via ascending alphanumeric sort
and that actual filenames should not include any globbing
characters, namely: '?','*','[' and ']' (general good practice for
filenames anyway).
"""
key = param.String(default=None, allow_None=True, constant=True, doc='''
The key name given to the matched file path strings.''')
pattern = param.String(default=None, allow_None=True, constant=True, doc='''
The pattern files are to be searched against.''')
root = param.String(default=None, allow_None=True, constant=True, doc='''
The root directory from which patterns are to be loaded. The
root is set relative to os.getcwd().''')
@classmethod
def directory(cls, directory, root=None, extension=None, **kwargs):
"""
Load all the files in a given directory selecting only files
with the given extension if specified. The given kwargs are
passed through to the normal constructor.
"""
root = os.getcwd() if root is None else root
suffix = '' if extension is None else '.' + extension.rsplit('.')[-1]
pattern = directory + os.sep + '*' + suffix
key = os.path.join(root, directory,'*').rsplit(os.sep)[-2]
format_parse = list(string.Formatter().parse(key))
if not all([el is None for el in list(zip(*format_parse))[1]]):
raise Exception('Directory cannot contain format field specifications')
return cls(key, pattern, root, **kwargs)
def __init__(self, key, pattern, root=None, **params):
root = os.getcwd() if root is None else root
specs = self._load_expansion(key, root, pattern)
self.files = [s[key] for s in specs]
super(FilePattern, self).__init__(specs, key=key, pattern=pattern,
root=root, **params)
self.pprint_args(['key', 'pattern'], ['root'])
def fields(self):
"""
Return the fields specified in the pattern using Python's
formatting mini-language.
"""
parse = list(string.Formatter().parse(self.pattern))
return [f for f in list(zip(*parse))[1] if f is not None]
def _load_expansion(self, key, root, pattern):
"""
Loads the files that match the given pattern.
"""
path_pattern = os.path.join(root, pattern)
expanded_paths = self._expand_pattern(path_pattern)
specs=[]
for (path, tags) in expanded_paths:
filelist = [os.path.join(path,f) for f in os.listdir(path)] if os.path.isdir(path) else [path]
for filepath in filelist:
specs.append(dict(tags,**{key:os.path.abspath(filepath)}))
return sorted(specs, key=lambda s: s[key])
def _expand_pattern(self, pattern):
"""
From the pattern decomposition, finds the absolute paths
matching the pattern.
"""
(globpattern, regexp, fields, types) = self._decompose_pattern(pattern)
filelist = glob.glob(globpattern)
expansion = []
for fname in filelist:
if fields == []:
expansion.append((fname, {}))
continue
match = re.match(regexp, fname)
if match is None: continue
match_items = match.groupdict().items()
tags = dict((k,types.get(k, str)(v)) for (k,v) in match_items)
expansion.append((fname, tags))
return expansion
def _decompose_pattern(self, pattern):
"""
Given a path pattern with format declaration, generates a
four-tuple (glob_pattern, regexp pattern, fields, type map)
"""
sep = '~lancet~sep~'
float_codes = ['e','E','f', 'F','g', 'G', 'n']
typecodes = dict([(k,float) for k in float_codes]
+ [('b',bin), ('d',int), ('o',oct), ('x',hex)])
parse = list(string.Formatter().parse(pattern))
text, fields, codes, _ = zip(*parse)
# Finding the field types from format string
types = []
for (field, code) in zip(fields, codes):
if code in ['', None]: continue
constructor = typecodes.get(code[-1], None)
if constructor: types += [(field, constructor)]
stars = ['' if not f else '*' for f in fields]
globpat = ''.join(txt + star for (txt, star) in zip(text, stars))
refields = ['' if not f else sep+('(?P<%s>.*?)'% f)+sep for f in fields]
parts = ''.join(txt + group for (txt, group) in zip(text, refields)).split(sep)
for i in range(0, len(parts), 2): parts[i] = re.escape(parts[i])
regexp_pattern = ''.join(parts).replace('\\*','.*')
fields = list(f for f in fields if f)
return globpat, regexp_pattern , fields, dict(types)
@property
def table(self):
return to_table(self, [self.key])
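# Editorial sketch (not in the original source): given files such as
# 'data/timeseries-3-12.npz', the pattern both globs the filenames and
# extracts typed fields from them:
#
#     fp = FilePattern('filename', 'data/timeseries-{day:d}-{month:d}.npz')
#     fp.fields()   # -> ['day', 'month']
#     fp.specs[0]   # -> {'day': 3, 'month': 12,
#                   #     'filename': '<absolute path to the matched file>'}
#
# fp.files lists just the matched absolute file paths.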
# Importing from filetypes requires PrettyPrinted to be defined first
from lancet.filetypes import FileType
class FileInfo(Args):
"""
Loads metadata from a set of filenames. For instance, you can load
metadata associated with a series of image files given by a
FilePattern. Unlike other explicit instances of Args, this object
extends the values of an existing Args object. Once you have
loaded the metadata, FileInfo allows you to load the file data
into a pandas DataFrame or a HoloViews Table.
"""
source = param.ClassSelector(class_ = Args, doc='''
The argument specifier that supplies the file paths.''')
filetype = param.ClassSelector(constant=True, class_= FileType, doc='''
A FileType object to be applied to each file path.''')
key = param.String(constant=True, doc='''
The key used to find the file paths for inspection.''')
ignore = param.List(default=[], constant=True, doc='''
Metadata keys that are to be explicitly ignored. ''')
def __init__(self, source, key, filetype, ignore = [], **params):
specs = self._info(source, key, filetype, ignore)
super(FileInfo, self).__init__(specs,
source = source,
filetype = filetype,
key = key,
ignore=ignore,
**params)
self.pprint_args(['source', 'key', 'filetype'], ['ignore'])
@classmethod
def from_pattern(cls, pattern, filetype=None, key='filename', root=None, ignore=[]):
"""
Convenience method to directly chain a pattern processed by
FilePattern into a FileInfo instance.
Note that if a default filetype has been set on FileInfo, the
filetype argument may be omitted.
"""
filepattern = FilePattern(key, pattern, root=root)
if FileInfo.filetype and filetype is None:
filetype = FileInfo.filetype
elif filetype is None:
raise Exception("The filetype argument must be supplied unless "
"an appropriate default has been specified as "
"FileInfo.filetype")
return FileInfo(filepattern, key, filetype, ignore=ignore)
@property
def table(self):
return to_table(self, [self.key])
def load(self, val, **kwargs):
"""
Load the file contents into the supplied pandas dataframe or
HoloViews Table. This allows a selection to be made over the
metadata before loading the file contents (may be slow).
"""
if Table and isinstance(val, Table):
return self.load_table(val, **kwargs)
elif DataFrame and isinstance(val, DataFrame):
return self.load_dframe(val, **kwargs)
else:
raise Exception("Type %s not a DataFrame or Table." % type(val))
def load_table(self, table):
"""
Load the file contents into the supplied Table using the
specified key and filetype. The input table should have the
filenames as values which will be replaced by the loaded
data. If data_key is specified, this key will be used to index
the loaded data to retrieve the specified item.
"""
items, data_keys = [], None
for key, filename in table.items():
data_dict = self.filetype.data(filename[0])
current_keys = tuple(sorted(data_dict.keys()))
values = [data_dict[k] for k in current_keys]
if data_keys is None:
data_keys = current_keys
elif data_keys != current_keys:
raise Exception("Data keys are inconsistent")
items.append((key, values))
return Table(items, kdims=table.kdims, vdims=data_keys)
def load_dframe(self, dframe):
"""
Load the file contents into the supplied dataframe using the
specified key and filetype.
"""
filename_series = dframe[self.key]
loaded_data = filename_series.map(self.filetype.data)
keys = [list(el.keys()) for el in loaded_data.values]
for key in set().union(*keys):
key_exists = key in dframe.columns
if key_exists:
self.warning("Appending '_data' suffix to data key %r to avoid"
"overwriting existing metadata with the same name." % key)
suffix = '_data' if key_exists else ''
dframe[key+suffix] = loaded_data.map(lambda x: x.get(key, np.nan))
return dframe
def _info(self, source, key, filetype, ignore):
"""
Generates the union of the source.specs and the metadata
dictionary loaded by the filetype object.
"""
specs, mdata = [], {}
mdata_clashes = set()
for spec in source.specs:
if key not in spec:
raise Exception("Key %r not available in 'source'." % key)
mdata = dict((k,v) for (k,v) in filetype.metadata(spec[key]).items()
if k not in ignore)
mdata_spec = {}
mdata_spec.update(spec)
mdata_spec.update(mdata)
specs.append(mdata_spec)
mdata_clashes = mdata_clashes | (set(spec.keys()) & set(mdata.keys()))
# Metadata clashes can be avoided by using the ignore list.
if mdata_clashes:
self.warning("Loaded metadata keys overriding source keys.")
return specs
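# Editorial usage sketch (assumes a suitable FileType, e.g. an ImageFile class
# from lancet.filetypes, is available):
#
#     pattern = FilePattern('filename', 'images/*.png')
#     info = FileInfo(pattern, 'filename', ImageFile())
#     info.dframe              # metadata as a pandas DataFrame (needs pandas)
#     info.load(info.dframe)   # additionally load the file contents
#
# FileInfo.from_pattern('images/*.png', filetype=ImageFile(), key='filename')
# collapses the first two steps into a single call.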
|
bsd-3-clause
|
schreiberx/sweet
|
benchmarks_sphere/paper_jrn_nla_rexi_linear/sph_rexi_linear_paper_gaussian_ts_comparison_earth_scale_cheyenne_performance/pp_plot_csv_pdf.py
|
1
|
3040
|
#! /usr/bin/python3
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import sys
first = True
zoom_lat = True
zoom_lat = False
zoom_lat = 'eta' in sys.argv[1]
fontsize=8
figsize=(9, 3)
files = sys.argv[1:]
refdataavailable = False
if files[0] == 'reference':
reffilename = files[1]
files = files[2:]
print("Loading reference solution from '"+reffilename+"'")
refdata = np.loadtxt(reffilename, skiprows=3)
refdata = refdata[1:,1:]
refdataavailable = True
#for filename in sys.argv[1:]:
for filename in files:
print(filename)
data = np.loadtxt(filename, skiprows=3)
labelsx = data[0,1:]
labelsy = data[1:,0]
data = data[1:,1:]
if zoom_lat:
while labelsy[1] < 10:
labelsy = labelsy[1:]
data = data[1:]
while labelsy[-2] > 80:
labelsy = labelsy[0:-2]
data = data[0:-2]
if first:
lon_min = labelsx[0]
lon_max = labelsx[-1]
lat_min = labelsy[0]
lat_max = labelsy[-1]
new_labelsx = np.linspace(lon_min, lon_max, 7)
new_labelsy = np.linspace(lat_min, lat_max, 7)
labelsx = np.interp(new_labelsx, labelsx, labelsx)
labelsy = np.interp(new_labelsy, labelsy, labelsy)
if first:
cmin = np.amin(data)
cmax = np.amax(data)
if 'eta' in filename:
cmin *= 1.2
cmax *= 1.2
if cmax-cmin < 0.3 and cmin > 0.9 and cmax < 1.1:
hs = 0.005
cmin = 0.96
cmax = 1.04
cmid = 0.5*(cmax+cmin)
contour_levels = np.append(np.arange(cmin, cmid-hs, hs), np.arange(cmid+hs, cmax, hs))
elif cmax-cmin < 3000 and cmin > 9000 and cmax < 11000:
hs = 30
cmin = 9000
cmax = 11000
cmid = 0.5*(cmax+cmin)
#contour_levels = np.append(np.arange(cmin, cmid-hs, hs), np.arange(cmid+hs, cmax, hs))
contour_levels = np.arange(cmin, cmax, hs)
else:
if 'eta' in filename:
hs = 2e-5
contour_levels = np.append(np.arange(-1e-4, 0, hs), np.arange(hs, 1e-4, hs))
else:
hs = 5
contour_levels = np.append(np.arange(900, 1000-hs, hs), np.arange(1000+hs, 1100, hs))
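# Editorial note (not in the original script): for the 9000-11000 branch above
# (hs=30) the levels are simply np.arange(9000, 11000, 30), i.e.
# 9000, 9030, ..., 10980; the other branches omit levels in a small band
# around the mid value so that no contour is drawn exactly at the centre of
# the colour range.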
extent = (labelsx[0], labelsx[-1], labelsy[0], labelsy[-1])
plt.figure(figsize=figsize)
plt.imshow(data, interpolation='nearest', extent=extent, origin='lower', aspect='auto', cmap=plt.get_cmap('rainbow'))
plt.clim(cmin, cmax)
cbar = plt.colorbar()
cbar.ax.tick_params(labelsize=fontsize)
plt.title(filename, fontsize=fontsize)
if refdataavailable:
CS = plt.contour(refdata, colors="black", origin='lower', extent=extent, vmin=cmin, vmax=cmax, levels=contour_levels, linewidths=0.35)
for c in CS.collections:
c.set_dashes([(0, (2.0, 2.0))])
plt.contour(data, colors="black", origin='lower', extent=extent, vmin=cmin, vmax=cmax, levels=contour_levels, linewidths=0.35)
ax = plt.gca()
ax.xaxis.set_label_coords(0.5, -0.075)
plt.xticks(labelsx, fontsize=fontsize)
plt.xlabel("Longitude", fontsize=fontsize)
plt.yticks(labelsy, fontsize=fontsize)
plt.ylabel("Latitude", fontsize=fontsize)
outfilename = filename.replace('.csv', '.pdf')
print(outfilename)
plt.savefig(outfilename)
plt.close()
first = False
|
mit
|
abhishekgahlot/scikit-learn
|
examples/cluster/plot_feature_agglomeration_vs_univariate_selection.py
|
30
|
3909
|
"""
==============================================
Feature agglomeration vs. univariate selection
==============================================
This example compares 2 dimensionality reduction strategies:
- univariate feature selection with Anova
- feature agglomeration with Ward hierarchical clustering
Both methods are compared in a regression problem using
a BayesianRidge as supervised estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
print(__doc__)
import shutil
import tempfile
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg, ndimage
from sklearn.feature_extraction.image import grid_to_graph
from sklearn import feature_selection
from sklearn.cluster import FeatureAgglomeration
from sklearn.linear_model import BayesianRidge
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.externals.joblib import Memory
from sklearn.cross_validation import KFold
###############################################################################
# Generate data
n_samples = 200
size = 40 # image size
roi_size = 15
snr = 5.
np.random.seed(0)
mask = np.ones([size, size], dtype=np.bool)
coef = np.zeros((size, size))
coef[0:roi_size, 0:roi_size] = -1.
coef[-roi_size:, -roi_size:] = 1.
X = np.random.randn(n_samples, size ** 2)
for x in X: # smooth data
x[:] = ndimage.gaussian_filter(x.reshape(size, size), sigma=1.0).ravel()
X -= X.mean(axis=0)
X /= X.std(axis=0)
y = np.dot(X, coef.ravel())
noise = np.random.randn(y.shape[0])
noise_coef = (linalg.norm(y, 2) / np.exp(snr / 20.)) / linalg.norm(noise, 2)
y += noise_coef * noise # add noise
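# Editorial note (not in the original example): noise_coef rescales the noise
# so that ||y||_2 / ||noise_coef * noise||_2 = exp(snr / 20); with snr = 5 the
# resulting signal-to-noise ratio is exp(0.25) ~ 1.28.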
###############################################################################
# Compute the coefs of a Bayesian Ridge with GridSearch
cv = KFold(len(y), 2) # cross-validation generator for model selection
ridge = BayesianRidge()
cachedir = tempfile.mkdtemp()
mem = Memory(cachedir=cachedir, verbose=1)
# Ward agglomeration followed by BayesianRidge
connectivity = grid_to_graph(n_x=size, n_y=size)
ward = FeatureAgglomeration(n_clusters=10, connectivity=connectivity,
memory=mem, n_components=1)
clf = Pipeline([('ward', ward), ('ridge', ridge)])
# Select the optimal number of parcels with grid search
clf = GridSearchCV(clf, {'ward__n_clusters': [10, 20, 30]}, n_jobs=1, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_agglomeration_ = coef_.reshape(size, size)
# Anova univariate feature selection followed by BayesianRidge
f_regression = mem.cache(feature_selection.f_regression) # caching function
anova = feature_selection.SelectPercentile(f_regression)
clf = Pipeline([('anova', anova), ('ridge', ridge)])
# Select the optimal percentage of features with grid search
clf = GridSearchCV(clf, {'anova__percentile': [5, 10, 20]}, cv=cv)
clf.fit(X, y) # set the best parameters
coef_ = clf.best_estimator_.steps[-1][1].coef_
coef_ = clf.best_estimator_.steps[0][1].inverse_transform(coef_)
coef_selection_ = coef_.reshape(size, size)
###############################################################################
# Inverse the transformation to plot the results on an image
plt.close('all')
plt.figure(figsize=(7.3, 2.7))
plt.subplot(1, 3, 1)
plt.imshow(coef, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("True weights")
plt.subplot(1, 3, 2)
plt.imshow(coef_selection_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Selection")
plt.subplot(1, 3, 3)
plt.imshow(coef_agglomeration_, interpolation="nearest", cmap=plt.cm.RdBu_r)
plt.title("Feature Agglomeration")
plt.subplots_adjust(0.04, 0.0, 0.98, 0.94, 0.16, 0.26)
plt.show()
# Attempt to remove the temporary cachedir, but don't worry if it fails
shutil.rmtree(cachedir, ignore_errors=True)
|
bsd-3-clause
|
tamasgal/km3pipe
|
examples/monitoring/pmt_rates.py
|
1
|
4444
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ts=4 sw=4 et
"""
======================
Mean PMT Rates Monitor
======================
The following script calculates the mean PMT rates and updates the plot.
"""
# Author: Tamas Gal <tgal@km3net.de>
# License: MIT
from datetime import datetime
import io
from collections import defaultdict
import threading
import time
import km3pipe as kp
from km3pipe.io.daq import TMCHData
import numpy as np
import matplotlib
matplotlib.use("Agg") # noqa
import matplotlib.pyplot as plt
import km3pipe.style as kpst
kpst.use("km3pipe")
__author__ = "Tamas Gal"
__email__ = "tgal@km3net.de"
VERSION = "1.0"
log = kp.logger.get_logger("PMTrates")
class PMTRates(kp.Module):
def configure(self):
self.detector = self.require("detector")
self.du = self.require("du")
self.interval = self.get("interval") or 10
self.plot_path = self.get("plot_path") or "km3web/plots/pmtrates.png"
self.max_x = 800
self.index = 0
self.rates = defaultdict(list)
self.rates_matrix = np.full((18 * 31, self.max_x), np.nan)
self.lock = threading.Lock()
self.thread = threading.Thread(target=self.run, args=())
self.thread.daemon = True
self.thread.start()
def run(self):
interval = self.interval
while True:
time.sleep(interval)
now = datetime.now()
self.add_column()
self.update_plot()
with self.lock:
self.rates = defaultdict(list)
delta_t = (datetime.now() - now).total_seconds()
remaining_t = self.interval - delta_t
log.info(
"Delta t: {} -> waiting for {}s".format(
delta_t, self.interval - delta_t
)
)
if remaining_t < 0:
log.error(
"Can't keep up with plot production. " "Increase the interval!"
)
interval = 1
else:
interval = remaining_t
def add_column(self):
m = np.roll(self.rates_matrix, -1, 1)
y_range = 18 * 31
mean_rates = np.full(y_range, np.nan)
for i in range(y_range):
if i not in self.rates:
continue
mean_rates[i] = np.mean(self.rates[i])
m[:, self.max_x - 1] = mean_rates
self.rates_matrix = m
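# Editorial sketch (not in the original module): the matrix acts as a
# fixed-width scrolling buffer. np.roll(m, -1, 1) shifts every column one
# step to the left (the oldest column wraps around to the end) and that
# wrapped column is then overwritten with the freshly computed means:
#
#     m = np.array([[1., 2., 3.]])
#     m = np.roll(m, -1, 1)   # -> [[2., 3., 1.]]
#     m[:, -1] = 4.           # -> [[2., 3., 4.]]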
def update_plot(self):
print("Updating plot at {}".format(self.plot_path))
now = time.time()
max_x = self.max_x
interval = self.interval
def xlabel_func(timestamp):
return datetime.utcfromtimestamp(timestamp).strftime("%H:%M")
m = self.rates_matrix
m[m > 15000] = 15000
m[m < 5000] = 5000
fig, ax = plt.subplots(figsize=(10, 6))
ax.imshow(m, origin="lower")
ax.set_title(
"Mean PMT Rates for DU{} (colours from 5kHz to 15kHz)\n{}".format(
self.du, datetime.utcnow()
)
)
ax.set_xlabel("UTC time [{}s/px]".format(interval))
plt.yticks(
[i * 31 for i in range(18)], ["Floor {}".format(f) for f in range(1, 19)]
)
xtics_int = range(0, max_x, int(max_x / 10))
plt.xticks(
[i for i in xtics_int],
[xlabel_func(now - (max_x - i) * interval) for i in xtics_int],
)
fig.tight_layout()
plt.savefig(self.plot_path)
plt.close("all")
def process(self, blob):
tmch_data = TMCHData(io.BytesIO(blob["CHData"]))
dom_id = tmch_data.dom_id
if dom_id not in self.detector.doms:
return blob
du, floor, _ = self.detector.doms[dom_id]
if du != self.du:
return blob
y_base = (floor - 1) * 31
for channel_id, rate in enumerate(tmch_data.pmt_rates):
idx = y_base + channel_id
with self.lock:
self.rates[idx].append(rate)
return blob
def main():
detector = kp.hardware.Detector(det_id=29)
pipe = kp.Pipeline(timeit=True)
pipe.attach(
kp.io.CHPump,
host="192.168.0.110",
port=5553,
tags="IO_MONIT",
timeout=60 * 60 * 24 * 7,
max_queue=1000,
)
pipe.attach(PMTRates, detector=detector, du=2, interval=2)
pipe.drain()
if __name__ == "__main__":
main()
|
mit
|