repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (15 classes)
---|---|---|---|---|---
cactusbin/nyt | matplotlib/lib/mpl_toolkits/axes_grid1/axes_rgb.py | 7 | 4658 |
import numpy as np
from .axes_divider import make_axes_locatable, Size, locatable_axes_factory
def make_rgb_axes(ax, pad=0.01, axes_class=None, add_all=True):
"""
pad : fraction of the axes height.
"""
divider = make_axes_locatable(ax)
pad_size = Size.Fraction(pad, Size.AxesY(ax))
xsize = Size.Fraction((1.-2.*pad)/3., Size.AxesX(ax))
ysize = Size.Fraction((1.-2.*pad)/3., Size.AxesY(ax))
divider.set_horizontal([Size.AxesX(ax), pad_size, xsize])
divider.set_vertical([ysize, pad_size, ysize, pad_size, ysize])
ax.set_axes_locator(divider.new_locator(0, 0, ny1=-1))
ax_rgb = []
if axes_class is None:
try:
axes_class = locatable_axes_factory(ax._axes_class)
except AttributeError:
axes_class = locatable_axes_factory(type(ax))
for ny in [4, 2, 0]:
ax1 = axes_class(ax.get_figure(),
ax.get_position(original=True),
sharex=ax, sharey=ax)
locator = divider.new_locator(nx=2, ny=ny)
ax1.set_axes_locator(locator)
for t in ax1.yaxis.get_ticklabels() + ax1.xaxis.get_ticklabels():
t.set_visible(False)
try:
for axis in ax1.axis.values():
axis.major_ticklabels.set_visible(False)
except AttributeError:
pass
ax_rgb.append(ax1)
if add_all:
fig = ax.get_figure()
for ax1 in ax_rgb:
fig.add_axes(ax1)
return ax_rgb
#import matplotlib.axes as maxes
def imshow_rgb(ax, r, g, b, **kwargs):
ny, nx = r.shape
R = np.zeros([ny, nx, 3], dtype="d")
R[:,:,0] = r
G = np.zeros_like(R)
G[:,:,1] = g
B = np.zeros_like(R)
B[:,:,2] = b
RGB = R + G + B
im_rgb = ax.imshow(RGB, **kwargs)
return im_rgb
from .mpl_axes import Axes
class RGBAxesBase(object):
def __init__(self, *kl, **kwargs):
pad = kwargs.pop("pad", 0.0)
add_all = kwargs.pop("add_all", True)
axes_class = kwargs.pop("axes_class", None)
if axes_class is None:
axes_class = self._defaultAxesClass
ax = axes_class(*kl, **kwargs)
divider = make_axes_locatable(ax)
pad_size = Size.Fraction(pad, Size.AxesY(ax))
xsize = Size.Fraction((1.-2.*pad)/3., Size.AxesX(ax))
ysize = Size.Fraction((1.-2.*pad)/3., Size.AxesY(ax))
divider.set_horizontal([Size.AxesX(ax), pad_size, xsize])
divider.set_vertical([ysize, pad_size, ysize, pad_size, ysize])
ax.set_axes_locator(divider.new_locator(0, 0, ny1=-1))
ax_rgb = []
for ny in [4, 2, 0]:
ax1 = axes_class(ax.get_figure(),
ax.get_position(original=True),
sharex=ax, sharey=ax, **kwargs)
locator = divider.new_locator(nx=2, ny=ny)
ax1.set_axes_locator(locator)
ax1.axis[:].toggle(ticklabels=False)
#for t in ax1.yaxis.get_ticklabels() + ax1.xaxis.get_ticklabels():
# t.set_visible(False)
#if hasattr(ax1, "_axislines"):
# for axisline in ax1._axislines.values():
# axisline.major_ticklabels.set_visible(False)
ax_rgb.append(ax1)
self.RGB = ax
self.R, self.G, self.B = ax_rgb
if add_all:
fig = ax.get_figure()
fig.add_axes(ax)
self.add_RGB_to_figure()
self._config_axes()
def _config_axes(self):
for ax1 in [self.RGB, self.R, self.G, self.B]:
#for sp1 in ax1.spines.values():
# sp1.set_color("w")
ax1.axis[:].line.set_color("w")
ax1.axis[:].major_ticks.set_mec("w")
# for tick in ax1.xaxis.get_major_ticks() + ax1.yaxis.get_major_ticks():
# tick.tick1line.set_mec("w")
# tick.tick2line.set_mec("w")
def add_RGB_to_figure(self):
self.RGB.get_figure().add_axes(self.R)
self.RGB.get_figure().add_axes(self.G)
self.RGB.get_figure().add_axes(self.B)
def imshow_rgb(self, r, g, b, **kwargs):
ny, nx = r.shape
R = np.zeros([ny, nx, 3], dtype="d")
R[:,:,0] = r
G = np.zeros_like(R)
G[:,:,1] = g
B = np.zeros_like(R)
B[:,:,2] = b
RGB = R + G + B
im_rgb = self.RGB.imshow(RGB, **kwargs)
im_r = self.R.imshow(R, **kwargs)
im_g = self.G.imshow(G, **kwargs)
im_b = self.B.imshow(B, **kwargs)
return im_rgb, im_r, im_g, im_b
class RGBAxes(RGBAxesBase):
_defaultAxesClass = Axes
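# Illustrative usage sketch (hypothetical helper, not part of the upstream module):
# build an RGBAxes in a fresh figure and fill it with three random channels.
def _example_rgb_axes():
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = RGBAxes(fig, [0.1, 0.1, 0.8, 0.8], pad=0.0)
    r = np.random.random((16, 16))
    g = np.random.random((16, 16))
    b = np.random.random((16, 16))
    ax.imshow_rgb(r, g, b, origin="lower", interpolation="nearest")
    return fig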
| unlicense |
riteshkasat/scipy_2015_sklearn_tutorial | notebooks/figures/plot_digits_datasets.py | 19 | 2750 |
# Taken from example in scikit-learn examples
# Authors: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import datasets, decomposition  # only these two are used below
def digits_plot():
digits = datasets.load_digits(n_class=6)
n_digits = 500
X = digits.data[:n_digits]
y = digits.target[:n_digits]
n_samples, n_features = X.shape
n_neighbors = 30
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(X.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 1e5:
# don't show points that are too close
# set a high threshold to basically turn this off
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
n_img_per_row = 10
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
print("Computing PCA projection")
pca = decomposition.PCA(n_components=2).fit(X)
X_pca = pca.transform(X)
plot_embedding(X_pca, "Principal Components projection of the digits")
plt.figure()
plt.matshow(pca.components_[0, :].reshape(8, 8), cmap="gray")
plt.axis('off')
plt.figure()
plt.matshow(pca.components_[1, :].reshape(8, 8), cmap="gray")
plt.axis('off')
plt.show()
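# Minimal driver sketch (assumes the file is executed directly rather than imported):
if __name__ == '__main__':
    digits_plot()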
| cc0-1.0 |
mtkwock/Genome-Matching | code/thinkbayes2.py | 1 | 67439 |
"""This file contains code for use with "Think Stats" and
"Think Bayes", both by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
"""This file contains class definitions for:
Hist: represents a histogram (map from values to integer frequencies).
Pmf: represents a probability mass function (map from values to probs).
_DictWrapper: private parent class for Hist and Pmf.
Cdf: represents a discrete cumulative distribution function
Pdf: represents a continuous probability density function
"""
import bisect
import copy
import logging
import math
import random
import re
from collections import Counter
from operator import itemgetter
import thinkplot
import numpy as np
import pandas
import scipy.stats
import scipy.special
import scipy.ndimage
ROOT2 = math.sqrt(2)
def RandomSeed(x):
"""Initialize the random and np.random generators.
x: int seed
"""
random.seed(x)
np.random.seed(x)
def Odds(p):
"""Computes odds for a given probability.
Example: p=0.75 means 75 for and 25 against, or 3:1 odds in favor.
Note: when p=1, the formula for odds divides by zero, which is
normally undefined. But I think it is reasonable to define Odds(1)
to be infinity, so that's what this function does.
p: float 0-1
Returns: float odds
"""
if p == 1:
return float('inf')
return p / (1 - p)
def Probability(o):
"""Computes the probability corresponding to given odds.
Example: o=2 means 2:1 odds in favor, or 2/3 probability
o: float odds, strictly positive
Returns: float probability
"""
return o / (o + 1)
def Probability2(yes, no):
"""Computes the probability corresponding to given odds.
Example: yes=2, no=1 means 2:1 odds in favor, or 2/3 probability.
yes, no: int or float odds in favor
"""
return yes / (yes + no)
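# Illustrative sketch (hypothetical helper, not from the upstream module):
# round-tripping the odds helpers above with arbitrary numbers.
def _example_odds():
    assert Odds(0.75) == 3.0          # 75 for, 25 against
    assert Probability(3.0) == 0.75
    return Probability2(3, 1)         # also 0.75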
class Interpolator(object):
"""Represents a mapping between sorted sequences; performs linear interp.
Attributes:
xs: sorted list
ys: sorted list
"""
def __init__(self, xs, ys):
self.xs = xs
self.ys = ys
def Lookup(self, x):
"""Looks up x and returns the corresponding value of y."""
return self._Bisect(x, self.xs, self.ys)
def Reverse(self, y):
"""Looks up y and returns the corresponding value of x."""
return self._Bisect(y, self.ys, self.xs)
def _Bisect(self, x, xs, ys):
"""Helper function."""
if x <= xs[0]:
return ys[0]
if x >= xs[-1]:
return ys[-1]
i = bisect.bisect(xs, x)
frac = 1.0 * (x - xs[i - 1]) / (xs[i] - xs[i - 1])
y = ys[i - 1] + frac * 1.0 * (ys[i] - ys[i - 1])
return y
class _DictWrapper(object):
"""An object that contains a dictionary."""
def __init__(self, obj=None, label=None):
"""Initializes the distribution.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
label: string label
"""
self.label = label if label is not None else '_nolegend_'
self.d = {}
# flag whether the distribution is under a log transform
self.log = False
if obj is None:
return
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.label = label if label is not None else obj.label
if isinstance(obj, dict):
self.d.update(obj.items())
elif isinstance(obj, (_DictWrapper, Cdf, Pdf)):
self.d.update(obj.Items())
elif isinstance(obj, pandas.Series):
self.d.update(obj.value_counts().iteritems())
else:
# finally, treat it like a list
self.d.update(Counter(obj))
if len(self) > 0 and isinstance(self, Pmf):
self.Normalize()
def __hash__(self):
return id(self)
def __str__(self):
cls = self.__class__.__name__
return '%s(%s)' % (cls, str(self.d))
__repr__ = __str__
def __eq__(self, other):
return self.d == other.d
def __len__(self):
return len(self.d)
def __iter__(self):
return iter(self.d)
def iterkeys(self):
"""Returns an iterator over keys."""
return iter(self.d)
def __contains__(self, value):
return value in self.d
def __getitem__(self, value):
return self.d.get(value, 0)
def __setitem__(self, value, prob):
self.d[value] = prob
def __delitem__(self, value):
del self.d[value]
def Copy(self, label=None):
"""Returns a copy.
Make a shallow copy of d. If you want a deep copy of d,
use copy.deepcopy on the whole object.
label: string label for the new Hist
returns: new _DictWrapper with the same type
"""
new = copy.copy(self)
new.d = copy.copy(self.d)
new.label = label if label is not None else self.label
return new
def Scale(self, factor):
"""Multiplies the values by a factor.
factor: what to multiply by
Returns: new object
"""
new = self.Copy()
new.d.clear()
for val, prob in self.Items():
new.Set(val * factor, prob)
return new
def Log(self, m=None):
"""Log transforms the probabilities.
Removes values with probability 0.
Normalizes so that the largest logprob is 0.
"""
if self.log:
raise ValueError("Pmf/Hist already under a log transform")
self.log = True
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
if p:
self.Set(x, math.log(p / m))
else:
self.Remove(x)
def Exp(self, m=None):
"""Exponentiates the probabilities.
m: how much to shift the ps before exponentiating
If m is None, normalizes so that the largest prob is 1.
"""
if not self.log:
raise ValueError("Pmf/Hist not under a log transform")
self.log = False
if m is None:
m = self.MaxLike()
for x, p in self.d.items():
self.Set(x, math.exp(p - m))
def GetDict(self):
"""Gets the dictionary."""
return self.d
def SetDict(self, d):
"""Sets the dictionary."""
self.d = d
def Values(self):
"""Gets an unsorted sequence of values.
Note: one source of confusion is that the keys of this
dictionary are the values of the Hist/Pmf, and the
values of the dictionary are frequencies/probabilities.
"""
return self.d.keys()
def Items(self):
"""Gets an unsorted sequence of (value, freq/prob) pairs."""
return self.d.items()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
Note: options are ignored
Returns:
tuple of (sorted value sequence, freq/prob sequence)
"""
if min(self.d.keys()) is np.nan:
logging.warning('Hist: contains NaN, may not render correctly.')
return zip(*sorted(self.Items()))
def MakeCdf(self, label=None):
"""Makes a Cdf."""
label = label if label is not None else self.label
return Cdf(self, label=label)
def Print(self):
"""Prints the values and freqs/probs in ascending order."""
for val, prob in sorted(self.d.items()):
print(val, prob)
def Set(self, x, y=0):
"""Sets the freq/prob associated with the value x.
Args:
x: number value
y: number freq or prob
"""
self.d[x] = y
def Incr(self, x, term=1):
"""Increments the freq/prob associated with the value x.
Args:
x: number value
term: how much to increment by
"""
self.d[x] = self.d.get(x, 0) + term
def Mult(self, x, factor):
"""Scales the freq/prob associated with the value x.
Args:
x: number value
factor: how much to multiply by
"""
self.d[x] = self.d.get(x, 0) * factor
def Remove(self, x):
"""Removes a value.
Throws an exception if the value is not there.
Args:
x: value to remove
"""
del self.d[x]
def Total(self):
"""Returns the total of the frequencies/probabilities in the map."""
total = sum(self.d.values())
return total
def MaxLike(self):
"""Returns the largest frequency/probability in the map."""
return max(self.d.values())
def Largest(self, n=10):
"""Returns the largest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=True)[:n]
def Smallest(self, n=10):
"""Returns the smallest n values, with frequency/probability.
n: number of items to return
"""
return sorted(self.d.items(), reverse=False)[:n]
def MostProbable(self, n=-1):
'''
Returns the key-value pairs sorted by probability, most probable first.
(The n argument is currently unused; all pairs are returned.)
'''
keyvals = [[x, self.d[x]] for x in self.d.keys()]
sorted_list = sorted(keyvals, key=lambda x: float(x[1]))
sorted_list.reverse()
# keyvals.sort(key=lambda x: float(x[1]))
return sorted_list
class Hist(_DictWrapper):
"""Represents a histogram, which is a map from values to frequencies.
Values can be any hashable type; frequencies are integer counters.
"""
def Freq(self, x):
"""Gets the frequency associated with the value x.
Args:
x: number value
Returns:
int frequency
"""
return self.d.get(x, 0)
def Freqs(self, xs):
"""Gets frequencies for a sequence of values."""
return [self.Freq(x) for x in xs]
def IsSubset(self, other):
"""Checks whether the values in this histogram are a subset of
the values in the given histogram."""
for val, freq in self.Items():
if freq > other.Freq(val):
return False
return True
def Subtract(self, other):
"""Subtracts the values in the given histogram from this histogram."""
for val, freq in other.Items():
self.Incr(val, -freq)
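# Illustrative sketch (hypothetical helper): a Hist counts raw frequencies.
def _example_hist():
    hist = Hist([1, 2, 2, 3, 5])
    return hist.Freq(2), hist.Freqs([1, 4])   # (2, [1, 0])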
class Pmf(_DictWrapper):
"""Represents a probability mass function.
Values can be any hashable type; probabilities are floating-point.
Pmfs are not necessarily normalized.
"""
def Prob(self, x, default=0):
"""Gets the probability associated with the value x.
Args:
x: number value
default: value to return if the key is not there
Returns:
float probability
"""
return self.d.get(x, default)
def Probs(self, xs):
"""Gets probabilities for a sequence of values."""
return [self.Prob(x) for x in xs]
def Percentile(self, percentage):
"""Computes a percentile of a given Pmf.
Note: this is not super efficient. If you are planning
to compute more than a few percentiles, compute the Cdf.
percentage: float 0-100
returns: value from the Pmf
"""
p = percentage / 100.0
total = 0
for val, prob in sorted(self.Items()):
total += prob
if total >= p:
return val
def ProbGreater(self, x):
"""Probability that a sample from this Pmf exceeds x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbGreater(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val > x]
return sum(t)
def ProbLess(self, x):
"""Probability that a sample from this Pmf is less than x.
x: number
returns: float probability
"""
if isinstance(x, _DictWrapper):
return PmfProbLess(self, x)
else:
t = [prob for (val, prob) in self.d.items() if val < x]
return sum(t)
def __lt__(self, obj):
"""Less than.
obj: number or _DictWrapper
returns: float probability
"""
return self.ProbLess(obj)
def __gt__(self, obj):
"""Greater than.
obj: number or _DictWrapper
returns: float probability
"""
return self.ProbGreater(obj)
def __ge__(self, obj):
"""Greater than or equal.
obj: number or _DictWrapper
returns: float probability
"""
return 1 - (self < obj)
def __le__(self, obj):
"""Less than or equal.
obj: number or _DictWrapper
returns: float probability
"""
return 1 - (self > obj)
def Normalize(self, fraction=1.0):
"""Normalizes this PMF so the sum of all probs is fraction.
Args:
fraction: what the total should be after normalization
Returns: the total probability before normalizing
"""
if self.log:
raise ValueError("Normalize: Pmf is under a log transform")
total = self.Total()
if total == 0.0:
raise ValueError('Normalize: total probability is zero.')
#logging.warning('Normalize: total probability is zero.')
#return total
factor = fraction / total
for x in self.d:
self.d[x] *= factor
return total
def Random(self):
"""Chooses a random element from this PMF.
Note: this is not very efficient. If you plan to call
this more than a few times, consider converting to a CDF.
Returns:
float value from the Pmf
"""
target = random.random()
total = 0.0
for x, p in self.d.items():
total += p
if total >= target:
return x
# we shouldn't get here
raise ValueError('Random: Pmf might not be normalized.')
def Mean(self):
"""Computes the mean of a PMF.
Returns:
float mean
"""
mean = 0.0
for x, p in self.d.items():
mean += p * x
return mean
def Var(self, mu=None):
"""Computes the variance of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float variance
"""
if mu is None:
mu = self.Mean()
var = 0.0
for x, p in self.d.items():
var += p * (x - mu) ** 2
return var
def Std(self, mu=None):
"""Computes the standard deviation of a PMF.
mu: the point around which the variance is computed;
if omitted, computes the mean
returns: float standard deviation
"""
var = self.Var(mu)
return math.sqrt(var)
def MaximumLikelihood(self):
"""Returns the value with the highest probability.
Returns: float probability
"""
_, val = max((prob, val) for val, prob in self.Items())
return val
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = self.MakeCdf()
return cdf.CredibleInterval(percentage)
def __add__(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf or a scalar
returns: new Pmf
"""
try:
return self.AddPmf(other)
except AttributeError:
return self.AddConstant(other)
def AddPmf(self, other):
"""Computes the Pmf of the sum of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 + v2, p1 * p2)
return pmf
def AddConstant(self, other):
"""Computes the Pmf of the sum a constant and values from self.
other: a number
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
pmf.Set(v1 + other, p1)
return pmf
def __sub__(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
try:
return self.SubPmf(other)
except AttributeError:
return self.AddConstant(-other)
def SubPmf(self, other):
"""Computes the Pmf of the diff of values drawn from self and other.
other: another Pmf
returns: new Pmf
"""
pmf = Pmf()
for v1, p1 in self.Items():
for v2, p2 in other.Items():
pmf.Incr(v1 - v2, p1 * p2)
return pmf
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.MakeCdf()
return cdf.Max(k)
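# Illustrative sketch (hypothetical helper): a fair die as a Pmf; Pmf addition
# enumerates all pairwise sums via AddPmf.
def _example_pmf_die():
    die = Pmf(range(1, 7))        # normalized automatically
    two_dice = die + die
    return die.Mean(), two_dice.Mean()   # roughly 3.5 and 7.0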
class Joint(Pmf):
"""Represents a joint distribution.
The values are sequences (usually tuples)
"""
def Marginal(self, i, label=None):
"""Gets the marginal distribution of the indicated variable.
i: index of the variable we want
Returns: Pmf
"""
pmf = Pmf(label=label)
for vs, prob in self.Items():
pmf.Incr(vs[i], prob)
return pmf
def Conditional(self, i, j, val, label=None):
"""Gets the conditional distribution of the indicated variable.
Distribution of vs[i], conditioned on vs[j] = val.
i: index of the variable we want
j: which variable is conditioned on
val: the value the jth variable has to have
Returns: Pmf
"""
pmf = Pmf(label=label)
for vs, prob in self.Items():
if vs[j] != val:
continue
pmf.Incr(vs[i], prob)
pmf.Normalize()
return pmf
def MaxLikeInterval(self, percentage=90):
"""Returns the maximum-likelihood credible interval.
If percentage=90, computes a 90% CI containing the values
with the highest likelihoods.
percentage: float between 0 and 100
Returns: list of values from the suite
"""
interval = []
total = 0
t = [(prob, val) for val, prob in self.Items()]
t.sort(reverse=True)
for prob, val in t:
interval.append(val)
total += prob
if total >= percentage / 100.0:
break
return interval
def MakeJoint(pmf1, pmf2):
"""Joint distribution of values from pmf1 and pmf2.
Assumes that the PMFs represent independent random variables.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
Joint pmf of value pairs
"""
joint = Joint()
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
joint.Set((v1, v2), p1 * p2)
return joint
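# Illustrative sketch (hypothetical helper): the joint distribution of two
# independent fair coins, then the marginal of the first coin.
def _example_joint():
    coin = Pmf(dict(H=0.5, T=0.5))
    joint = MakeJoint(coin, coin)
    return joint.Marginal(0).Prob('H')   # 0.5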
def MakeHistFromList(t, label=None):
"""Makes a histogram from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this histogram
Returns:
Hist object
"""
return Hist(t, label=label)
def MakeHistFromDict(d, label=None):
"""Makes a histogram from a map from values to frequencies.
Args:
d: dictionary that maps values to frequencies
label: string label for this histogram
Returns:
Hist object
"""
return Hist(d, label)
def MakePmfFromList(t, label=None):
"""Makes a PMF from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(t, label=label)
def MakePmfFromDict(d, label=None):
"""Makes a PMF from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(d, label=label)
def MakePmfFromItems(t, label=None):
"""Makes a PMF from a sequence of value-probability pairs
Args:
t: sequence of value-probability pairs
label: string label for this PMF
Returns:
Pmf object
"""
return Pmf(dict(t), label=label)
def MakePmfFromHist(hist, label=None):
"""Makes a normalized PMF from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Pmf object
"""
if label is None:
label = hist.label
return Pmf(hist, label=label)
def MakeMixture(metapmf, label='mix'):
"""Make a mixture distribution.
Args:
metapmf: Pmf that maps from Pmfs to probs.
label: string label for the new Pmf.
Returns: Pmf object.
"""
mix = Pmf(label=label)
for pmf, p1 in metapmf.Items():
for x, p2 in pmf.Items():
mix.Incr(x, p1 * p2)
return mix
def MakeUniformPmf(low, high, n):
"""Make a uniform Pmf.
low: lowest value (inclusive)
high: highest value (inclusive)
n: number of values
"""
pmf = Pmf()
for x in np.linspace(low, high, n):
pmf.Set(x, 1)
pmf.Normalize()
return pmf
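# Illustrative sketch (hypothetical helper): MakeMixture averages several dice,
# weighted by a meta-Pmf that maps each die to its probability.
def _example_mixture():
    metapmf = Pmf()
    for sides in [4, 6, 8]:
        metapmf.Set(Pmf(range(1, sides + 1)), 1)
    metapmf.Normalize()
    return MakeMixture(metapmf)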
class Cdf(object):
"""Represents a cumulative distribution function.
Attributes:
xs: sequence of values
ps: sequence of probabilities
label: string used as a graph label.
"""
def __init__(self, obj=None, ps=None, label=None):
"""Initializes.
If ps is provided, obj must be the corresponding list of values.
obj: Hist, Pmf, Cdf, Pdf, dict, pandas Series, list of pairs
ps: list of cumulative probabilities
label: string label
"""
self.label = label if label is not None else '_nolegend_'
if isinstance(obj, (_DictWrapper, Cdf, Pdf)):
if not label:
self.label = label if label is not None else obj.label
if obj is None:
# caller does not provide obj, make an empty Cdf
self.xs = np.asarray([])
self.ps = np.asarray([])
if ps is not None:
logging.warning("Cdf: can't pass ps without also passing xs.")
return
else:
# if the caller provides xs and ps, just store them
if ps is not None:
if isinstance(ps, str):
logging.warning("Cdf: ps can't be a string")
self.xs = np.asarray(obj)
self.ps = np.asarray(ps)
return
# caller has provided just obj, not ps
if isinstance(obj, Cdf):
self.xs = copy.copy(obj.xs)
self.ps = copy.copy(obj.ps)
return
if isinstance(obj, _DictWrapper):
dw = obj
else:
dw = Hist(obj)
if len(dw) == 0:
self.xs = np.asarray([])
self.ps = np.asarray([])
return
xs, freqs = zip(*sorted(dw.Items()))
self.xs = np.asarray(xs)
self.ps = np.cumsum(freqs, dtype=float)
self.ps /= self.ps[-1]
def __str__(self):
return 'Cdf(%s, %s)' % (str(self.xs), str(self.ps))
__repr__ = __str__
def __len__(self):
return len(self.xs)
def __getitem__(self, x):
return self.Prob(x)
def __setitem__(self):
raise UnimplementedMethodException()
def __delitem__(self):
raise UnimplementedMethodException()
def __eq__(self, other):
return np.all(self.xs == other.xs) and np.all(self.ps == other.ps)
def Copy(self, label=None):
"""Returns a copy of this Cdf.
label: string label for the new Cdf
"""
if label is None:
label = self.label
return Cdf(list(self.xs), list(self.ps), label=label)
def MakePmf(self, label=None):
"""Makes a Pmf."""
if label is None:
label = self.label
return Pmf(self, label=label)
def Values(self):
"""Returns a sorted list of values.
"""
return self.xs
def Items(self):
"""Returns a sorted sequence of (value, probability) pairs.
Note: in Python3, returns an iterator.
"""
a = self.ps
b = np.roll(a, 1)
b[0] = 0
return zip(self.xs, a-b)
def Shift(self, term):
"""Adds a term to the xs.
term: how much to add
"""
new = self.Copy()
new.xs = [x + term for x in self.xs]
return new
def Scale(self, factor):
"""Multiplies the xs by a factor.
factor: what to multiply by
"""
new = self.Copy()
new.xs = [x * factor for x in self.xs]
return new
def Prob(self, x):
"""Returns CDF(x), the probability that corresponds to value x.
Args:
x: number
Returns:
float probability
"""
if x < self.xs[0]:
return 0.0
index = bisect.bisect(self.xs, x)
p = self.ps[index-1]
return p
def Probs(self, xs):
"""Gets probabilities for a sequence of values.
xs: any sequence that can be converted to NumPy array
returns: NumPy array of cumulative probabilities
"""
xs = np.asarray(xs)
index = np.searchsorted(self.xs, xs, side='right')
ps = self.ps[index-1]
ps[xs < self.xs[0]] = 0.0
return ps
ProbArray = Probs
def Value(self, p):
"""Returns InverseCDF(p), the value that corresponds to probability p.
Args:
p: number in the range [0, 1]
Returns:
number value
"""
if p < 0 or p > 1:
raise ValueError('Probability p must be in range [0, 1]')
index = bisect.bisect_left(self.ps, p)
return self.xs[index]
def ValueArray(self, ps):
"""Returns InverseCDF(p), the value that corresponds to probability p.
Args:
ps: NumPy array of numbers in the range [0, 1]
Returns:
NumPy array of values
"""
ps = np.asarray(ps)
if np.any(ps < 0) or np.any(ps > 1):
raise ValueError('Probability p must be in range [0, 1]')
index = np.searchsorted(self.ps, ps, side='left')
return self.xs[index]
def Percentile(self, p):
"""Returns the value that corresponds to percentile p.
Args:
p: number in the range [0, 100]
Returns:
number value
"""
return self.Value(p / 100.0)
def PercentileRank(self, x):
"""Returns the percentile rank of the value x.
x: potential value in the CDF
returns: percentile rank in the range 0 to 100
"""
return self.Prob(x) * 100.0
def Random(self):
"""Chooses a random value from this distribution."""
return self.Value(random.random())
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int length of the sample
returns: NumPy array
"""
ps = np.random.random(n)
return self.ValueArray(ps)
def Mean(self):
"""Computes the mean of a CDF.
Returns:
float mean
"""
old_p = 0
total = 0.0
for x, new_p in zip(self.xs, self.ps):
p = new_p - old_p
total += p * x
old_p = new_p
return total
def CredibleInterval(self, percentage=90):
"""Computes the central credible interval.
If percentage=90, computes the 90% CI.
Args:
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
prob = (1 - percentage / 100.0) / 2
interval = self.Value(prob), self.Value(1 - prob)
return interval
ConfidenceInterval = CredibleInterval
def _Round(self, multiplier=1000.0):
"""
An entry is added to the cdf only if the percentile differs
from the previous value in a significant digit, where the number
of significant digits is determined by multiplier. The
default is 1000, which keeps log10(1000) = 3 significant digits.
"""
# TODO(write this method)
raise UnimplementedMethodException()
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
An empirical CDF is a step function; linear interpolation
can be misleading.
Note: options are ignored
Returns:
tuple of (xs, ps)
"""
def interleave(a, b):
c = np.empty(a.shape[0] + b.shape[0])
c[::2] = a
c[1::2] = b
return c
a = np.array(self.xs)
xs = interleave(a, a)
shift_ps = np.roll(self.ps, 1)
shift_ps[0] = 0
ps = interleave(shift_ps, self.ps)
return xs, ps
def Max(self, k):
"""Computes the CDF of the maximum of k selections from this dist.
k: int
returns: new Cdf
"""
cdf = self.Copy()
cdf.ps **= k
return cdf
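# Illustrative sketch (hypothetical helper): cumulative probabilities,
# percentiles, and sampling from a Cdf built from a short list.
def _example_cdf():
    cdf = Cdf([1, 2, 2, 3, 5])
    return cdf.Prob(2), cdf.Percentile(50), cdf.Sample(3)   # 0.6, 2, three draws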
def MakeCdfFromItems(items, label=None):
"""Makes a cdf from an unsorted sequence of (value, frequency) pairs.
Args:
items: unsorted sequence of (value, frequency) pairs
label: string label for this CDF
Returns:
cdf: list of (value, fraction) pairs
"""
return Cdf(dict(items), label=label)
def MakeCdfFromDict(d, label=None):
"""Makes a CDF from a dictionary that maps values to frequencies.
Args:
d: dictionary that maps values to frequencies.
label: string label for the data.
Returns:
Cdf object
"""
return Cdf(d, label=label)
def MakeCdfFromList(seq, label=None):
"""Creates a CDF from an unsorted sequence.
Args:
seq: unsorted sequence of sortable values
label: string label for the cdf
Returns:
Cdf object
"""
return Cdf(seq, label=label)
def MakeCdfFromHist(hist, label=None):
"""Makes a CDF from a Hist object.
Args:
hist: Pmf.Hist object
label: string label for the data.
Returns:
Cdf object
"""
if label is None:
label = hist.label
return Cdf(hist, label=label)
def MakeCdfFromPmf(pmf, label=None):
"""Makes a CDF from a Pmf object.
Args:
pmf: Pmf.Pmf object
label: string label for the data.
Returns:
Cdf object
"""
if label is None:
label = pmf.label
return Cdf(pmf, label=label)
class UnimplementedMethodException(Exception):
"""Exception if someone calls a method that should be overridden."""
class Suite(Pmf):
"""Represents a suite of hypotheses and their probabilities."""
datalog = []
def Update(self, data):
"""Updates each hypothesis based on the data.
data: any representation of the data
returns: the normalizing constant
"""
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
return self.Normalize()
def LogUpdate(self, data):
"""Updates a suite of hypotheses based on new data.
Modifies the suite directly; if you want to keep the original, make
a copy.
Note: unlike Update, LogUpdate does not normalize.
Args:
data: any representation of the data
"""
for hypo in self.Values():
like = self.LogLikelihood(data, hypo)
self.Incr(hypo, like)
def UpdateSet(self, dataset):
"""Updates each hypothesis based on the dataset.
This is more efficient than calling Update repeatedly because
it waits until the end to Normalize.
Modifies the suite directly; if you want to keep the original, make
a copy.
dataset: a sequence of data
returns: the normalizing constant
"""
count = 0.0
length = len(dataset)
for data in dataset:
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
count = count + 1
return self.Normalize()
def UpdateSetAndLog(self, dataset, f):
'''
Same as UpdateSet except also logs information to the given file (f)
'''
count = 0.0
length = len(dataset)
for data in dataset:
for hypo in self.Values():
like = self.Likelihood(data, hypo)
self.Mult(hypo, like)
count = count + 1
f.write(str([[x, self.Prob(x), data[0], data[1]] for x in self.Values()]) + "\n")
return self.Normalize()
def LogUpdateSet(self, dataset):
"""Updates each hypothesis based on the dataset.
Modifies the suite directly; if you want to keep the original, make
a copy.
dataset: a sequence of data
returns: None
"""
for data in dataset:
self.LogUpdate(data)
def Likelihood(self, data, hypo):
"""Computes the likelihood of the data under the hypothesis.
hypo: some representation of the hypothesis
data: some representation of the data
"""
raise UnimplementedMethodException()
def LogLikelihood(self, data, hypo):
"""Computes the log likelihood of the data under the hypothesis.
hypo: some representation of the hypothesis
data: some representation of the data
"""
raise UnimplementedMethodException()
def Print(self):
"""Prints the hypotheses and their probabilities."""
for hypo, prob in sorted(self.Items()):
print(hypo, prob)
def MakeOdds(self):
"""Transforms from probabilities to odds.
Values with prob=0 are removed.
"""
for hypo, prob in self.Items():
if prob:
self.Set(hypo, Odds(prob))
else:
self.Remove(hypo)
def MakeProbs(self):
"""Transforms from odds to probabilities."""
for hypo, odds in self.Items():
self.Set(hypo, Probability(odds))
def GetLogs(self):
'''Returns the history of each hypothesis'''
return self.datalog
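# Illustrative sketch (hypothetical subclass): the classic "which die?" problem.
# Hypotheses are numbers of sides; the data is a single observed roll.
class _ExampleDice(Suite):
    def Likelihood(self, data, hypo):
        return 0 if data > hypo else 1.0 / hypo

def _example_dice_update():
    suite = _ExampleDice([4, 6, 8, 12, 20])
    suite.Update(6)              # a roll of 6 rules out the 4-sided die
    return suite.Prob(8)         # roughly 0.29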
def MakeSuiteFromList(t, label=None):
"""Makes a suite from an unsorted sequence of values.
Args:
t: sequence of numbers
label: string label for this suite
Returns:
Suite object
"""
hist = MakeHistFromList(t, label=label)
d = hist.GetDict()
return MakeSuiteFromDict(d)
def MakeSuiteFromHist(hist, label=None):
"""Makes a normalized suite from a Hist object.
Args:
hist: Hist object
label: string label
Returns:
Suite object
"""
if label is None:
label = hist.label
# make a copy of the dictionary
d = dict(hist.GetDict())
return MakeSuiteFromDict(d, label)
def MakeSuiteFromDict(d, label=None):
"""Makes a suite from a map from values to probabilities.
Args:
d: dictionary that maps values to probabilities
label: string label for this suite
Returns:
Suite object
"""
suite = Suite(label=label)
suite.SetDict(d)
suite.Normalize()
return suite
class Pdf(object):
"""Represents a probability density function (PDF)."""
def Density(self, x):
"""Evaluates this Pdf at x.
Returns: float or NumPy array of probability density
"""
raise UnimplementedMethodException()
def GetLinspace(self):
"""Get a linspace for plotting.
Not all subclasses of Pdf implement this.
Returns: numpy array
"""
raise UnimplementedMethodException()
def MakePmf(self, **options):
"""Makes a discrete version of this Pdf.
options can include
label: string
low: low end of range
high: high end of range
n: number of places to evaluate
Returns: new Pmf
"""
label = options.pop('label', '')
xs, ds = self.Render(**options)
return Pmf(dict(zip(xs, ds)), label=label)
def Render(self, **options):
"""Generates a sequence of points suitable for plotting.
Returns:
tuple of (xs, densities)
"""
low, high = options.pop('low', None), options.pop('high', None)
if low is not None and high is not None:
n = options.pop('n', 101)
xs = np.linspace(low, high, n)
else:
xs = options.pop('xs', None)
if xs is None:
xs = self.GetLinspace()
ds = self.Density(xs)
return xs, ds
def Items(self):
"""Generates a sequence of (value, probability) pairs.
"""
return zip(*self.Render())
class NormalPdf(Pdf):
"""Represents the PDF of a Normal distribution."""
def __init__(self, mu=0, sigma=1, label=None):
"""Constructs a Normal Pdf with given mu and sigma.
mu: mean
sigma: standard deviation
label: string
"""
self.mu = mu
self.sigma = sigma
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'NormalPdf(%f, %f)' % (self.mu, self.sigma)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = self.mu-3*self.sigma, self.mu+3*self.sigma
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return scipy.stats.norm.pdf(xs, self.mu, self.sigma)
class ExponentialPdf(Pdf):
"""Represents the PDF of an exponential distribution."""
def __init__(self, lam=1, label=None):
"""Constructs an exponential Pdf with given parameter.
lam: rate parameter
label: string
"""
self.lam = lam
self.label = label if label is not None else '_nolegend_'
def __str__(self):
return 'ExponentialPdf(%f)' % (self.lam)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
low, high = 0, 5.0/self.lam
return np.linspace(low, high, 101)
def Density(self, xs):
"""Evaluates this Pdf at xs.
xs: scalar or sequence of floats
returns: float or NumPy array of probability density
"""
return scipy.stats.expon.pdf(xs, scale=1.0/self.lam)
class EstimatedPdf(Pdf):
"""Represents a PDF estimated by KDE."""
def __init__(self, sample, label=None):
"""Estimates the density function based on a sample.
sample: sequence of data
label: string
"""
self.label = label if label is not None else '_nolegend_'
self.kde = scipy.stats.gaussian_kde(sample)
low = min(sample)
high = max(sample)
self.linspace = np.linspace(low, high, 101)
def __str__(self):
return 'EstimatedPdf(label=%s)' % str(self.label)
def GetLinspace(self):
"""Get a linspace for plotting.
Returns: numpy array
"""
return self.linspace
def Density(self, xs):
"""Evaluates this Pdf at xs.
returns: float or NumPy array of probability density
"""
return self.kde.evaluate(xs)
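# Illustrative sketch (hypothetical helper): KDE-smooth an arbitrary sample and
# discretize the result into a Pmf for plotting.
def _example_estimated_pdf():
    sample = np.random.normal(0, 1, 200)
    pdf = EstimatedPdf(sample)
    return pdf.MakePmf(label='kde')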
def CredibleInterval(pmf, percentage=90):
"""Computes a credible interval for a given distribution.
If percentage=90, computes the 90% CI.
Args:
pmf: Pmf object representing a posterior distribution
percentage: float between 0 and 100
Returns:
sequence of two floats, low and high
"""
cdf = pmf.MakeCdf()
prob = (1 - percentage / 100.0) / 2
interval = cdf.Value(prob), cdf.Value(1 - prob)
return interval
def PmfProbLess(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 < v2:
total += p1 * p2
return total
def PmfProbGreater(pmf1, pmf2):
"""Probability that a value from pmf1 is less than a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 > v2:
total += p1 * p2
return total
def PmfProbEqual(pmf1, pmf2):
"""Probability that a value from pmf1 equals a value from pmf2.
Args:
pmf1: Pmf object
pmf2: Pmf object
Returns:
float probability
"""
total = 0.0
for v1, p1 in pmf1.Items():
for v2, p2 in pmf2.Items():
if v1 == v2:
total += p1 * p2
return total
def RandomSum(dists):
"""Chooses a random value from each dist and returns the sum.
dists: sequence of Pmf or Cdf objects
returns: numerical sum
"""
total = sum(dist.Random() for dist in dists)
return total
def SampleSum(dists, n):
"""Draws a sample of sums from a list of distributions.
dists: sequence of Pmf or Cdf objects
n: sample size
returns: new Pmf of sums
"""
pmf = Pmf(RandomSum(dists) for i in range(n))
return pmf
def EvalNormalPdf(x, mu, sigma):
"""Computes the unnormalized PDF of the normal distribution.
x: value
mu: mean
sigma: standard deviation
returns: float probability density
"""
return scipy.stats.norm.pdf(x, mu, sigma)
def MakeNormalPmf(mu, sigma, num_sigmas, n=201):
"""Makes a PMF discrete approx to a Normal distribution.
mu: float mean
sigma: float standard deviation
num_sigmas: how many sigmas to extend in each direction
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
low = mu - num_sigmas * sigma
high = mu + num_sigmas * sigma
for x in np.linspace(low, high, n):
p = EvalNormalPdf(x, mu, sigma)
pmf.Set(x, p)
pmf.Normalize()
return pmf
def EvalBinomialPmf(k, n, p):
"""Evaluates the binomial pmf.
Returns the probability of k successes in n trials with probability p.
"""
return scipy.stats.binom.pmf(k, n, p)
def EvalPoissonPmf(k, lam):
"""Computes the Poisson PMF.
k: number of events
lam: parameter lambda in events per unit time
returns: float probability
"""
# don't use the scipy function (yet). for lam=0 it returns NaN;
# should be 0.0
# return scipy.stats.poisson.pmf(k, lam)
return lam ** k * math.exp(-lam) / math.factorial(k)
def MakePoissonPmf(lam, high, step=1):
"""Makes a PMF discrete approx to a Poisson distribution.
lam: parameter lambda in events per unit time
high: upper bound of the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for k in range(0, high + 1, step):
p = EvalPoissonPmf(k, lam)
pmf.Set(k, p)
pmf.Normalize()
return pmf
def EvalExponentialPdf(x, lam):
"""Computes the exponential PDF.
x: value
lam: parameter lambda in events per unit time
returns: float probability density
"""
return lam * math.exp(-lam * x)
def EvalExponentialCdf(x, lam):
"""Evaluates CDF of the exponential distribution with parameter lam."""
return 1 - math.exp(-lam * x)
def MakeExponentialPmf(lam, high, n=200):
"""Makes a PMF discrete approx to an exponential distribution.
lam: parameter lambda in events per unit time
high: upper bound
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
for x in np.linspace(0, high, n):
p = EvalExponentialPdf(x, lam)
pmf.Set(x, p)
pmf.Normalize()
return pmf
def StandardNormalCdf(x):
"""Evaluates the CDF of the standard Normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution
#Cumulative_distribution_function
Args:
x: float
Returns:
float
"""
return (math.erf(x / ROOT2) + 1) / 2
def EvalNormalCdf(x, mu=0, sigma=1):
"""Evaluates the CDF of the normal distribution.
Args:
x: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return scipy.stats.norm.cdf(x, loc=mu, scale=sigma)
def EvalNormalCdfInverse(p, mu=0, sigma=1):
"""Evaluates the inverse CDF of the normal distribution.
See http://en.wikipedia.org/wiki/Normal_distribution#Quantile_function
Args:
p: float
mu: mean parameter
sigma: standard deviation parameter
Returns:
float
"""
return scipy.stats.norm.ppf(p, loc=mu, scale=sigma)
def EvalLognormalCdf(x, mu=0, sigma=1):
"""Evaluates the CDF of the lognormal distribution.
x: float or sequence
mu: mean parameter
sigma: standard deviation parameter
Returns: float or sequence
"""
return scipy.stats.lognorm.cdf(x, sigma, scale=math.exp(mu))
def RenderExpoCdf(lam, low, high, n=101):
"""Generates sequences of xs and ps for an exponential CDF.
lam: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = 1 - np.exp(-lam * xs)
#ps = scipy.stats.expon.cdf(xs, scale=1.0/lam)
return xs, ps
def RenderNormalCdf(mu, sigma, low, high, n=101):
"""Generates sequences of xs and ps for a Normal CDF.
mu: parameter
sigma: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
xs = np.linspace(low, high, n)
ps = scipy.stats.norm.cdf(xs, mu, sigma)
return xs, ps
def RenderParetoCdf(xmin, alpha, low, high, n=50):
"""Generates sequences of xs and ps for a Pareto CDF.
xmin: parameter
alpha: parameter
low: float
high: float
n: number of points to render
returns: numpy arrays (xs, ps)
"""
if low < xmin:
low = xmin
xs = np.linspace(low, high, n)
ps = 1 - (xs / xmin) ** -alpha
#ps = scipy.stats.pareto.cdf(xs, scale=xmin, b=alpha)
return xs, ps
class Beta(object):
"""Represents a Beta distribution.
See http://en.wikipedia.org/wiki/Beta_distribution
"""
def __init__(self, alpha=1, beta=1, label=None):
"""Initializes a Beta distribution."""
self.alpha = alpha
self.beta = beta
self.label = label if label is not None else '_nolegend_'
def Update(self, data):
"""Updates a Beta distribution.
data: pair of int (heads, tails)
"""
heads, tails = data
self.alpha += heads
self.beta += tails
def Mean(self):
"""Computes the mean of this distribution."""
return self.alpha / (self.alpha + self.beta)
def Random(self):
"""Generates a random variate from this distribution."""
return random.betavariate(self.alpha, self.beta)
def Sample(self, n):
"""Generates a random sample from this distribution.
n: int sample size
"""
size = n,
return np.random.beta(self.alpha, self.beta, size)
def EvalPdf(self, x):
"""Evaluates the PDF at x."""
return x ** (self.alpha - 1) * (1 - x) ** (self.beta - 1)
def MakePmf(self, steps=101, label=None):
"""Returns a Pmf of this distribution.
Note: Normally, we just evaluate the PDF at a sequence
of points and treat the probability density as a probability
mass.
But if alpha or beta is less than one, we have to be
more careful because the PDF goes to infinity at x=0
and x=1. In that case we evaluate the CDF and compute
differences.
"""
if self.alpha < 1 or self.beta < 1:
cdf = self.MakeCdf()
pmf = cdf.MakePmf()
return pmf
xs = [i / (steps - 1.0) for i in range(steps)]
probs = [self.EvalPdf(x) for x in xs]
pmf = Pmf(dict(zip(xs, probs)), label=label)
return pmf
def MakeCdf(self, steps=101):
"""Returns the CDF of this distribution."""
xs = [i / (steps - 1.0) for i in range(steps)]
ps = [scipy.special.betainc(self.alpha, self.beta, x) for x in xs]
cdf = Cdf(xs, ps)
return cdf
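# Illustrative sketch (hypothetical helper): updating a flat Beta prior with
# 140 heads and 110 tails, as in the Euro problem; the posterior mean is 141/252.
def _example_beta():
    beta = Beta(1, 1)
    beta.Update((140, 110))
    return beta.Mean()    # about 0.56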
class Dirichlet(object):
"""Represents a Dirichlet distribution.
See http://en.wikipedia.org/wiki/Dirichlet_distribution
"""
def __init__(self, n, conc=1, label=None):
"""Initializes a Dirichlet distribution.
n: number of dimensions
conc: concentration parameter (smaller yields more concentration)
label: string label
"""
if n < 2:
raise ValueError('A Dirichlet distribution with '
'n<2 makes no sense')
self.n = n
self.params = np.ones(n, dtype=float) * conc
self.label = label if label is not None else '_nolegend_'
def Update(self, data):
"""Updates a Dirichlet distribution.
data: sequence of observations, in order corresponding to params
"""
m = len(data)
self.params[:m] += data
def Random(self):
"""Generates a random variate from this distribution.
Returns: normalized vector of fractions
"""
p = np.random.gamma(self.params)
return p / p.sum()
def Likelihood(self, data):
"""Computes the likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float probability
"""
m = len(data)
if self.n < m:
return 0
x = data
p = self.Random()
q = p[:m] ** x
return q.prod()
def LogLikelihood(self, data):
"""Computes the log likelihood of the data.
Selects a random vector of probabilities from this distribution.
Returns: float log probability
"""
m = len(data)
if self.n < m:
return float('-inf')
x = self.Random()
y = np.log(x[:m]) * data
return y.sum()
def MarginalBeta(self, i):
"""Computes the marginal distribution of the ith element.
See http://en.wikipedia.org/wiki/Dirichlet_distribution
#Marginal_distributions
i: int
Returns: Beta object
"""
alpha0 = self.params.sum()
alpha = self.params[i]
return Beta(alpha, alpha0 - alpha)
def PredictivePmf(self, xs, label=None):
"""Makes a predictive distribution.
xs: values to go into the Pmf
Returns: Pmf that maps from x to the mean prevalence of x
"""
alpha0 = self.params.sum()
ps = self.params / alpha0
return Pmf(zip(xs, ps), label=label)
def BinomialCoef(n, k):
"""Compute the binomial coefficient "n choose k".
n: number of trials
k: number of successes
Returns: float
"""
return scipy.special.comb(n, k)
def LogBinomialCoef(n, k):
"""Computes the log of the binomial coefficient.
http://math.stackexchange.com/questions/64716/
approximating-the-logarithm-of-the-binomial-coefficient
n: number of trials
k: number of successes
Returns: float
"""
return n * math.log(n) - k * math.log(k) - (n - k) * math.log(n - k)
def NormalProbability(ys, jitter=0.0):
"""Generates data for a normal probability plot.
ys: sequence of values
jitter: float magnitude of jitter added to the ys
returns: numpy arrays xs, ys
"""
n = len(ys)
xs = np.random.normal(0, 1, n)
xs.sort()
if jitter:
ys = Jitter(ys, jitter)
else:
ys = np.array(ys)
ys.sort()
return xs, ys
def Jitter(values, jitter=0.5):
"""Jitters the values by adding a uniform variate in (-jitter, jitter).
values: sequence
jitter: scalar magnitude of jitter
returns: new numpy array
"""
n = len(values)
return np.random.uniform(-jitter, +jitter, n) + values
def NormalProbabilityPlot(sample, label=None, fit_color='0.8'):
"""Makes a normal probability plot with a fitted line.
sample: sequence of numbers
label: string label for the data
fit_color: color string for the fitted line
"""
xs, ys = NormalProbability(sample)
mean, var = MeanVar(sample)
std = math.sqrt(var)
fit = FitLine(xs, mean, std)
thinkplot.Plot(*fit, color=fit_color, label='model')
xs, ys = NormalProbability(sample)
thinkplot.Plot(xs, ys, label=label)
def Mean(xs):
"""Computes mean.
xs: sequence of values
returns: float mean
"""
return np.mean(xs)
def Var(xs, mu=None, ddof=0):
"""Computes variance.
xs: sequence of values
mu: optional known mean
ddof: delta degrees of freedom
returns: float
"""
xs = np.asarray(xs)
if mu is None:
mu = xs.mean()
ds = xs - mu
return np.dot(ds, ds) / (len(xs) - ddof)
def Std(xs, mu=None, ddof=0):
"""Computes standard deviation.
xs: sequence of values
mu: optional known mean
ddof: delta degrees of freedom
returns: float
"""
var = Var(xs, mu, ddof)
return math.sqrt(var)
def MeanVar(xs, ddof=0):
"""Computes mean and variance.
Based on http://stackoverflow.com/questions/19391149/
numpy-mean-and-variance-from-single-function
xs: sequence of values
ddof: delta degrees of freedom
returns: pair of float, mean and var
"""
xs = np.asarray(xs)
mean = xs.mean()
s2 = Var(xs, mean, ddof)
return mean, s2
def Trim(t, p=0.01):
"""Trims the largest and smallest elements of t.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
sequence of values
"""
n = int(p * len(t))
t = sorted(t)[n:-n]
return t
def TrimmedMean(t, p=0.01):
"""Computes the trimmed mean of a sequence of numbers.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
return Mean(t)
def TrimmedMeanVar(t, p=0.01):
"""Computes the trimmed mean and variance of a sequence of numbers.
Side effect: sorts the list.
Args:
t: sequence of numbers
p: fraction of values to trim off each end
Returns:
float
"""
t = Trim(t, p)
mu, var = MeanVar(t)
return mu, var
def CohenEffectSize(group1, group2):
"""Compute Cohen's d.
group1: Series or NumPy array
group2: Series or NumPy array
returns: float
"""
diff = group1.mean() - group2.mean()
n1, n2 = len(group1), len(group2)
var1 = group1.var()
var2 = group2.var()
pooled_var = (n1 * var1 + n2 * var2) / (n1 + n2)
d = diff / math.sqrt(pooled_var)
return d
def Cov(xs, ys, meanx=None, meany=None):
"""Computes Cov(X, Y).
Args:
xs: sequence of values
ys: sequence of values
meanx: optional float mean of xs
meany: optional float mean of ys
Returns:
Cov(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
if meanx is None:
meanx = np.mean(xs)
if meany is None:
meany = np.mean(ys)
cov = np.dot(xs-meanx, ys-meany) / len(xs)
return cov
def Corr(xs, ys):
"""Computes Corr(X, Y).
Args:
xs: sequence of values
ys: sequence of values
Returns:
Corr(X, Y)
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
meanx, varx = MeanVar(xs)
meany, vary = MeanVar(ys)
corr = Cov(xs, ys, meanx, meany) / math.sqrt(varx * vary)
return corr
def SerialCorr(series, lag=1):
"""Computes the serial correlation of a series.
series: Series
lag: integer number of intervals to shift
returns: float correlation
"""
xs = series[lag:]
ys = series.shift(lag)[lag:]
corr = Corr(xs, ys)
return corr
def SpearmanCorr(xs, ys):
"""Computes Spearman's rank correlation.
Args:
xs: sequence of values
ys: sequence of values
Returns:
float Spearman's correlation
"""
xranks = pandas.Series(xs).rank()
yranks = pandas.Series(ys).rank()
return Corr(xranks, yranks)
def MapToRanks(t):
"""Returns a list of ranks corresponding to the elements in t.
Args:
t: sequence of numbers
Returns:
list of integer ranks, starting at 1
"""
# pair up each value with its index
pairs = enumerate(t)
# sort by value
sorted_pairs = sorted(pairs, key=itemgetter(1))
# pair up each pair with its rank
ranked = enumerate(sorted_pairs)
# sort by index
resorted = sorted(ranked, key=lambda trip: trip[1][0])
# extract the ranks
ranks = [trip[0]+1 for trip in resorted]
return ranks
def LeastSquares(xs, ys):
"""Computes a linear least squares fit for ys as a function of xs.
Args:
xs: sequence of values
ys: sequence of values
Returns:
tuple of (intercept, slope)
"""
meanx, varx = MeanVar(xs)
meany = Mean(ys)
slope = Cov(xs, ys, meanx, meany) / varx
inter = meany - slope * meanx
return inter, slope
def FitLine(xs, inter, slope):
"""Fits a line to the given data.
xs: sequence of x
returns: tuple of numpy arrays (sorted xs, fit ys)
"""
fit_xs = np.sort(xs)
fit_ys = inter + slope * fit_xs
return fit_xs, fit_ys
def Residuals(xs, ys, inter, slope):
"""Computes residuals for a linear fit with parameters inter and slope.
Args:
xs: independent variable
ys: dependent variable
inter: float intercept
slope: float slope
Returns:
list of residuals
"""
xs = np.asarray(xs)
ys = np.asarray(ys)
res = ys - (inter + slope * xs)
return res
def CoefDetermination(ys, res):
"""Computes the coefficient of determination (R^2) for given residuals.
Args:
ys: dependent variable
res: residuals
Returns:
float coefficient of determination
"""
return 1 - Var(res) / Var(ys)
def CorrelatedGenerator(rho):
"""Generates standard normal variates with serial correlation.
rho: target coefficient of correlation
Returns: iterable
"""
x = random.gauss(0, 1)
yield x
sigma = math.sqrt(1 - rho**2)
while True:
x = random.gauss(x * rho, sigma)
yield x
def CorrelatedNormalGenerator(mu, sigma, rho):
"""Generates normal variates with serial correlation.
mu: mean of variate
sigma: standard deviation of variate
rho: target coefficient of correlation
Returns: iterable
"""
for x in CorrelatedGenerator(rho):
yield x * sigma + mu
def RawMoment(xs, k):
"""Computes the kth raw moment of xs.
"""
return sum(x**k for x in xs) / len(xs)
def CentralMoment(xs, k):
"""Computes the kth central moment of xs.
"""
mean = RawMoment(xs, 1)
return sum((x - mean)**k for x in xs) / len(xs)
def StandardizedMoment(xs, k):
"""Computes the kth standardized moment of xs.
"""
var = CentralMoment(xs, 2)
std = math.sqrt(var)
return CentralMoment(xs, k) / std**k
def Skewness(xs):
"""Computes skewness.
"""
return StandardizedMoment(xs, 3)
def Median(xs):
"""Computes the median (50th percentile) of a sequence.
xs: sequence or anything else that can initialize a Cdf
returns: float
"""
cdf = Cdf(xs)
return cdf.Value(0.5)
def IQR(xs):
"""Computes the interquartile of a sequence.
xs: sequence or anything else that can initialize a Cdf
returns: pair of floats
"""
cdf = Cdf(xs)
return cdf.Value(0.25), cdf.Value(0.75)
def PearsonMedianSkewness(xs):
"""Computes the Pearson median skewness.
"""
median = Median(xs)
mean = RawMoment(xs, 1)
var = CentralMoment(xs, 2)
std = math.sqrt(var)
gp = 3 * (mean - median) / std
return gp
class FixedWidthVariables(object):
"""Represents a set of variables in a fixed width file."""
def __init__(self, variables, index_base=0):
"""Initializes.
variables: DataFrame
index_base: are the indices 0 or 1 based?
Attributes:
colspecs: list of (start, end) index tuples
names: list of string variable names
"""
self.variables = variables
# note: by default, subtract 1 from colspecs
self.colspecs = variables[['start', 'end']] - index_base
# convert colspecs to a list of pair of int
self.colspecs = self.colspecs.astype(int).values.tolist()
self.names = variables['name']
def ReadFixedWidth(self, filename, **options):
"""Reads a fixed width ASCII file.
filename: string filename
returns: DataFrame
"""
df = pandas.read_fwf(filename,
colspecs=self.colspecs,
names=self.names,
**options)
return df
def ReadStataDct(dct_file, **options):
"""Reads a Stata dictionary file.
dct_file: string filename
options: dict of options passed to open()
returns: FixedWidthVariables object
"""
type_map = dict(byte=int, int=int, long=int, float=float, double=float)
var_info = []
for line in open(dct_file, **options):
match = re.search( r'_column\(([^)]*)\)', line)
if match:
start = int(match.group(1))
t = line.split()
vtype, name, fstring = t[1:4]
name = name.lower()
if vtype.startswith('str'):
vtype = str
else:
vtype = type_map[vtype]
long_desc = ' '.join(t[4:]).strip('"')
var_info.append((start, vtype, name, fstring, long_desc))
columns = ['start', 'type', 'name', 'fstring', 'desc']
variables = pandas.DataFrame(var_info, columns=columns)
# fill in the end column by shifting the start column
variables['end'] = variables.start.shift(-1)
variables['end'][len(variables)-1] = 0
dct = FixedWidthVariables(variables, index_base=1)
return dct
def Resample(xs, n=None):
"""Draw a sample from xs with the same length as xs.
xs: sequence
n: sample size (default: len(xs))
returns: NumPy array
"""
if n is None:
n = len(xs)
return np.random.choice(xs, n, replace=True)
def SampleRows(df, nrows, replace=False):
"""Choose a sample of rows from a DataFrame.
df: DataFrame
nrows: number of rows
replace: whether to sample with replacement
    returns: DataFrame
"""
indices = np.random.choice(df.index, nrows, replace=replace)
sample = df.loc[indices]
return sample
def ResampleRows(df):
"""Resamples rows from a DataFrame.
df: DataFrame
returns: DataFrame
"""
return SampleRows(df, len(df), replace=True)
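def _resample_example():
    """Editor's illustrative sketch (not part of the original module):
    bootstrap-resamples a plain sequence and a small DataFrame with the
    helpers above; both results have the same length as their inputs.
    """
    xs = [1, 2, 3, 4, 5]
    boot_xs = Resample(xs)
    boot_df = ResampleRows(pandas.DataFrame(dict(x=xs)))
    return boot_xs, boot_df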
def ResampleRowsWeighted(df, column='finalwgt'):
"""Resamples a DataFrame using probabilities proportional to given column.
df: DataFrame
column: string column name to use as weights
returns: DataFrame
"""
weights = df[column]
cdf = Pmf(weights.iteritems()).MakeCdf()
indices = cdf.Sample(len(weights))
sample = df.loc[indices]
return sample
def PercentileRow(array, p):
"""Selects the row from a sorted array that maps to percentile p.
p: float 0--100
returns: NumPy array (one row)
"""
rows, cols = array.shape
index = int(rows * p / 100)
return array[index,]
def PercentileRows(ys_seq, percents):
"""Selects rows from a sequence that map to percentiles.
ys_seq: sequence of unsorted rows
percents: list of percentiles (0-100) to select
returns: list of NumPy arrays
"""
nrows = len(ys_seq)
ncols = len(ys_seq[0])
array = np.zeros((nrows, ncols))
for i, ys in enumerate(ys_seq):
array[i,] = ys
array = np.sort(array, axis=0)
rows = [PercentileRow(array, p) for p in percents]
return rows
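def _percentile_rows_example():
    """Editor's illustrative sketch (not part of the original module):
    computes 5th and 95th percentile curves from fake simulation output
    (10 sequences of 4 points each).
    """
    ys_seq = [np.random.normal(size=4) for _ in range(10)]
    low, high = PercentileRows(ys_seq, [5, 95])
    return low, high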
def Smooth(xs, sigma=2, **options):
"""Smooths a NumPy array with a Gaussian filter.
xs: sequence
sigma: standard deviation of the filter
"""
return scipy.ndimage.filters.gaussian_filter1d(xs, sigma, **options)
class HypothesisTest(object):
"""Represents a hypothesis test."""
def __init__(self, data):
"""Initializes.
data: data in whatever form is relevant
"""
self.data = data
self.MakeModel()
self.actual = self.TestStatistic(data)
self.test_stats = None
self.test_cdf = None
def PValue(self, iters=1000):
"""Computes the distribution of the test statistic and p-value.
iters: number of iterations
returns: float p-value
"""
self.test_stats = [self.TestStatistic(self.RunModel())
for _ in range(iters)]
self.test_cdf = Cdf(self.test_stats)
count = sum(1 for x in self.test_stats if x >= self.actual)
return count / iters
def MaxTestStat(self):
"""Returns the largest test statistic seen during simulations.
"""
return max(self.test_stats)
def PlotCdf(self, label=None):
        Draws the Cdf of the test statistics with a vertical line at the observed test stat.
"""
def VertLine(x):
"""Draws a vertical line at x."""
thinkplot.Plot([x, x], [0, 1], color='0.8')
VertLine(self.actual)
thinkplot.Cdf(self.test_cdf, label=label)
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
raise UnimplementedMethodException()
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
pass
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
raise UnimplementedMethodException()
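class DiffMeansExample(HypothesisTest):
    """Editor's illustrative sketch (not part of the original module): a
    permutation test for the difference in group means, showing how the
    abstract methods of HypothesisTest are meant to be overridden.
    data is assumed to be a pair of sequences (group1, group2).
    """
    def TestStatistic(self, data):
        """Absolute difference between the group means."""
        group1, group2 = data
        return abs(np.mean(group1) - np.mean(group2))
    def MakeModel(self):
        """Pools the two groups under the null hypothesis."""
        group1, group2 = self.data
        self.n = len(group1)
        self.pool = np.hstack((group1, group2))
    def RunModel(self):
        """Shuffles the pool and splits it back into two groups."""
        np.random.shuffle(self.pool)
        return self.pool[:self.n], self.pool[self.n:]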
def main():
pass
if __name__ == '__main__':
main()
|
mit
|
aitoralmeida/morelab-coauthor-analyzer
|
NetworkAnalyzer.py
|
1
|
5061
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 05 16:42:59 2013
@author: aitor
"""
import networkx as nx
import matplotlib.pylab as plt
from collections import OrderedDict
import csv
verbose = True
def get_graph(path):
fh = open(path, 'rb')
G = nx.read_edgelist(fh)
fh.close()
    #remove possible self loops
G.remove_edges_from(G.selfloop_edges())
return G
def write_csv_centralities(file_name, data):
with open(file_name, 'wb') as csvfile:
writer = csv.writer(csvfile, delimiter=';')
for d in data:
if verbose:
print "%s: %s" % (d, data[d])
writer.writerow([d, data[d]])
def write_csv_groups(file_name, groups):
with open(file_name, 'wb') as csvfile:
writer = csv.writer(csvfile, delimiter=';')
for e in groups:
if verbose:
print e
writer.writerow(e)
def write_csv_group(file_name, group):
with open(file_name, 'wb') as csvfile:
writer = csv.writer(csvfile, delimiter=';')
writer.writerow(group)
def get_graph_info(G):
node_num = G.number_of_nodes()
edge_num = G.number_of_edges()
nodes = G.nodes()
if verbose:
print "The loaded network has %s nodes and %s edges\r\n" % (node_num, edge_num)
print "The nodes of the network are:"
for n in nodes:
print n
with open('./data/results/networkInfo.csv', 'wb') as csvfile:
writer = csv.writer(csvfile, delimiter=';')
writer.writerow(['nodes',node_num])
writer.writerow(['edges',edge_num])
def draw_graph(G):
nx.draw(G)
plt.savefig("./images/simpleNetwork.png")
if verbose:
plt.show()
#**********CENTRALITIES***********
def calculate_degree_centrality(G):
cent_degree = nx.degree_centrality(G)
sorted_cent_degree = OrderedDict(sorted(cent_degree.items(), key=lambda t: t[1], reverse=True))
if verbose:
print "\n\r*** Degree Centrality ***"
write_csv_centralities('./data/results/degreeCent.csv', sorted_cent_degree)
def calculate_betweenness_centrality(G):
cent_betweenness = nx.betweenness_centrality(G)
sorted_cent_betweenness = OrderedDict(sorted(cent_betweenness.items(), key=lambda t: t[1], reverse=True))
if verbose:
print "\n\r*** Betweenness Centrality ***"
write_csv_centralities('./data/results/betweennessCent.csv', sorted_cent_betweenness)
def calculate_closeness_centrality(G):
cent_closeness = nx.closeness_centrality(G)
sorted_cent_closeness = OrderedDict(sorted(cent_closeness.items(), key=lambda t: t[1], reverse=True))
if verbose:
print "\n\r*** Closeness Centrality ***"
write_csv_centralities('./data/results/closenessCent.csv', sorted_cent_closeness)
def calculate_eigenvector_centrality(G):
cent_eigenvector = nx.eigenvector_centrality(G)
sorted_cent_eigenvector = OrderedDict(sorted(cent_eigenvector.items(), key=lambda t: t[1], reverse=True))
if verbose:
print "\n\r*** Eigenvector Centrality ***"
write_csv_centralities('./data/results/eigenvectorCent.csv', sorted_cent_eigenvector)
def calculate_pagerank(G):
page_rank = nx.pagerank(G)
sorted_page_rank = OrderedDict(sorted(page_rank.items(), key=lambda t: t[1], reverse=True))
if verbose:
print "\n\r*** PageRank ***"
write_csv_centralities('./data/results/pagerank.csv', sorted_page_rank)
#**********COMMUNITIES***********
def calculate_cliques(G):
cliques = list(nx.find_cliques(G))
if verbose:
print "\n\r*** Cliques ***"
write_csv_groups('./data/results/cliques.csv', cliques)
def calculate_main_k_core(G):
core_main = nx.k_core(G)
nx.draw(core_main)
plt.savefig("./images/kCoreMain.png")
if verbose:
print "\r\nk-Core: Main"
print core_main.nodes()
plt.show()
write_csv_group('./data/results/mainKCore.csv', core_main.nodes())
def calculate_k_core(G, K):
core_k = nx.k_core(G, k=K)
nx.draw(core_k)
plt.savefig("./images/kCore" + str(K) + ".png")
if verbose:
print "\r\nk-Core: " + str(K)
print core_k.nodes()
plt.show()
write_csv_group('./data/results/kCore' + str(K) + '.csv', core_k.nodes())
def calculate_k_clique(G, K):
communities = nx.k_clique_communities(G, K)
if verbose:
print "k-cliques " + str(K)
write_csv_groups('./data/results/kClique' + str(K) + '.csv', communities)
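def calculate_connected_components(G):
    # Editor's illustrative sketch (not part of the original script): follows
    # the same pattern as the functions above, writing each connected
    # component of the co-author graph to a CSV row; the output path is a
    # placeholder chosen to match the existing ./data/results/ convention.
    components = [sorted(c) for c in nx.connected_components(G)]
    if verbose:
        print "\n\r*** Connected components ***"
    write_csv_groups('./data/results/connectedComponents.csv', components)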
if __name__ == '__main__':
    print 'Analyzing co-author data from ' + "./data/coauthors.txt"
G = get_graph("./data/coauthors.txt")
get_graph_info(G)
draw_graph(G)
#centralities
calculate_degree_centrality(G)
calculate_betweenness_centrality(G)
calculate_closeness_centrality(G)
    calculate_eigenvector_centrality(G)
calculate_pagerank(G)
#communities
calculate_cliques(G)
calculate_main_k_core(G)
calculate_k_core(G,4)
#calculate_k_clique(G, 2)
|
apache-2.0
|
planet-os/notebooks
|
api-examples/dh_py_access/lib/dataset.py
|
1
|
4533
|
# -*- coding: utf-8 -*-
import requests
import pandas as pd
from netCDF4 import Dataset
# from lib.parse_urls import parse_urls
from . parse_urls import parse_urls
class dataset:
def __init__(self,datasetkey,datahub):
self.datasetkey = datasetkey
self.datahub=datahub
def variables(self):
variables=parse_urls(self.datahub.server,self.datahub.version,"datasets/"+self.datasetkey+"/variables",self.datahub.apikey)
return variables.r.json()['variables']
def variable_names(self):
return sorted(list(set(list(map(lambda x: x['variableKey'], self.variables())))))
def standard_names(self):
"""
return list of standard names of variables
"""
return self.return_names('standard_name')
def return_names(self,nameversion):
"""
return list of variables by name type
"""
stdnames=[]
for k in self.variables():
for j in k:
if j == 'attributes':
for i in k[j]:
if i['attributeKey']==nameversion:
stdnames.append(i['attributeValue'])
return sorted(list(set(stdnames)))
def get_standard_name_from_variable_name(self,varname):
for i in self.variables():
if i['variableKey'] == varname:
for j in i['attributes']:
if j['attributeKey']=='long_name':
return j['attributeValue']
def long_names(self):
"""
return list of long names of variables
"""
return self.return_names('long_name')
def get_tds_file(self,variable):
"""
        Until something better is found ...
        Returns the first TDS file path that contains the variable name; it should work with either the standard name or the long name.
"""
tdaddr="http://{0}/{1}/data/dataset_physical_contents/{2}?apikey={3}".format(self.datahub.server,self.datahub.version,self.datasetkey,self.datahub.apikey)
r=requests.get(tdaddr).json()
for htt in r:
found_vars=[j for j in htt['variables'] for i in j if j[i]==variable]
if len(found_vars)>0:
return htt['planetosOpenDAPVariables']
def get_tds_field(self,variable):
stdname=self.get_standard_name_from_variable_name(variable)
if not stdname:
stdname=variable
if len(stdname)==0:
stdname=variable
## print("stdname in get_field",stdname)
tdsfile=self.get_tds_file(variable)
assert len(tdsfile)>10, "could not determine TDS path, cannot continue"
## print('TDS file',tdsfile)
ds = Dataset(tdsfile)
vari = ds.variables[variable]
dimlen = len(vari.dimensions)
if dimlen==4:
return vari[0,0,:,:]
elif dimlen==3:
return vari[0,:,:]
elif dimlen==2:
return vari[:,:]
else:
return vari[:]
## raise ValueError("Cannot return 2D array for {0}".format(variable))
def get_json_data_in_pandas(self,count=10,z='all',pandas=True,**kwargs):
def convert_json_to_some_pandas(injson):
param_list = ['axes','data']
new_dict = {}
[new_dict.update({i:[]}) for i in param_list]
[(new_dict['axes'].append(i['axes']),new_dict['data'].append(i['data'])) for i in injson];
pd_temp = pd.DataFrame(injson)
dev_frame = pd_temp[['context','axes']].join(pd.concat([pd.DataFrame(new_dict[i]) for i in param_list],axis=1))
dev_frame = dev_frame[dev_frame['reftime'] == dev_frame['reftime'][0]]
return dev_frame
if not 'count' in kwargs:
kwargs['count'] = count
if not 'z' in kwargs:
kwargs["z"]=z
retjson=parse_urls(self.datahub.server,self.datahub.version,"datasets/{0}/point".format(self.datasetkey),self.datahub.apikey,clean_reftime=False,**kwargs).r.json()
if pandas: retjson=convert_json_to_some_pandas(retjson['entries'])
return retjson
def get_dataset_boundaries(self):
boundaries=parse_urls(self.datahub.server,self.datahub.version,"datasets/"+self.datasetkey,self.datahub.apikey)
rj = boundaries.r.json()['SpatialExtent']
if rj['type'] == 'Polygon':
rdict = rj['coordinates'][0]
elif rj['type'] == 'MultiPolygon':
rdict = rj['coordinates'][0][0]
else:
rdict = rj
return rdict
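# Editor's illustrative usage sketch (hypothetical names, not part of the
# original module): `datahub` stands for any object exposing the .server,
# .version and .apikey attributes that parse_urls expects, and the dataset
# key below is a placeholder.
#
#   ds = dataset('some_dataset_key', datahub)
#   print(ds.variable_names())
#   df = ds.get_json_data_in_pandas(count=5)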
|
mit
|
phbradley/tcr-dist
|
tcrdist/objects.py
|
1
|
2186
|
import numpy as np
import pandas as pd
from . import translation
class DotDict(dict):
def __getattr__(self, item):
if item in self:
return self[item]
raise AttributeError
def __setattr__(self, key, value):
if key in self:
self[key] = value
return
raise AttributeError
def __str__(self):
return pd.Series(self).to_string()
def to_series(self):
return pd.Series(self)
class TCRClone(DotDict):
"""Object that contains all info for a single TCR clone.
As a DotDict attributes can be accessed using dot notation
or standard dict key access."""
alphaAA = ''
betaAA = ''
gammaAA = ''
deltaAA = ''
subjid = ''
cloneid = ''
epitope = ''
def __init__(self, chain1, chain2, **kwargs):
for k in chain1.keys():
self[k] = chain1[k]
for k in chain2.keys():
self[k] = chain2[k]
for k in kwargs.keys():
self[k] = kwargs[k]
class TCRChain(DotDict):
def __init__(self, **kwargs):
for k in kwargs.keys():
self[k] = kwargs[k]
class TCR_Gene:
cdrs_sep = ';'
gap_character = '.'
def __init__( self, l):
self.id = l['id']
self.organism = l['organism']
self.chain = l['chain']
self.region = l['region']
self.nucseq = l['nucseq']
self.alseq = l['aligned_protseq']
if pd.isnull(l['cdrs']):
self.cdrs = []
self.cdr_columns = []
else:
self.cdrs = l['cdrs'].split(self.cdrs_sep)
## these are still 1-indexed !!!!!!!!!!!!!!
            self.cdr_columns = [ list(map(int, x.split('-'))) for x in l['cdr_columns'].split(self.cdrs_sep) ]
frame = l['frame']
assert frame in [1, 2, 3]
self.nucseq_offset = frame - 1 ## 0, 1 or 2 (0-indexed for python)
self.protseq = translation.get_translation( self.nucseq, frame )[0]
assert self.protseq == self.alseq.replace(self.gap_character,'')
# sanity check
if self.cdrs:
assert self.cdrs == [ self.alseq[ x[0]-1 : x[1] ] for x in self.cdr_columns ]
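# Editor's illustrative sketch (not part of the original module): DotDict only
# allows attribute access for keys that already exist, so TCRChain seeds them
# through **kwargs. The key names and values below are placeholders.
#
#   chain = TCRChain(v_gene='TRBV19', cdr3='CASSIRSSYEQYF')
#   chain.cdr3               # same as chain['cdr3']
#   chain.cdr3 = 'CASS'      # allowed: the key already exists
#   chain.new_field = 1      # raises AttributeError by design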
|
mit
|
benitesf/Skin-Lesion-Analysis-Towards-Melanoma-Detection
|
test/gabor/gabor_image_analysis.py
|
1
|
2258
|
import numpy as np
import sys, os
import matplotlib.pyplot as plt
from skimage import io
from skimage.color import rgb2gray
from sklearn import preprocessing
from gabor_filter_banks import gabor_bank
def plot_surface2d(Z):
plt.imshow(Z, cmap='Greys')
#plt.imshow(Z)
#plt.gca().invert_yaxis()
plt.show()
def plot_image_convolved(images, nrows, ncols, figsize=(14,8)):
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize)
plt.gray()
    fig.suptitle('Images filtered with the Gabor filter bank', fontsize=12)
for image, ax in zip(images, fig.axes):
ax.imshow(image, interpolation='nearest')
ax.axis('off')
plt.show()
"""
Frequency analysis of lesion images
-----------------------------------
The goal is to study the frequency spectrum in the lesion regions.
To do so, the melanoma images will be taken, the lesion regions will be isolated, and the
Fourier transform will be used to study them.
This analysis will help define the frequency of the Gabor filter bank more accurately.
"""
from scipy.misc import imread
from scipy import fftpack
import sys, os
sys.path.append("/home/mrobot/Documentos/TFG/code/Skin-Lesion-Analysis-Towards-Melanoma-Detection/")
os.chdir("/home/mrobot/Documentos/TFG/code/Skin-Lesion-Analysis-Towards-Melanoma-Detection/")
melanoma_path = 'image/ISIC-2017_Training_Data_Clean/'
ground_path = 'image/ISIC-2017_Training_Part1_GroundTruth_Clean/'
image = imread(melanoma_path + 'ISIC_0000013.jpg', mode='F')
ground = imread(ground_path + 'ISIC_0000013_segmentation.png', mode='F')
ground /= 255
lesion = image*ground
F1 = fftpack.fft2(lesion)
# Now shift so that low spatial frequencies are in the center.
F2 = fftpack.fftshift(F1)
# the 2D power spectrum is:
psd2D = np.abs(F2)
mms = preprocessing.MinMaxScaler()
filtered = mms.fit_transform(psd2D)
filtered[filtered>0.5] = 1
io.imshow(filtered, cmap='gray')
io.show()
"""
----------------------------------------------
"""
"""
For better visualization
------------------------
mms = preprocessing.MinMaxScaler()
filtered = mms.fit_transform(filtered)
plot_surface2d(filtered)
# io.imshow(filtered)
# io.show()
"""
|
mit
|
RDCEP/climate_emulator
|
emulator/__init__.py
|
1
|
5958
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from data.co2 import co2
from params.geopolitical import PARAMS
from data.regions import GEO_POL, OCEANS
class EmulatorData(object):
def __init__(self, model, start_year=2005, end_year=2100):
self.start_year = start_year
self.end_year = end_year
self.T = end_year - start_year + 1
self.indexes0 = np.arange(self.T)
self.indexes1 = self.indexes0 + 1
self.model = model
self.co2 = co2
self._all_regions = PARAMS
# self._global_regions = GLOBAL_REGIONS
self._region_info = dict(GEO_POL, **OCEANS)
@property
def models(self):
return [k for k, v in PARAMS.iteritems()]
class Emulator(EmulatorData):
def __init__(self, model='CCSM4', rcp='RCP26'):
super(Emulator, self).__init__(model)
self.eta = np.zeros(self.T)
self.rcp = rcp
self.CO2 = self.co2[self.rcp]
self.logCO2 = np.log(self.CO2 / 1e+6)
self.logCO2pi = -8.1879
self.temp = 'relative'
self._regions = self._all_regions[self.model].\
drop(['GMT', 'GLL', 'GLO'], axis=1)
@property
def regions(self):
return self._regions
@regions.setter
def regions(self, value):
self._regions = value
def set_rcp(self, rcp):
self.rcp = rcp
self.CO2 = self.co2[self.rcp]
self.logCO2 = np.log(self.CO2 / 10.**6)
def summation(self, t):
"""
Calculate the summation in the third term of the equation.
"""
# beta_0 + beta_1 * (1 - rho) * sum(rho ** i * log(co2/co2pi))
l = len(self.regions.columns)
L = (l, 1)
exponent = np.empty(t + 1)
coefficient = np.empty(t + 1)
exponent[:] = np.arange(0, t + 1)
coefficient[:] = self.logCO2.iloc[t::-1] - self.logCO2pi
rho = self.regions.loc['rho'].values.reshape(L)
return np.sum(rho ** exponent * coefficient, axis=1)
def error(self, t):
"""
Calculate error (eta).
"""
if t > 0:
self.eta[t] = self.regions.loc['phi'] * self.eta[t-1] + .000001
return self.eta[t]
def step(self, t):
"""
Calculate value for a single year of the matrix.
"""
return self.regions.loc['beta0'] + (
self.regions.loc['beta1'] * (1 - self.regions.loc['rho']) *
self.summation(t)) + self.error(t)
def curve(self):
self.eta = np.zeros((len(self.co2), len(self.regions.columns)))
years = np.linspace(2005, 2100, 96)
data = pd.DataFrame(index=years,
columns=self.regions.columns, dtype=np.float64)
for i in xrange(len(self.co2)):
data.iloc[i] = self.step(i)
return data
def write_rcp_input(self):
output = []
for co2 in self.co2:
output.append(np.array(self.co2[co2]).tolist())
return output
def get_mean_rcp_output(self, co2=False, rcp=None, temp=None, region='GMT'):
_input = None
if rcp is not None:
self.set_rcp(rcp)
if temp is not None:
self.temp = temp
else:
rcp = 'CUSTOM'
self.co2[rcp] = pd.Series(np.array(co2), index=self.co2.index)
self.CO2 = self.co2[rcp]
self.logCO2 = np.log(self.CO2 / 10.**6)
_input = co2
data = dict(data=list()) # {'data': []}
data['input'] = _input
j = 0
for model in self.models:
self.regions = self._all_regions[model].\
loc[:, ('GMT', 'GLL', 'GLO')]
d = self.curve()
if self.temp == 'absolute':
_t = d.loc[:, region] - \
self.regions.loc['model_mean', region] + \
self.regions.loc['multi_model_mean', region]
_t = np.around(_t - 273.15, decimals=2).tolist()
else:
_t = np.around(
d.loc[:, region] - d.loc[2005, region], decimals=2).tolist()
data['data'].append({
'abbr': model,
'name': model,
'data': _t,
'temp_type': temp,
})
j += 1
return data
def get_model_rcp_output(self, co2=False, model=None, rcp=None, temp=None):
_input = None
#TODO: These ifs are shit.
if model is not None:
self.model = model
self.regions = self._all_regions[self.model].\
drop(['GMT', 'GLL', 'GLO'], axis=1)
if rcp is not None:
self.set_rcp(rcp)
if temp is not None:
self.temp = temp
else:
rcp = 'CUSTOM'
self.co2[rcp] = pd.Series(np.array(co2), index=self.co2.index)
self.CO2 = self.co2[rcp]
self.logCO2 = np.log(self.CO2 / 10.**6)
_input = co2
data = dict(data=[], input=_input)
d = self.curve()
j = 0
for region in d.columns:
if self.temp == 'absolute':
_t = d.loc[:, region] - \
self.regions.loc['model_mean', region] + \
self.regions.loc['multi_model_mean', region]
_t = np.around(_t - 273.15, decimals=2).tolist()
else:
_t = np.around(
d.loc[:, region] - d.loc[2005, region], decimals=2).tolist()
data['data'].append({
'abbr': region,
'data': _t,
'temp_type': temp,
'name': self._region_info[region]['name'],
'class': self._region_info[region]['class'],
})
j += 1
return data
if __name__ == '__main__':
pass
# import cProfile
# cProfile.run('foo()')
# e = Emulator()
# print e.curve()
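# Editor's illustrative usage sketch (commented out because it needs the
# package's co2 and params data files); it mirrors the commented-out lines
# above.
#
#   e = Emulator(model='CCSM4', rcp='RCP26')
#   regional = e.curve()
#   gmt = e.get_mean_rcp_output(rcp='RCP26', temp='relative', region='GMT')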
|
gpl-3.0
|
treycausey/scikit-learn
|
sklearn/metrics/pairwise.py
|
1
|
42965
|
# -*- coding: utf-8 -*-
"""
The :mod:`sklearn.metrics.pairwise` submodule implements utilities to evaluate
pairwise distances, paired distances or affinity of sets of samples.
This module contains both distance metrics and kernels. A brief summary is
given on the two here.
Distance metrics are a function d(a, b) such that d(a, b) < d(a, c) if objects
a and b are considered "more similar" than objects a and c. Two objects exactly
alike would have a distance of zero.
One of the most popular examples is Euclidean distance.
To be a 'true' metric, it must obey the following four conditions::
1. d(a, b) >= 0, for all a and b
2. d(a, b) == 0, if and only if a = b, positive definiteness
3. d(a, b) == d(b, a), symmetry
4. d(a, c) <= d(a, b) + d(b, c), the triangle inequality
Kernels are measures of similarity, i.e. ``s(a, b) > s(a, c)``
if objects ``a`` and ``b`` are considered "more similar" than objects
``a`` and ``c``. A kernel must also be positive semi-definite.
There are a number of ways to convert between a distance metric and a
similarity measure, such as a kernel. Let D be the distance, and S be the
kernel:
1. ``S = np.exp(-D * gamma)``, where one heuristic for choosing
``gamma`` is ``1 / num_features``
2. ``S = 1. / (D / np.max(D))``
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck <larsmans@gmail.com>
# License: BSD 3 clause
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import array2d, atleast2d_or_csr
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils import safe_asarray
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
# Utility Functions
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
if Y is X or Y is None:
X = Y = atleast2d_or_csr(X)
else:
X = atleast2d_or_csr(X)
Y = atleast2d_or_csr(Y)
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
if not (X.dtype == Y.dtype == np.float32):
if Y is X:
X = Y = safe_asarray(X, dtype=np.float)
else:
X = safe_asarray(X, dtype=np.float)
Y = safe_asarray(Y, dtype=np.float)
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if x varies but y remains unchanged, then the right-most dot
product `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
    paired_distances : distances between pairs of elements of X and Y.
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if Y_norm_squared is not None:
YY = array2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
XX = YY.T
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances)
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x] = np.where(
flags, min_indices + chunk_y.start, indices[chunk_x])
values[chunk_x] = np.where(
flags, min_values, values[chunk_x])
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs={}):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
==========
X, Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
Returns
=======
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
========
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Avoid creating temporary matrices bigger than size_threshold (in
bytes). If the problem size gets too big, the implementation then
breaks it down in smaller problems.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
temporary_size = X.size * Y.shape[-1]
# Convert to bytes
temporary_size *= X.itemsize
if temporary_size > size_threshold and sum_over_features:
# Broadcasting the full thing would be too big: it's on the order
# of magnitude of the gigabyte
D = np.empty((X.shape[0], Y.shape[0]), dtype=X.dtype)
index = 0
increment = 1 + int(size_threshold / float(temporary_size) *
X.shape[0])
while index < X.shape[0]:
this_slice = slice(index, index + increment)
tmp = X[this_slice, np.newaxis, :] - Y[np.newaxis, :, :]
tmp = np.abs(tmp, tmp)
tmp = np.sum(tmp, axis=2)
D[this_slice] = tmp
index += increment
else:
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
if sum_over_features:
D = np.sum(D, axis=2)
else:
D = D.reshape((-1, X.shape[1]))
return D
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return np.sqrt(((X - Y) ** 2).sum(axis=-1))
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return np.abs(X - Y).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
    The cosine distance is equivalent to half the squared
euclidean distance if each sample is normalized to unit norm
"""
X, Y = check_paired_arrays(X, Y)
X_normalized = normalize(X, copy=True)
X_normalized -= normalize(Y, copy=True)
return .5 * (X_normalized ** 2).sum(axis=-1)
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances,
}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Parameters
----------
X, Y : ndarray (n_samples, n_features)
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
    degree : int, default 3
    gamma : float, default None
        If None, defaults to 1.0 / n_features
    coef0 : int, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = linear_kernel(X, Y)
K *= gamma
K += coef0
K **= degree
return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features
    coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = linear_kernel(X, Y)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = linear_kernel(X_normalized, Y_normalized)
return K
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
ret = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(func)(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Please note that support for sparse matrices is currently limited to
'euclidean', 'l2' and 'cosine'.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
if n_jobs == 1:
return func(X, Y, **kwds)
else:
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
elif callable(metric):
# Check matrices first (this is usually done by the metric).
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
# Calculate distance for each element in X and Y.
# FIXME: can use n_jobs here too
# FIXME: np.zeros can be replaced by np.empty
D = np.zeros((n_x, n_y), dtype='float')
for i in range(n_x):
start = 0
if X is Y:
start = i
for j in range(start, n_y):
# distance assumed to be symmetric.
D[i][j] = metric(X[i], Y[j], **kwds)
if X is Y:
D[j][i] = D[i][j]
return D
else:
# Note: the distance module doesn't support sparse matrices!
if type(X) is csr_matrix:
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
if Y is None:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
else:
if type(Y) is csr_matrix:
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
return distance.cdist(X, Y, metric=metric, **kwds)
# Helper functions - kernel
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params: boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
if n_jobs == 1:
return func(X, Y, **kwds)
else:
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
elif callable(metric):
# Check matrices first (this is usually done by the metric).
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
# Calculate kernel for each element in X and Y.
K = np.zeros((n_x, n_y), dtype='float')
for i in range(n_x):
start = 0
if X is Y:
start = i
for j in range(start, n_y):
# Kernel assumed to be symmetric.
K[i][j] = metric(X[i], Y[j], **kwds)
if X is Y:
K[j][i] = K[i][j]
return K
else:
raise ValueError("Unknown kernel %r" % metric)
|
bsd-3-clause
|
cielling/jupyternbs
|
tests/tester_mmm.py
|
1
|
4838
|
from __future__ import print_function
import pandas as pd
from sys import path as syspath
syspath.insert(0, "..")
from CanslimParams import CanslimParams
import numpy as np
from datetime import date, timedelta, datetime
#from datetime import datetime.now as now
from myAssert import areEqual
from os import path as ospath
## "current date" = 2019 Q1
currentDate = date(2019,2,1)
ticker = "MMM"
## Data from SEC-filings
## 10-Q's
epsQ = np.array([1.54, 2.3, 2.64, 3.14, 1.01, 0.88, 2.39, 2.65, 2.21, 1.92, 2.2, 2.13, 2.1, 1.69, 2.09, 2.06, 1.88])
datesQ = np.array([date(2019, 3, 31), date(2018, 12, 31), date(2018, 9, 30), date(2018, 6, 30), date(2018, 3, 31), date(2017, 12, 31), date(2017, 9, 30), date(2017, 6, 30), date(2017, 3, 31), date(2016, 12, 31), date(2016, 9, 30), date(2016, 6, 30), date(2016, 3, 31), date(2015, 12, 31), date(2015, 9, 30), date(2015, 6, 30), date(2015, 3, 31)])
salesQ = np.array([7863000000, 7945000000, 8152000000, 8390000000, 8278000000, 8008000000, 8172000000, 7810000000, 7685000000, 7329000000, 7709000000, 7662000000, 7409000000, 7298000000, 7712000000, 7686000000, 7578000000])
seQ = np.array([9703000000, 10407000000, 10248000000, 10365000000, 10977000000, 11672000000, 12146000000, 11591000000, 10989000000, 11316000000, 12002000000, 11894000000, 11733000000, 12484000000, 12186000000, 13093000000, 13917000000])
niQ = np.array([891000000, 1361000000, 1543000000, 1857000000, 602000000, 534000000, 1429000000, 1583000000, 1323000000, 1163000000, 1329000000, 1291000000, 1275000000, 1041000000, 1296000000, 1303000000, 1201000000])
delta = datesQ - datesQ[0]
l = []
for d in delta:
l.append(d.days)
daysQ = np.array(l)
## 10-K's
epsY = np.array([9.09, 8.13, 8.35, 7.72])
datesY = np.array([date(2018, 12, 31), date(2017, 12, 31), date(2016, 12, 31), date(2015, 12, 31)])
salesY = np.array([32765000000, 31675000000, 30109000000, 30274000000])
seY = np.array([10407000000, 11672000000, 11316000000, 12484000000])
niY = np.array([5363000000, 4869000000, 5058000000, 4841000000])
delta = datesY - datesY[0]
l = []
for d in delta:
l.append(d.days)
daysY = np.array(l)
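## Hedged aside (not part of the original test): the loops above just extract
## the .days attribute from each timedelta; an equivalent one-liner would be
##   daysQ = np.array([d.days for d in (datesQ - datesQ[0])])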
print("Testing ticker {:s}".format(ticker))
testDir = ospath.join("..", "TestData")
all10Ks = pd.read_csv(ospath.join(testDir, "{:s}_all_10ks.csv".format(ticker.lower())), parse_dates=['date'], dtype={'cik':str, 'conm':str, 'type':str,'path':str})
all10Qs = pd.read_csv(ospath.join(testDir, "{:s}_all_10qs.csv".format(ticker.lower())), parse_dates=['date'], dtype={'cik':str, 'conm':str, 'type':str,'path':str})
canslim = CanslimParams(ticker, all10Qs, all10Ks)
## Load the data, and proceed if successful.
oldestDate = datetime(2014, 1, 1)
if canslim.loadData(testDir, oldestDate):
## Test all the EPS stuff
for q in range(0, len(epsQ)):
print("Getting EPS for quarter {:d}".format(q))
value = canslim.getEpsQuarter(-q)
areEqual(epsQ[q], value)
for y in range(0, len(epsY)):
print("Getting EPS for year {:d}".format(y))
value = canslim.getEpsAnnual(-y)
areEqual(epsY[y], value)
print("Getting EPS growth for Q-1 to Q-2:")
expect = epsQ[1] / epsQ[2] * 100.0
val = canslim.getEpsGrowthQuarter(-1, -2)
areEqual(expect, val)
# print("Getting EPS growth for Y0 to Y-1:")
# expect = -0.37/-2.58
# val = canslim.getEpsGrowthAnnual(0, -1)
# areEqual(expect, val)
# print("Getting EPS growth rate for Q-2 to Q-1:")
# expect = 0.
# val = canslim.getEpsGrowthRateQuarter(-2, -1)
# areEqual(expect, val)
## Test the Sales stuff
for q in range(0, len(salesQ)):
print("Getting SALES for quarter {:d}".format(q))
value = canslim.getSalesQuarter(-q)
areEqual(salesQ[q], value)
for y in range(0, len(salesY)):
print("Getting SALES for year {:d}".format(y))
value = canslim.getSalesAnnual(-y)
areEqual(salesY[y], value)
# print("Getting sales growth between Q0 and Q-2:")
# expect = 0.
# val = canslim.getSalesGrowthQuarter(0, -2)
# areEqual(expect, val)
# print("Getting sales growth rate between Q0 and Q-2:")
# expect = 0.
# val = canslim.getSalesGrowthRateQuarter(-2, 0)
# areEqual(expect, val)
## Test the ROE
print("Getting current ROE (TTM):")
expect = np.sum(niQ[0:4]) / np.average(seQ[0:4]) * 100.0
val = canslim.getRoeTTM()
areEqual(expect, val)
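    ## Hedged aside (not part of the original test): the reference value above is
    ## trailing-twelve-month net income divided by the average shareholder equity
    ## of the same four quarters, expressed as a percent.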
print("Getting stability of EPS - last 12 Q:")
expect = 3.2385
val = canslim.getStabilityOfEpsGrowth(12)
areEqual(expect, val)
## Test the auxiliary functions
## Print all errors that were logged.
canslim.logErrors()
print("Errors written to Logs/{:s}_log.txt".format(ticker))
else:
print("Unable to load data for {:s}".format(ticker))
del canslim
|
agpl-3.0
|
robblack007/clase-dinamica-robot
|
Practicas/practica1/robots/estaciones.py
|
4
|
2501
|
def estacion_3gdl(puerto_zmq = "5555"):
'''
    This function creates a 0MQ socket to publish data for three reference signals.
>>> from robots.estaciones import estacion_3gdl
>>> estacion_3gdl("5555")
Iniciando estacion de referencias en el puerto 5555
'''
from zmq import Context, PUB
from msgpack import packb
from ipywidgets import interact
context = Context()
socket = context.socket(PUB)
socket.bind("tcp://*:" + puerto_zmq)
def mandar_mensaje(q1=0, q2=0, q3=0):
socket.send(packb([q1, q2, q3]))
print("Iniciando estacion de referencias en el puerto " + puerto_zmq)
interact(mandar_mensaje, q1=(-180.0, 180.0), q2=(-180.0, 180.0), q3=(-180.0, 180.0));
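# Hedged companion sketch (not part of the original module): a minimal 0MQ
# subscriber for the publisher created by estacion_3gdl, assuming pyzmq and
# msgpack are installed and the station is running on localhost. The function
# name is hypothetical.
def _ejemplo_suscriptor(puerto_zmq="5555"):
    from zmq import Context, SUB, SUBSCRIBE
    from msgpack import unpackb
    context = Context()
    socket = context.socket(SUB)
    socket.connect("tcp://localhost:" + puerto_zmq)
    socket.setsockopt(SUBSCRIBE, b"")
    return unpackb(socket.recv())  # blocks until one message, e.g. [q1, q2, q3]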
def estacion_1gdl(puerto_zmq = "5555"):
'''
    This function creates a 0MQ socket to publish data for a single reference signal.
    >>> from robots.estaciones import estacion_1gdl
    >>> estacion_1gdl("5555")
Iniciando estacion de referencias en el puerto 5555
'''
from zmq import Context, PUB
from msgpack import packb
from ipywidgets import interact
context = Context()
socket = context.socket(PUB)
socket.bind("tcp://*:" + puerto_zmq)
def mandar_mensaje(q1=0):
socket.send(packb([q1]))
print("Iniciando estacion de referencias en el puerto " + puerto_zmq)
interact(mandar_mensaje, q1=(-180.0, 180.0));
def gen_sen_3gdl(puerto_zmq, gen1=True, gen2=True, gen3=True):
    '''
    Publishes a sinusoidal reference (30*sin(pi*t)) over a 0MQ PUB socket for up
    to three joints, sending zero for any disabled channel, and live-plots the
    generated signal until interrupted with Ctrl-C.
    '''
from zmq import Context, PUB
from msgpack import packb
from matplotlib.pyplot import figure
from time import time, sleep
from numpy import sin, pi
context = Context()
socket = context.socket(PUB)
socket.bind("tcp://*:" + puerto_zmq)
def mandar_mensaje(señal, g1, g2, g3):
socket.send(packb([señal if gen else 0 for gen in [g1, g2, g3]]))
fig = figure(figsize=(6,3))
ax = fig.gca()
t0 = time()
ts = []
ys = []
while True:
try:
t = time()-t0
if t >= 0.005:
y = 30*sin(pi*t)
mandar_mensaje(y, gen1, gen2, gen3)
ts.append(t)
ys.append(y)
ax.clear()
if len(ys) > 100:
ax.plot(ts[-100:], ys[-100:])
else:
ax.plot(ts, ys)
fig.canvas.draw()
except KeyboardInterrupt:
break
|
mit
|
nachandr/cfme_tests
|
cfme/utils/smem_memory_monitor.py
|
2
|
67126
|
"""Monitor Memory on a CFME/Miq appliance and builds report&graphs displaying usage per process."""
import json
import os
import time
import traceback
from collections import OrderedDict
from datetime import datetime
from threading import Thread
import yaml
from yaycl import AttrDict
from cfme.utils.conf import cfme_performance
from cfme.utils.log import logger
from cfme.utils.path import results_path
from cfme.utils.version import current_version
from cfme.utils.version import get_version
miq_workers = [
'MiqGenericWorker',
'MiqPriorityWorker',
'MiqScheduleWorker',
'MiqUiWorker',
'MiqWebServiceWorker',
'MiqWebsocketWorker',
'MiqReportingWorker',
'MiqReplicationWorker',
'MiqSmartProxyWorker',
'MiqVimBrokerWorker',
'MiqEmsRefreshCoreWorker',
# Refresh Workers:
'ManageIQ::Providers::Microsoft::InfraManager::RefreshWorker',
'ManageIQ::Providers::Openstack::InfraManager::RefreshWorker',
'ManageIQ::Providers::Redhat::InfraManager::RefreshWorker',
'ManageIQ::Providers::Vmware::InfraManager::RefreshWorker',
'MiqEmsRefreshWorkerMicrosoft', # 5.4
'MiqEmsRefreshWorkerRedhat', # 5.4
'MiqEmsRefreshWorkerVmware', # 5.4
'ManageIQ::Providers::Amazon::CloudManager::RefreshWorker',
'ManageIQ::Providers::Azure::CloudManager::RefreshWorker',
'ManageIQ::Providers::Google::CloudManager::RefreshWorker',
'ManageIQ::Providers::Openstack::CloudManager::RefreshWorker',
'MiqEmsRefreshWorkerAmazon', # 5.4
'MiqEmsRefreshWorkerOpenstack', # 5.4
'ManageIQ::Providers::AnsibleTower::ConfigurationManager::RefreshWorker',
'ManageIQ::Providers::Foreman::ConfigurationManager::RefreshWorker',
'ManageIQ::Providers::Foreman::ProvisioningManager::RefreshWorker',
'MiqEmsRefreshWorkerForemanConfiguration', # 5.4
'MiqEmsRefreshWorkerForemanProvisioning', # 5.4
'ManageIQ::Providers::Atomic::ContainerManager::RefreshWorker',
'ManageIQ::Providers::AtomicEnterprise::ContainerManager::RefreshWorker',
'ManageIQ::Providers::Kubernetes::ContainerManager::RefreshWorker',
'ManageIQ::Providers::Openshift::ContainerManager::RefreshWorker',
'ManageIQ::Providers::OpenshiftEnterprise::ContainerManager::RefreshWorker',
'ManageIQ::Providers::StorageManager::CinderManager::RefreshWorker',
'ManageIQ::Providers::StorageManager::SwiftManager::RefreshWorker',
'ManageIQ::Providers::Amazon::NetworkManager::RefreshWorker',
'ManageIQ::Providers::Azure::NetworkManager::RefreshWorker',
'ManageIQ::Providers::Google::NetworkManager::RefreshWorker',
'ManageIQ::Providers::Openstack::NetworkManager::RefreshWorker',
'MiqNetappRefreshWorker',
'MiqSmisRefreshWorker',
# Event Workers:
'MiqEventHandler',
'ManageIQ::Providers::Openstack::InfraManager::EventCatcher',
'ManageIQ::Providers::StorageManager::CinderManager::EventCatcher',
'ManageIQ::Providers::Redhat::InfraManager::EventCatcher',
'ManageIQ::Providers::Vmware::InfraManager::EventCatcher',
'MiqEventCatcherRedhat', # 5.4
'MiqEventCatcherVmware', # 5.4
'ManageIQ::Providers::Amazon::CloudManager::EventCatcher',
'ManageIQ::Providers::Azure::CloudManager::EventCatcher',
'ManageIQ::Providers::Google::CloudManager::EventCatcher',
'ManageIQ::Providers::Openstack::CloudManager::EventCatcher',
'MiqEventCatcherAmazon', # 5.4
'MiqEventCatcherOpenstack', # 5.4
'ManageIQ::Providers::Atomic::ContainerManager::EventCatcher',
'ManageIQ::Providers::AtomicEnterprise::ContainerManager::EventCatcher',
'ManageIQ::Providers::Kubernetes::ContainerManager::EventCatcher',
'ManageIQ::Providers::Openshift::ContainerManager::EventCatcher',
'ManageIQ::Providers::OpenshiftEnterprise::ContainerManager::EventCatcher',
'ManageIQ::Providers::Openstack::NetworkManager::EventCatcher',
# Metrics Processor/Collector Workers
'MiqEmsMetricsProcessorWorker',
'ManageIQ::Providers::Openstack::InfraManager::MetricsCollectorWorker',
'ManageIQ::Providers::Redhat::InfraManager::MetricsCollectorWorker',
'ManageIQ::Providers::Vmware::InfraManager::MetricsCollectorWorker',
'MiqEmsMetricsCollectorWorkerRedhat', # 5.4
'MiqEmsMetricsCollectorWorkerVmware', # 5.4
'ManageIQ::Providers::Amazon::CloudManager::MetricsCollectorWorker',
'ManageIQ::Providers::Azure::CloudManager::MetricsCollectorWorker',
'ManageIQ::Providers::Openstack::CloudManager::MetricsCollectorWorker',
'MiqEmsMetricsCollectorWorkerAmazon', # 5.4
'MiqEmsMetricsCollectorWorkerOpenstack', # 5.4
'ManageIQ::Providers::Atomic::ContainerManager::MetricsCollectorWorker',
'ManageIQ::Providers::AtomicEnterprise::ContainerManager::MetricsCollectorWorker',
'ManageIQ::Providers::Kubernetes::ContainerManager::MetricsCollectorWorker',
'ManageIQ::Providers::Openshift::ContainerManager::MetricsCollectorWorker',
'ManageIQ::Providers::OpenshiftEnterprise::ContainerManager::MetricsCollectorWorker',
'ManageIQ::Providers::Openstack::NetworkManager::MetricsCollectorWorker',
'MiqStorageMetricsCollectorWorker',
'MiqVmdbStorageBridgeWorker']
ruby_processes = list(miq_workers)
ruby_processes.extend(['evm:dbsync:replicate', 'MIQ Server (evm_server.rb)', 'evm_watchdog.rb',
'appliance_console.rb'])
process_order = list(ruby_processes)
process_order.extend(['memcached', 'postgres', 'httpd', 'collectd'])
# Timestamp created at first import, so all reports from the same workload run are grouped together
test_ts = time.strftime('%Y%m%d%H%M%S')
# 10s sample interval (occasionally sampling can take almost 4s on an appliance doing a lot of work)
SAMPLE_INTERVAL = 10
class SmemMemoryMonitor(Thread):
def __init__(self, ssh_client, scenario_data):
super().__init__()
self.ssh_client = ssh_client
self.scenario_data = scenario_data
self.grafana_urls = {}
self.miq_server_id = ''
self.use_slab = False
self.signal = True
def create_process_result(self, process_results, starttime, process_pid, process_name,
memory_by_pid):
if process_pid in list(memory_by_pid.keys()):
if process_name not in process_results:
process_results[process_name] = OrderedDict()
process_results[process_name][process_pid] = OrderedDict()
if process_pid not in process_results[process_name]:
process_results[process_name][process_pid] = OrderedDict()
process_results[process_name][process_pid][starttime] = {}
rss_mem = memory_by_pid[process_pid]['rss']
pss_mem = memory_by_pid[process_pid]['pss']
uss_mem = memory_by_pid[process_pid]['uss']
vss_mem = memory_by_pid[process_pid]['vss']
swap_mem = memory_by_pid[process_pid]['swap']
process_results[process_name][process_pid][starttime]['rss'] = rss_mem
process_results[process_name][process_pid][starttime]['pss'] = pss_mem
process_results[process_name][process_pid][starttime]['uss'] = uss_mem
process_results[process_name][process_pid][starttime]['vss'] = vss_mem
process_results[process_name][process_pid][starttime]['swap'] = swap_mem
del memory_by_pid[process_pid]
else:
            logger.warning(f'Process {process_name} pid not found: {process_pid}')
def get_appliance_memory(self, appliance_results, plottime):
# 5.5/5.6 - RHEL 7 / Centos 7
# Application Memory Used : MemTotal - (MemFree + Slab + Cached)
# 5.4 - RHEL 6 / Centos 6
# Application Memory Used : MemTotal - (MemFree + Buffers + Cached)
        # Available memory could potentially be a better metric
appliance_results[plottime] = {}
result = self.ssh_client.run_command('cat /proc/meminfo')
if result.failed:
logger.error('Exit_status nonzero in get_appliance_memory: {}, {}'
.format(result.rc, result.output))
del appliance_results[plottime]
else:
meminfo_raw = result.output.replace('kB', '').strip()
meminfo = OrderedDict((k.strip(), v.strip()) for k, v in
(value.strip().split(':') for value in meminfo_raw.split('\n')))
appliance_results[plottime]['total'] = float(meminfo['MemTotal']) / 1024
appliance_results[plottime]['free'] = float(meminfo['MemFree']) / 1024
if 'MemAvailable' in meminfo: # 5.5, RHEL 7/Centos 7
self.use_slab = True
mem_used = (float(meminfo['MemTotal']) - (float(meminfo['MemFree']) + float(
meminfo['Slab']) + float(meminfo['Cached']))) / 1024
else: # 5.4, RHEL 6/Centos 6
mem_used = (float(meminfo['MemTotal']) - (float(meminfo['MemFree']) + float(
meminfo['Buffers']) + float(meminfo['Cached']))) / 1024
appliance_results[plottime]['used'] = mem_used
appliance_results[plottime]['buffers'] = float(meminfo['Buffers']) / 1024
appliance_results[plottime]['cached'] = float(meminfo['Cached']) / 1024
appliance_results[plottime]['slab'] = float(meminfo['Slab']) / 1024
appliance_results[plottime]['swap_total'] = float(meminfo['SwapTotal']) / 1024
appliance_results[plottime]['swap_free'] = float(meminfo['SwapFree']) / 1024
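    # Hedged illustration (not part of the original class): on a RHEL 7 based
    # appliance the branch above reduces to
    #   used_MiB = (MemTotal - (MemFree + Slab + Cached)) / 1024
    # with every value read as kB from /proc/meminfo.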
def get_evm_workers(self):
result = self.ssh_client.run_command(
'psql -t -q -d vmdb_production -c '
'\"select pid,type from miq_workers where miq_server_id = \'{}\'\"'.format(
self.miq_server_id))
if result.output.strip():
workers = {}
for worker in result.output.strip().split('\n'):
pid_worker = worker.strip().split('|')
if len(pid_worker) == 2:
workers[pid_worker[0].strip()] = pid_worker[1].strip()
else:
logger.error(f'Unexpected output from psql: {worker}')
return workers
else:
return {}
# Old method of obtaining per process memory (Appliances without smem)
# def get_pids_memory(self):
# result = self.ssh_client.run_command(
# 'ps -A -o pid,rss,vsz,comm,cmd | sed 1d')
# pids_memory = result.output.strip().split('\n')
# memory_by_pid = {}
# for line in pids_memory:
# values = [s for s in line.strip().split(' ') if s]
# pid = values[0]
# memory_by_pid[pid] = {}
# memory_by_pid[pid]['rss'] = float(values[1]) / 1024
# memory_by_pid[pid]['vss'] = float(values[2]) / 1024
# memory_by_pid[pid]['name'] = values[3]
# memory_by_pid[pid]['cmd'] = ' '.join(values[4:])
# return memory_by_pid
def get_miq_server_id(self):
# Obtain the Miq Server GUID:
result = self.ssh_client.run_command('cat /var/www/miq/vmdb/GUID')
logger.info(f'Obtained appliance GUID: {result.output.strip()}')
# Get server id:
result = self.ssh_client.run_command(
'psql -t -q -d vmdb_production -c "select id from miq_servers where guid = \'{}\'"'
''.format(result.output.strip()))
logger.info(f'Obtained miq_server_id: {result.output.strip()}')
self.miq_server_id = result.output.strip()
def get_pids_memory(self):
result = self.ssh_client.run_command(
"/usr/bin/python2.7 /usr/bin/smem -c 'pid rss pss uss vss swap name command' | sed 1d")
pids_memory = result.output.strip().split('\n')
memory_by_pid = {}
for line in pids_memory:
if line.strip():
try:
values = [s for s in line.strip().split(' ') if s]
pid = values[0]
int(pid)
memory_by_pid[pid] = {}
memory_by_pid[pid]['rss'] = float(values[1]) / 1024
memory_by_pid[pid]['pss'] = float(values[2]) / 1024
memory_by_pid[pid]['uss'] = float(values[3]) / 1024
memory_by_pid[pid]['vss'] = float(values[4]) / 1024
memory_by_pid[pid]['swap'] = float(values[5]) / 1024
memory_by_pid[pid]['name'] = values[6]
memory_by_pid[pid]['cmd'] = ' '.join(values[7:])
except Exception as e:
logger.error(f'Processing smem output error: {e.__class__.__name__}')
logger.error(f'Issue with pid: {pid} line: {line}')
logger.error(f'Complete smem output: {result.output}')
return memory_by_pid
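    # Hedged illustration (not part of the original class): each smem line parsed
    # above follows the requested column order, e.g. (sizes in kB before the
    # division by 1024):
    #   "2412 512000 480000 450000 2048000 0 ruby MIQ Server (evm_server.rb)"
    # i.e. pid, rss, pss, uss, vss, swap, name, then the remaining command line.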
def _real_run(self):
""" Result dictionaries:
appliance_results[timestamp][measurement] = value
appliance_results[timestamp]['total'] = value
appliance_results[timestamp]['free'] = value
appliance_results[timestamp]['used'] = value
appliance_results[timestamp]['buffers'] = value
appliance_results[timestamp]['cached'] = value
appliance_results[timestamp]['slab'] = value
appliance_results[timestamp]['swap_total'] = value
appliance_results[timestamp]['swap_free'] = value
appliance measurements: total/free/used/buffers/cached/slab/swap_total/swap_free
process_results[name][pid][timestamp][measurement] = value
process_results[name][pid][timestamp]['rss'] = value
process_results[name][pid][timestamp]['pss'] = value
process_results[name][pid][timestamp]['uss'] = value
process_results[name][pid][timestamp]['vss'] = value
process_results[name][pid][timestamp]['swap'] = value
"""
appliance_results = OrderedDict()
process_results = OrderedDict()
install_smem(self.ssh_client)
self.get_miq_server_id()
logger.info('Starting Monitoring Thread.')
while self.signal:
starttime = time.time()
plottime = datetime.now()
self.get_appliance_memory(appliance_results, plottime)
workers = self.get_evm_workers()
memory_by_pid = self.get_pids_memory()
for worker_pid in workers:
self.create_process_result(process_results, plottime, worker_pid,
workers[worker_pid], memory_by_pid)
for pid in sorted(memory_by_pid.keys()):
if memory_by_pid[pid]['name'] == 'httpd':
self.create_process_result(process_results, plottime, pid, 'httpd',
memory_by_pid)
elif memory_by_pid[pid]['name'] == 'postgres':
self.create_process_result(process_results, plottime, pid, 'postgres',
memory_by_pid)
elif memory_by_pid[pid]['name'] == 'postmaster':
self.create_process_result(process_results, plottime, pid, 'postgres',
memory_by_pid)
elif memory_by_pid[pid]['name'] == 'memcached':
self.create_process_result(process_results, plottime, pid, 'memcached',
memory_by_pid)
elif memory_by_pid[pid]['name'] == 'collectd':
self.create_process_result(process_results, plottime, pid, 'collectd',
memory_by_pid)
elif memory_by_pid[pid]['name'] == 'ruby':
if 'evm_server.rb' in memory_by_pid[pid]['cmd']:
self.create_process_result(process_results, plottime, pid,
'MIQ Server (evm_server.rb)', memory_by_pid)
elif 'MIQ Server' in memory_by_pid[pid]['cmd']:
self.create_process_result(process_results, plottime, pid,
'MIQ Server (evm_server.rb)', memory_by_pid)
elif 'evm_watchdog.rb' in memory_by_pid[pid]['cmd']:
self.create_process_result(process_results, plottime, pid,
'evm_watchdog.rb', memory_by_pid)
elif 'appliance_console.rb' in memory_by_pid[pid]['cmd']:
self.create_process_result(process_results, plottime, pid,
'appliance_console.rb', memory_by_pid)
elif 'evm:dbsync:replicate' in memory_by_pid[pid]['cmd']:
self.create_process_result(process_results, plottime, pid,
'evm:dbsync:replicate', memory_by_pid)
else:
logger.debug(f'Unaccounted for ruby pid: {pid}')
timediff = time.time() - starttime
logger.debug('Monitoring sampled in {}s'.format(round(timediff, 4)))
# Sleep Monitoring interval
# Roughly 10s samples, accounts for collection of memory measurements
time_to_sleep = abs(SAMPLE_INTERVAL - timediff)
time.sleep(time_to_sleep)
logger.info('Monitoring CFME Memory Terminating')
create_report(self.scenario_data, appliance_results, process_results, self.use_slab,
self.grafana_urls)
def run(self):
try:
self._real_run()
except Exception as e:
logger.error(f'Error in Monitoring Thread: {e}')
logger.error(traceback.format_exc())
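# Hedged usage sketch (not part of the original module): SmemMemoryMonitor is a
# Thread, so a caller that already has an ssh_client and scenario_data would
# typically do something like:
#   monitor = SmemMemoryMonitor(ssh_client, scenario_data)
#   monitor.start()
#   ...run the workload under test...
#   monitor.signal = False  # ask the sampling loop to stop
#   monitor.join()          # the report and graphs are written as the loop exits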
def install_smem(ssh_client):
# smem is included by default in 5.6 appliances
logger.info('Installing smem.')
ver = get_version()
if ver == '55':
ssh_client.run_command('rpm -i {}'.format(cfme_performance['tools']['rpms']['epel7_rpm']))
ssh_client.run_command('yum install -y smem')
# Patch smem to display longer command line names
logger.info('Patching smem')
ssh_client.run_command(r'sed -i s/\.27s/\.200s/g /usr/bin/smem')
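# Hedged note (not part of the original module): the sed call above rewrites
# every '.27s' width specifier in /usr/bin/smem to '.200s', so long worker
# command lines are not truncated before get_pids_memory() parses them.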
def create_report(scenario_data, appliance_results, process_results, use_slab, grafana_urls):
logger.info('Creating Memory Monitoring Report.')
ver = current_version()
provider_names = 'No Providers'
if 'providers' in scenario_data['scenario']:
provider_names = ', '.join(scenario_data['scenario']['providers'])
workload_path = results_path.join('{}-{}-{}'.format(test_ts, scenario_data['test_dir'], ver))
if not os.path.exists(str(workload_path)):
os.makedirs(str(workload_path))
scenario_path = workload_path.join(scenario_data['scenario']['name'])
if os.path.exists(str(scenario_path)):
logger.warning(f'Duplicate Workload-Scenario Name: {scenario_path}')
scenario_path = workload_path.join('{}-{}'.format(time.strftime('%Y%m%d%H%M%S'),
scenario_data['scenario']['name']))
logger.warning(f'Using: {scenario_path}')
os.mkdir(str(scenario_path))
mem_graphs_path = scenario_path.join('graphs')
if not os.path.exists(str(mem_graphs_path)):
os.mkdir(str(mem_graphs_path))
mem_rawdata_path = scenario_path.join('rawdata')
if not os.path.exists(str(mem_rawdata_path)):
os.mkdir(str(mem_rawdata_path))
graph_appliance_measurements(mem_graphs_path, ver, appliance_results, use_slab, provider_names)
graph_individual_process_measurements(mem_graphs_path, process_results, provider_names)
graph_same_miq_workers(mem_graphs_path, process_results, provider_names)
graph_all_miq_workers(mem_graphs_path, process_results, provider_names)
# Dump scenario Yaml:
with open(str(scenario_path.join('scenario.yml')), 'w') as scenario_file:
yaml.safe_dump(dict(scenario_data['scenario']), scenario_file, default_flow_style=False)
generate_summary_csv(scenario_path.join(f'{ver}-summary.csv'), appliance_results,
process_results, provider_names, ver)
generate_raw_data_csv(mem_rawdata_path, appliance_results, process_results)
generate_summary_html(scenario_path, ver, appliance_results, process_results, scenario_data,
provider_names, grafana_urls)
generate_workload_html(scenario_path, ver, scenario_data, provider_names, grafana_urls)
logger.info('Finished Creating Report')
def compile_per_process_results(procs_to_compile, process_results, ts_end):
alive_pids = 0
recycled_pids = 0
total_running_rss = 0
total_running_pss = 0
total_running_uss = 0
total_running_vss = 0
total_running_swap = 0
for process in procs_to_compile:
if process in process_results:
for pid in process_results[process]:
if ts_end in process_results[process][pid]:
alive_pids += 1
total_running_rss += process_results[process][pid][ts_end]['rss']
total_running_pss += process_results[process][pid][ts_end]['pss']
total_running_uss += process_results[process][pid][ts_end]['uss']
total_running_vss += process_results[process][pid][ts_end]['vss']
total_running_swap += process_results[process][pid][ts_end]['swap']
else:
recycled_pids += 1
return alive_pids, recycled_pids, total_running_rss, total_running_pss, total_running_uss, \
total_running_vss, total_running_swap
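# Hedged note (not part of the original module): compile_per_process_results
# sums the memory of every PID in the requested group at the final timestamp;
# a PID with no sample at that timestamp is counted as recycled rather than
# alive.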
def generate_raw_data_csv(directory, appliance_results, process_results):
starttime = time.time()
file_name = str(directory.join('appliance.csv'))
with open(file_name, 'w') as csv_file:
csv_file.write('TimeStamp,Total,Free,Used,Buffers,Cached,Slab,Swap_Total,Swap_Free\n')
for ts in appliance_results:
csv_file.write('{},{},{},{},{},{},{},{},{}\n'.format(ts,
appliance_results[ts]['total'], appliance_results[ts]['free'],
appliance_results[ts]['used'], appliance_results[ts]['buffers'],
appliance_results[ts]['cached'], appliance_results[ts]['slab'],
appliance_results[ts]['swap_total'], appliance_results[ts]['swap_free']))
for process_name in process_results:
for process_pid in process_results[process_name]:
file_name = str(directory.join(f'{process_pid}-{process_name}.csv'))
with open(file_name, 'w') as csv_file:
csv_file.write('TimeStamp,RSS,PSS,USS,VSS,SWAP\n')
for ts in process_results[process_name][process_pid]:
csv_file.write('{},{},{},{},{},{}\n'.format(ts,
process_results[process_name][process_pid][ts]['rss'],
process_results[process_name][process_pid][ts]['pss'],
process_results[process_name][process_pid][ts]['uss'],
process_results[process_name][process_pid][ts]['vss'],
process_results[process_name][process_pid][ts]['swap']))
timediff = time.time() - starttime
logger.info(f'Generated Raw Data CSVs in: {timediff}')
def generate_summary_csv(file_name, appliance_results, process_results, provider_names,
version_string):
starttime = time.time()
with open(str(file_name), 'w') as csv_file:
csv_file.write(f'Version: {version_string}, Provider(s): {provider_names}\n')
csv_file.write('Measurement,Start of test,End of test\n')
start = list(appliance_results.keys())[0]
end = list(appliance_results.keys())[-1]
csv_file.write('Appliance Total Memory,{},{}\n'.format(
round(appliance_results[start]['total'], 2), round(appliance_results[end]['total'], 2)))
csv_file.write('Appliance Free Memory,{},{}\n'.format(
round(appliance_results[start]['free'], 2), round(appliance_results[end]['free'], 2)))
csv_file.write('Appliance Used Memory,{},{}\n'.format(
round(appliance_results[start]['used'], 2), round(appliance_results[end]['used'], 2)))
csv_file.write('Appliance Buffers,{},{}\n'.format(
round(appliance_results[start]['buffers'], 2),
round(appliance_results[end]['buffers'], 2)))
csv_file.write('Appliance Cached,{},{}\n'.format(
round(appliance_results[start]['cached'], 2),
round(appliance_results[end]['cached'], 2)))
csv_file.write('Appliance Slab,{},{}\n'.format(
round(appliance_results[start]['slab'], 2),
round(appliance_results[end]['slab'], 2)))
csv_file.write('Appliance Total Swap,{},{}\n'.format(
round(appliance_results[start]['swap_total'], 2),
round(appliance_results[end]['swap_total'], 2)))
csv_file.write('Appliance Free Swap,{},{}\n'.format(
round(appliance_results[start]['swap_free'], 2),
round(appliance_results[end]['swap_free'], 2)))
summary_csv_measurement_dump(csv_file, process_results, 'rss')
summary_csv_measurement_dump(csv_file, process_results, 'pss')
summary_csv_measurement_dump(csv_file, process_results, 'uss')
summary_csv_measurement_dump(csv_file, process_results, 'vss')
summary_csv_measurement_dump(csv_file, process_results, 'swap')
timediff = time.time() - starttime
logger.info(f'Generated Summary CSV in: {timediff}')
def generate_summary_html(directory, version_string, appliance_results, process_results,
scenario_data, provider_names, grafana_urls):
starttime = time.time()
file_name = str(directory.join('index.html'))
with open(file_name, 'w') as html_file:
html_file.write('<html>\n')
html_file.write('<head><title>{} - {} Memory Usage Performance</title></head>'.format(
version_string, provider_names))
html_file.write('<body>\n')
html_file.write('<b>CFME {} {} Test Results</b><br>\n'.format(version_string,
scenario_data['test_name'].title()))
html_file.write('<b>Appliance Roles:</b> {}<br>\n'.format(
scenario_data['appliance_roles'].replace(',', ', ')))
html_file.write(f'<b>Provider(s):</b> {provider_names}<br>\n')
html_file.write('<b><a href=\'https://{}/\' target="_blank">{}</a></b>\n'.format(
scenario_data['appliance_ip'], scenario_data['appliance_name']))
if grafana_urls:
for g_name in sorted(grafana_urls.keys()):
html_file.write(
' : <b><a href=\'{}\' target="_blank">{}</a></b>'.format(grafana_urls[g_name],
g_name))
html_file.write('<br>\n')
html_file.write(f'<b><a href=\'{version_string}-summary.csv\'>Summary CSV</a></b>')
html_file.write(' : <b><a href=\'workload.html\'>Workload Info</a></b>')
html_file.write(' : <b><a href=\'graphs/\'>Graphs directory</a></b>\n')
html_file.write(' : <b><a href=\'rawdata/\'>CSVs directory</a></b><br>\n')
start = list(appliance_results.keys())[0]
end = list(appliance_results.keys())[-1]
timediff = end - start
total_proc_count = 0
for proc_name in process_results:
total_proc_count += len(list(process_results[proc_name].keys()))
growth = appliance_results[end]['used'] - appliance_results[start]['used']
max_used_memory = 0
for ts in appliance_results:
if appliance_results[ts]['used'] > max_used_memory:
max_used_memory = appliance_results[ts]['used']
html_file.write('<table border="1">\n')
html_file.write('<tr><td>\n')
# Appliance Wide Results
html_file.write('<table style="width:100%" border="1">\n')
html_file.write('<tr>\n')
html_file.write('<td><b>Version</b></td>\n')
html_file.write('<td><b>Start Time</b></td>\n')
html_file.write('<td><b>End Time</b></td>\n')
html_file.write('<td><b>Total Test Time</b></td>\n')
html_file.write('<td><b>Total Memory</b></td>\n')
html_file.write('<td><b>Start Used Memory</b></td>\n')
html_file.write('<td><b>End Used Memory</b></td>\n')
html_file.write('<td><b>Used Memory Growth</b></td>\n')
html_file.write('<td><b>Max Used Memory</b></td>\n')
html_file.write('<td><b>Total Tracked Processes</b></td>\n')
html_file.write('</tr>\n')
html_file.write('<td><a href=\'rawdata/appliance.csv\'>{}</a></td>\n'.format(
version_string))
html_file.write('<td>{}</td>\n'.format(start.replace(microsecond=0)))
html_file.write('<td>{}</td>\n'.format(end.replace(microsecond=0)))
html_file.write('<td>{}</td>\n'.format(str(timediff).partition('.')[0]))
html_file.write('<td>{}</td>\n'.format(round(appliance_results[end]['total'], 2)))
html_file.write('<td>{}</td>\n'.format(round(appliance_results[start]['used'], 2)))
html_file.write('<td>{}</td>\n'.format(round(appliance_results[end]['used'], 2)))
html_file.write('<td>{}</td>\n'.format(round(growth, 2)))
html_file.write('<td>{}</td>\n'.format(round(max_used_memory, 2)))
html_file.write(f'<td>{total_proc_count}</td>\n')
html_file.write('</table>\n')
# CFME/Miq Worker Results
html_file.write('<table style="width:100%" border="1">\n')
html_file.write('<tr>\n')
html_file.write('<td><b>Total CFME/Miq Workers</b></td>\n')
html_file.write('<td><b>End Running Workers</b></td>\n')
html_file.write('<td><b>Recycled Workers</b></td>\n')
html_file.write('<td><b>End Total Worker RSS</b></td>\n')
html_file.write('<td><b>End Total Worker PSS</b></td>\n')
html_file.write('<td><b>End Total Worker USS</b></td>\n')
html_file.write('<td><b>End Total Worker VSS</b></td>\n')
html_file.write('<td><b>End Total Worker SWAP</b></td>\n')
html_file.write('</tr>\n')
a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
miq_workers, process_results, end)
html_file.write('<tr>\n')
html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
html_file.write(f'<td>{a_pids}</td>\n')
html_file.write(f'<td>{r_pids}</td>\n')
html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
html_file.write('</tr>\n')
html_file.write('</table>\n')
# Per Process Summaries:
html_file.write('<table style="width:100%" border="1">\n')
html_file.write('<tr>\n')
html_file.write('<td><b>Application/Process Group</b></td>\n')
html_file.write('<td><b>Total Processes</b></td>\n')
html_file.write('<td><b>End Running Processes</b></td>\n')
html_file.write('<td><b>Recycled Processes</b></td>\n')
html_file.write('<td><b>End Total Process RSS</b></td>\n')
html_file.write('<td><b>End Total Process PSS</b></td>\n')
html_file.write('<td><b>End Total Process USS</b></td>\n')
html_file.write('<td><b>End Total Process VSS</b></td>\n')
html_file.write('<td><b>End Total Process SWAP</b></td>\n')
html_file.write('</tr>\n')
a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
ruby_processes, process_results, end)
t_a_pids = a_pids
t_r_pids = r_pids
tt_rss = t_rss
tt_pss = t_pss
tt_uss = t_uss
tt_vss = t_vss
tt_swap = t_swap
html_file.write('<tr>\n')
html_file.write('<td>ruby</td>\n')
html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
html_file.write(f'<td>{a_pids}</td>\n')
html_file.write(f'<td>{r_pids}</td>\n')
html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
html_file.write('</tr>\n')
# memcached Summary
a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
['memcached'], process_results, end)
t_a_pids += a_pids
t_r_pids += r_pids
tt_rss += t_rss
tt_pss += t_pss
tt_uss += t_uss
tt_vss += t_vss
tt_swap += t_swap
html_file.write('<tr>\n')
html_file.write('<td>memcached</td>\n')
html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
html_file.write(f'<td>{a_pids}</td>\n')
html_file.write(f'<td>{r_pids}</td>\n')
html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
html_file.write('</tr>\n')
# Postgres Summary
a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
['postgres'], process_results, end)
t_a_pids += a_pids
t_r_pids += r_pids
tt_rss += t_rss
tt_pss += t_pss
tt_uss += t_uss
tt_vss += t_vss
tt_swap += t_swap
html_file.write('<tr>\n')
html_file.write('<td>postgres</td>\n')
html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
html_file.write(f'<td>{a_pids}</td>\n')
html_file.write(f'<td>{r_pids}</td>\n')
html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
html_file.write('</tr>\n')
# httpd Summary
a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(['httpd'],
process_results, end)
t_a_pids += a_pids
t_r_pids += r_pids
tt_rss += t_rss
tt_pss += t_pss
tt_uss += t_uss
tt_vss += t_vss
tt_swap += t_swap
html_file.write('<tr>\n')
html_file.write('<td>httpd</td>\n')
html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
html_file.write(f'<td>{a_pids}</td>\n')
html_file.write(f'<td>{r_pids}</td>\n')
html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
html_file.write('</tr>\n')
# collectd Summary
a_pids, r_pids, t_rss, t_pss, t_uss, t_vss, t_swap = compile_per_process_results(
['collectd'], process_results, end)
t_a_pids += a_pids
t_r_pids += r_pids
tt_rss += t_rss
tt_pss += t_pss
tt_uss += t_uss
tt_vss += t_vss
tt_swap += t_swap
html_file.write('<tr>\n')
html_file.write('<td>collectd</td>\n')
html_file.write('<td>{}</td>\n'.format(a_pids + r_pids))
html_file.write(f'<td>{a_pids}</td>\n')
html_file.write(f'<td>{r_pids}</td>\n')
html_file.write('<td>{}</td>\n'.format(round(t_rss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_pss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_uss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_vss, 2)))
html_file.write('<td>{}</td>\n'.format(round(t_swap, 2)))
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td>total</td>\n')
html_file.write('<td>{}</td>\n'.format(t_a_pids + t_r_pids))
html_file.write(f'<td>{t_a_pids}</td>\n')
html_file.write(f'<td>{t_r_pids}</td>\n')
html_file.write('<td>{}</td>\n'.format(round(tt_rss, 2)))
html_file.write('<td>{}</td>\n'.format(round(tt_pss, 2)))
html_file.write('<td>{}</td>\n'.format(round(tt_uss, 2)))
html_file.write('<td>{}</td>\n'.format(round(tt_vss, 2)))
html_file.write('<td>{}</td>\n'.format(round(tt_swap, 2)))
html_file.write('</tr>\n')
html_file.write('</table>\n')
# Appliance Graph
html_file.write('</td></tr><tr><td>\n')
file_name = f'{version_string}-appliance_memory.png'
html_file.write(f'<img src=\'graphs/{file_name}\'>\n')
file_name = f'{version_string}-appliance_swap.png'
# Check for swap usage through out time frame:
max_swap_used = 0
for ts in appliance_results:
swap_used = appliance_results[ts]['swap_total'] - appliance_results[ts]['swap_free']
if swap_used > max_swap_used:
max_swap_used = swap_used
        if max_swap_used < 10:  # less than 10 MiB max, so hide the graph
html_file.write(f'<br><a href=\'graphs/{file_name}\'>Swap Graph ')
html_file.write('(Hidden, max_swap_used < 10 MiB)</a>\n')
else:
html_file.write(f'<img src=\'graphs/{file_name}\'>\n')
html_file.write('</td></tr><tr><td>\n')
# Per Process Results
html_file.write('<table style="width:100%" border="1"><tr>\n')
html_file.write('<td><b>Process Name</b></td>\n')
html_file.write('<td><b>Process Pid</b></td>\n')
html_file.write('<td><b>Start Time</b></td>\n')
html_file.write('<td><b>End Time</b></td>\n')
html_file.write('<td><b>Time Alive</b></td>\n')
html_file.write('<td><b>RSS Mem Start</b></td>\n')
html_file.write('<td><b>RSS Mem End</b></td>\n')
html_file.write('<td><b>RSS Mem Change</b></td>\n')
html_file.write('<td><b>PSS Mem Start</b></td>\n')
html_file.write('<td><b>PSS Mem End</b></td>\n')
html_file.write('<td><b>PSS Mem Change</b></td>\n')
html_file.write('<td><b>CSV</b></td>\n')
html_file.write('</tr>\n')
# By Worker Type Memory Used
for ordered_name in process_order:
if ordered_name in process_results:
for pid in process_results[ordered_name]:
start = list(process_results[ordered_name][pid].keys())[0]
end = list(process_results[ordered_name][pid].keys())[-1]
timediff = end - start
html_file.write('<tr>\n')
if len(process_results[ordered_name]) > 1:
html_file.write('<td><a href=\'#{}\'>{}</a></td>\n'.format(ordered_name,
ordered_name))
html_file.write('<td><a href=\'graphs/{}-{}.png\'>{}</a></td>\n'.format(
ordered_name, pid, pid))
else:
html_file.write(f'<td>{ordered_name}</td>\n')
html_file.write('<td><a href=\'#{}-{}.png\'>{}</a></td>\n'.format(
ordered_name, pid, pid))
html_file.write('<td>{}</td>\n'.format(start.replace(microsecond=0)))
html_file.write('<td>{}</td>\n'.format(end.replace(microsecond=0)))
html_file.write('<td>{}</td>\n'.format(str(timediff).partition('.')[0]))
rss_change = process_results[ordered_name][pid][end]['rss'] - \
process_results[ordered_name][pid][start]['rss']
html_file.write('<td>{}</td>\n'.format(
round(process_results[ordered_name][pid][start]['rss'], 2)))
html_file.write('<td>{}</td>\n'.format(
round(process_results[ordered_name][pid][end]['rss'], 2)))
html_file.write('<td>{}</td>\n'.format(round(rss_change, 2)))
pss_change = process_results[ordered_name][pid][end]['pss'] - \
process_results[ordered_name][pid][start]['pss']
html_file.write('<td>{}</td>\n'.format(
round(process_results[ordered_name][pid][start]['pss'], 2)))
html_file.write('<td>{}</td>\n'.format(
round(process_results[ordered_name][pid][end]['pss'], 2)))
html_file.write('<td>{}</td>\n'.format(round(pss_change, 2)))
html_file.write('<td><a href=\'rawdata/{}-{}.csv\'>csv</a></td>\n'.format(
pid, ordered_name))
html_file.write('</tr>\n')
else:
logger.debug(f'Process/Worker not part of test: {ordered_name}')
html_file.write('</table>\n')
# Worker Graphs
for ordered_name in process_order:
if ordered_name in process_results:
html_file.write('<tr><td>\n')
html_file.write('<div id=\'{}\'>Process name: {}</div><br>\n'.format(
ordered_name, ordered_name))
if len(process_results[ordered_name]) > 1:
file_name = f'{ordered_name}-all.png'
html_file.write('<img id=\'{}\' src=\'graphs/{}\'><br>\n'.format(file_name,
file_name))
else:
for pid in sorted(process_results[ordered_name]):
file_name = f'{ordered_name}-{pid}.png'
html_file.write('<img id=\'{}\' src=\'graphs/{}\'><br>\n'.format(
file_name, file_name))
html_file.write('</td></tr>\n')
html_file.write('</table>\n')
html_file.write('</body>\n')
html_file.write('</html>\n')
timediff = time.time() - starttime
logger.info(f'Generated Summary html in: {timediff}')
def generate_workload_html(directory, ver, scenario_data, provider_names, grafana_urls):
starttime = time.time()
file_name = str(directory.join('workload.html'))
with open(file_name, 'w') as html_file:
html_file.write('<html>\n')
html_file.write('<head><title>{} - {}</title></head>'.format(
scenario_data['test_name'], provider_names))
html_file.write('<body>\n')
html_file.write('<b>CFME {} {} Test Results</b><br>\n'.format(ver,
scenario_data['test_name'].title()))
html_file.write('<b>Appliance Roles:</b> {}<br>\n'.format(
scenario_data['appliance_roles'].replace(',', ', ')))
html_file.write(f'<b>Provider(s):</b> {provider_names}<br>\n')
html_file.write('<b><a href=\'https://{}/\' target="_blank">{}</a></b>\n'.format(
scenario_data['appliance_ip'], scenario_data['appliance_name']))
if grafana_urls:
for g_name in sorted(grafana_urls.keys()):
html_file.write(
' : <b><a href=\'{}\' target="_blank">{}</a></b>'.format(grafana_urls[g_name],
g_name))
html_file.write('<br>\n')
html_file.write(f'<b><a href=\'{ver}-summary.csv\'>Summary CSV</a></b>')
html_file.write(' : <b><a href=\'index.html\'>Memory Info</a></b>')
html_file.write(' : <b><a href=\'graphs/\'>Graphs directory</a></b>\n')
html_file.write(' : <b><a href=\'rawdata/\'>CSVs directory</a></b><br>\n')
html_file.write('<br><b>Scenario Data: </b><br>\n')
yaml_html = get_scenario_html(scenario_data['scenario'])
html_file.write(yaml_html + '\n')
html_file.write('<br>\n<br>\n<br>\n<b>Quantifier Data: </b>\n<br>\n<br>\n<br>\n<br>\n')
html_file.write('<table border="1">\n')
html_file.write('<tr>\n')
html_file.write('<td><b><font size="4"> System Information</font></b></td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td>\n')
system_path = ('../version_info/system.csv')
html_file.write('<a href="{}" download="System_Versions-{}-{}"> System Versions</a>'
.format(system_path, test_ts, scenario_data['scenario']['name']))
html_file.write('</td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td> </td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td> </td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td><b><font size="4"> Process Information</font></b></td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td>\n')
process_path = ('../version_info/processes.csv')
html_file.write('<a href="{}" download="Process_Versions-{}-{}"> Process Versions</a>'
.format(process_path, test_ts, scenario_data['scenario']['name']))
html_file.write('</td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td> </td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td> </td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td><b><font size="4"> Ruby Gem Information</font></b></td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td>\n')
gems_path = ('../version_info/gems.csv')
html_file.write('<a href="{}" download="Gem_Versions-{}-{}"> Ruby Gem Versions</a>'
.format(gems_path, test_ts, scenario_data['scenario']['name']))
html_file.write('</td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td> </td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td> </td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td><b><font size="4"> RPM Information</font></b></td>\n')
html_file.write('</tr>\n')
html_file.write('<tr>\n')
html_file.write('<td>\n')
rpms_path = ('../version_info/rpms.csv')
html_file.write('<a href="{}" download="RPM_Versions-{}-{}"> RPM Versions</a>'
.format(rpms_path, test_ts, scenario_data['scenario']['name']))
html_file.write('</td>\n')
html_file.write('</tr>\n')
html_file.write('</table>\n')
html_file.write('</body>\n')
html_file.write('</html>\n')
timediff = time.time() - starttime
logger.info(f'Generated Workload html in: {timediff}')
def add_workload_quantifiers(quantifiers, scenario_data):
starttime = time.time()
ver = current_version()
workload_path = results_path.join('{}-{}-{}'.format(test_ts, scenario_data['test_dir'], ver))
directory = workload_path.join(scenario_data['scenario']['name'])
file_name = str(directory.join('workload.html'))
marker = '<b>Quantifier Data: </b>'
yaml_dict = quantifiers
yaml_string = str(json.dumps(yaml_dict, indent=4))
yaml_html = yaml_string.replace('\n', '<br>\n')
with open(file_name, 'r+') as html_file:
line = ''
while marker not in line:
line = html_file.readline()
marker_pos = html_file.tell()
remainder = html_file.read()
html_file.seek(marker_pos)
html_file.write(f'{yaml_html} \n')
html_file.write(remainder)
timediff = time.time() - starttime
logger.info(f'Added quantifiers in: {timediff}')
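# Hedged note (not part of the original module): add_workload_quantifiers
# splices the quantifier JSON into an already written workload.html by reading
# up to the '<b>Quantifier Data: </b>' marker, buffering the remainder of the
# file, and rewriting it after the inserted block.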
def get_scenario_html(scenario_data):
scenario_dict = create_dict(scenario_data)
scenario_yaml = yaml.safe_dump(scenario_dict)
scenario_html = scenario_yaml.replace('\n', '<br>\n')
scenario_html = scenario_html.replace(', ', '<br>\n - ')
scenario_html = scenario_html.replace(' ', ' ')
scenario_html = scenario_html.replace('[', '<br>\n - ')
scenario_html = scenario_html.replace(']', '\n')
return scenario_html
def create_dict(attr_dict):
main_dict = dict(attr_dict)
for key, value in main_dict.items():
if type(value) == AttrDict:
main_dict[key] = create_dict(value)
return main_dict
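# Hedged note (not part of the original module): create_dict recursively
# replaces nested AttrDict values with plain dicts so that yaml.safe_dump in
# get_scenario_html can serialize the scenario without a custom representer.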
def graph_appliance_measurements(graphs_path, ver, appliance_results, use_slab, provider_names):
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
from cycler import cycler
starttime = time.time()
dates = list(appliance_results.keys())
total_memory_list = list(appliance_results[ts]['total']
for ts in appliance_results.keys())
free_memory_list = list(appliance_results[ts]['free']
for ts in appliance_results.keys())
used_memory_list = list(appliance_results[ts]['used']
for ts in appliance_results.keys())
buffers_memory_list = list(appliance_results[ts]['buffers']
for ts in appliance_results.keys())
cache_memory_list = list(appliance_results[ts]['cached']
for ts in appliance_results.keys())
slab_memory_list = list(appliance_results[ts]['slab']
for ts in appliance_results.keys())
swap_total_list = list(appliance_results[ts]['swap_total']
for ts in appliance_results.keys())
swap_free_list = list(appliance_results[ts]['swap_free']
for ts in appliance_results.keys())
# Stack Plot Memory Usage
file_name = graphs_path.join(f'{ver}-appliance_memory.png')
mpl.rcParams['axes.prop_cycle'] = cycler('color', ['firebrick', 'coral', 'steelblue',
'forestgreen'])
fig, ax = plt.subplots()
plt.title(f'Provider(s): {provider_names}\nAppliance Memory')
plt.xlabel('Date / Time')
plt.ylabel('Memory (MiB)')
if use_slab:
y = [used_memory_list, slab_memory_list, cache_memory_list, free_memory_list]
else:
y = [used_memory_list, buffers_memory_list, cache_memory_list, free_memory_list]
plt.stackplot(dates, *y, baseline='zero')
ax.annotate(str(round(total_memory_list[0], 2)), xy=(dates[0], total_memory_list[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(total_memory_list[-1], 2)), xy=(dates[-1], total_memory_list[-1]),
xytext=(4, -4), textcoords='offset points')
if use_slab:
ax.annotate(str(round(slab_memory_list[0], 2)), xy=(dates[0], used_memory_list[0] +
slab_memory_list[0]), xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(slab_memory_list[-1], 2)), xy=(dates[-1], used_memory_list[-1] +
slab_memory_list[-1]), xytext=(4, -4), textcoords='offset points')
ax.annotate(str(round(cache_memory_list[0], 2)), xy=(dates[0], used_memory_list[0] +
slab_memory_list[0] + cache_memory_list[0]), xytext=(4, 4),
textcoords='offset points')
ax.annotate(str(round(cache_memory_list[-1], 2)), xy=(
dates[-1], used_memory_list[-1] + slab_memory_list[-1] + cache_memory_list[-1]),
xytext=(4, -4), textcoords='offset points')
else:
ax.annotate(str(round(buffers_memory_list[0], 2)), xy=(
dates[0], used_memory_list[0] + buffers_memory_list[0]), xytext=(4, 4),
textcoords='offset points')
ax.annotate(str(round(buffers_memory_list[-1], 2)), xy=(dates[-1],
used_memory_list[-1] + buffers_memory_list[-1]), xytext=(4, -4),
textcoords='offset points')
ax.annotate(str(round(cache_memory_list[0], 2)), xy=(dates[0], used_memory_list[0] +
buffers_memory_list[0] + cache_memory_list[0]), xytext=(4, 4),
textcoords='offset points')
ax.annotate(str(round(cache_memory_list[-1], 2)), xy=(
dates[-1], used_memory_list[-1] + buffers_memory_list[-1] + cache_memory_list[-1]),
xytext=(4, -4), textcoords='offset points')
ax.annotate(str(round(used_memory_list[0], 2)), xy=(dates[0], used_memory_list[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(used_memory_list[-1], 2)), xy=(dates[-1], used_memory_list[-1]),
xytext=(4, -4), textcoords='offset points')
datefmt = mdates.DateFormatter('%m-%d %H-%M')
ax.xaxis.set_major_formatter(datefmt)
ax.grid(True)
p1 = plt.Rectangle((0, 0), 1, 1, fc='firebrick')
p2 = plt.Rectangle((0, 0), 1, 1, fc='coral')
p3 = plt.Rectangle((0, 0), 1, 1, fc='steelblue')
p4 = plt.Rectangle((0, 0), 1, 1, fc='forestgreen')
if use_slab:
ax.legend([p1, p2, p3, p4], ['Used', 'Slab', 'Cached', 'Free'],
bbox_to_anchor=(1.45, 0.22), fancybox=True)
else:
ax.legend([p1, p2, p3, p4], ['Used', 'Buffers', 'Cached', 'Free'],
bbox_to_anchor=(1.45, 0.22), fancybox=True)
fig.autofmt_xdate()
plt.savefig(str(file_name), bbox_inches='tight')
plt.close()
# Stack Plot Swap usage
mpl.rcParams['axes.prop_cycle'] = cycler('color', ['firebrick', 'forestgreen'])
file_name = graphs_path.join(f'{ver}-appliance_swap.png')
fig, ax = plt.subplots()
plt.title(f'Provider(s): {provider_names}\nAppliance Swap')
plt.xlabel('Date / Time')
plt.ylabel('Swap (MiB)')
swap_used_list = [t - f for f, t in zip(swap_free_list, swap_total_list)]
y = [swap_used_list, swap_free_list]
plt.stackplot(dates, *y, baseline='zero')
ax.annotate(str(round(swap_total_list[0], 2)), xy=(dates[0], swap_total_list[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(swap_total_list[-1], 2)), xy=(dates[-1], swap_total_list[-1]),
xytext=(4, -4), textcoords='offset points')
ax.annotate(str(round(swap_used_list[0], 2)), xy=(dates[0], swap_used_list[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(swap_used_list[-1], 2)), xy=(dates[-1], swap_used_list[-1]),
xytext=(4, -4), textcoords='offset points')
datefmt = mdates.DateFormatter('%m-%d %H-%M')
ax.xaxis.set_major_formatter(datefmt)
ax.grid(True)
p1 = plt.Rectangle((0, 0), 1, 1, fc='firebrick')
p2 = plt.Rectangle((0, 0), 1, 1, fc='forestgreen')
ax.legend([p1, p2], ['Used Swap', 'Free Swap'], bbox_to_anchor=(1.45, 0.22), fancybox=True)
fig.autofmt_xdate()
plt.savefig(str(file_name), bbox_inches='tight')
plt.close()
# Reset Colors
mpl.rcdefaults()
timediff = time.time() - starttime
logger.info(f'Plotted Appliance Memory in: {timediff}')
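# Hedged note (not part of the original module): each graphing function calls
# mpl.use('Agg') before importing pyplot, selecting a non-interactive backend
# so the PNGs can be rendered on a headless test runner.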
def graph_all_miq_workers(graph_file_path, process_results, provider_names):
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
starttime = time.time()
file_name = graph_file_path.join('all-processes.png')
fig, ax = plt.subplots()
plt.title(f'Provider(s): {provider_names}\nAll Workers/Monitored Processes')
plt.xlabel('Date / Time')
plt.ylabel('Memory (MiB)')
for process_name in process_results:
if 'Worker' in process_name or 'Handler' in process_name or 'Catcher' in process_name:
for process_pid in process_results[process_name]:
dates = list(process_results[process_name][process_pid].keys())
rss_samples = list(process_results[process_name][process_pid][ts]['rss']
for ts in process_results[process_name][process_pid].keys())
vss_samples = list(process_results[process_name][process_pid][ts]['vss']
for ts in process_results[process_name][process_pid].keys())
plt.plot(dates, rss_samples, linewidth=1, label='{} {} RSS'.format(process_pid,
process_name))
plt.plot(dates, vss_samples, linewidth=1, label='{} {} VSS'.format(
process_pid, process_name))
datefmt = mdates.DateFormatter('%m-%d %H-%M')
ax.xaxis.set_major_formatter(datefmt)
ax.grid(True)
plt.legend(loc='upper center', bbox_to_anchor=(1.2, 0.1), fancybox=True)
fig.autofmt_xdate()
plt.savefig(str(file_name), bbox_inches='tight')
plt.close()
timediff = time.time() - starttime
logger.info(f'Plotted All Type/Process Memory in: {timediff}')
def graph_individual_process_measurements(graph_file_path, process_results, provider_names):
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
starttime = time.time()
for process_name in process_results:
for process_pid in process_results[process_name]:
file_name = graph_file_path.join(f'{process_name}-{process_pid}.png')
dates = list(process_results[process_name][process_pid].keys())
rss_samples = list(process_results[process_name][process_pid][ts]['rss']
for ts in process_results[process_name][process_pid].keys())
pss_samples = list(process_results[process_name][process_pid][ts]['pss']
for ts in process_results[process_name][process_pid].keys())
uss_samples = list(process_results[process_name][process_pid][ts]['uss']
for ts in process_results[process_name][process_pid].keys())
vss_samples = list(process_results[process_name][process_pid][ts]['vss']
for ts in process_results[process_name][process_pid].keys())
swap_samples = list(process_results[process_name][process_pid][ts]['swap']
for ts in process_results[process_name][process_pid].keys())
fig, ax = plt.subplots()
plt.title('Provider(s)/Size: {}\nProcess/Worker: {}\nPID: {}'.format(provider_names,
process_name, process_pid))
plt.xlabel('Date / Time')
plt.ylabel('Memory (MiB)')
plt.plot(dates, rss_samples, linewidth=1, label='RSS')
plt.plot(dates, pss_samples, linewidth=1, label='PSS')
plt.plot(dates, uss_samples, linewidth=1, label='USS')
plt.plot(dates, vss_samples, linewidth=1, label='VSS')
plt.plot(dates, swap_samples, linewidth=1, label='Swap')
if rss_samples:
ax.annotate(str(round(rss_samples[0], 2)), xy=(dates[0], rss_samples[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(rss_samples[-1], 2)), xy=(dates[-1], rss_samples[-1]),
xytext=(4, -4), textcoords='offset points')
if pss_samples:
ax.annotate(str(round(pss_samples[0], 2)), xy=(dates[0], pss_samples[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(pss_samples[-1], 2)), xy=(dates[-1], pss_samples[-1]),
xytext=(4, -4), textcoords='offset points')
if uss_samples:
ax.annotate(str(round(uss_samples[0], 2)), xy=(dates[0], uss_samples[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(uss_samples[-1], 2)), xy=(dates[-1], uss_samples[-1]),
xytext=(4, -4), textcoords='offset points')
if vss_samples:
ax.annotate(str(round(vss_samples[0], 2)), xy=(dates[0], vss_samples[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(vss_samples[-1], 2)), xy=(dates[-1], vss_samples[-1]),
xytext=(4, -4), textcoords='offset points')
if swap_samples:
ax.annotate(str(round(swap_samples[0], 2)), xy=(dates[0], swap_samples[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(swap_samples[-1], 2)), xy=(dates[-1], swap_samples[-1]),
xytext=(4, -4), textcoords='offset points')
datefmt = mdates.DateFormatter('%m-%d %H-%M')
ax.xaxis.set_major_formatter(datefmt)
ax.grid(True)
plt.legend(loc='upper center', bbox_to_anchor=(1.2, 0.1), fancybox=True)
fig.autofmt_xdate()
plt.savefig(str(file_name), bbox_inches='tight')
plt.close()
timediff = time.time() - starttime
logger.info(f'Plotted Individual Process Memory in: {timediff}')
def graph_same_miq_workers(graph_file_path, process_results, provider_names):
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
starttime = time.time()
for process_name in process_results:
if len(process_results[process_name]) > 1:
logger.debug('Plotting {} {} processes on single graph.'.format(
len(process_results[process_name]), process_name))
file_name = graph_file_path.join(f'{process_name}-all.png')
fig, ax = plt.subplots()
pids = 'PIDs: '
for i, pid in enumerate(process_results[process_name], 1):
pids = '{}{}'.format(pids, '{},{}'.format(pid, [' ', '\n'][i % 6 == 0]))
pids = pids[0:-2]
plt.title('Provider: {}\nProcess/Worker: {}\n{}'.format(provider_names,
process_name, pids))
plt.xlabel('Date / Time')
plt.ylabel('Memory (MiB)')
for process_pid in process_results[process_name]:
dates = list(process_results[process_name][process_pid].keys())
rss_samples = list(process_results[process_name][process_pid][ts]['rss']
for ts in process_results[process_name][process_pid].keys())
pss_samples = list(process_results[process_name][process_pid][ts]['pss']
for ts in process_results[process_name][process_pid].keys())
uss_samples = list(process_results[process_name][process_pid][ts]['uss']
for ts in process_results[process_name][process_pid].keys())
vss_samples = list(process_results[process_name][process_pid][ts]['vss']
for ts in process_results[process_name][process_pid].keys())
swap_samples = list(process_results[process_name][process_pid][ts]['swap']
for ts in process_results[process_name][process_pid].keys())
plt.plot(dates, rss_samples, linewidth=1, label=f'{process_pid} RSS')
plt.plot(dates, pss_samples, linewidth=1, label=f'{process_pid} PSS')
plt.plot(dates, uss_samples, linewidth=1, label=f'{process_pid} USS')
plt.plot(dates, vss_samples, linewidth=1, label=f'{process_pid} VSS')
plt.plot(dates, swap_samples, linewidth=1, label=f'{process_pid} SWAP')
if rss_samples:
ax.annotate(str(round(rss_samples[0], 2)), xy=(dates[0], rss_samples[0]),
xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(rss_samples[-1], 2)), xy=(dates[-1],
rss_samples[-1]), xytext=(4, -4), textcoords='offset points')
if pss_samples:
ax.annotate(str(round(pss_samples[0], 2)), xy=(dates[0],
pss_samples[0]), xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(pss_samples[-1], 2)), xy=(dates[-1],
pss_samples[-1]), xytext=(4, -4), textcoords='offset points')
if uss_samples:
ax.annotate(str(round(uss_samples[0], 2)), xy=(dates[0],
uss_samples[0]), xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(uss_samples[-1], 2)), xy=(dates[-1],
uss_samples[-1]), xytext=(4, -4), textcoords='offset points')
if vss_samples:
ax.annotate(str(round(vss_samples[0], 2)), xy=(dates[0],
vss_samples[0]), xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(vss_samples[-1], 2)), xy=(dates[-1],
vss_samples[-1]), xytext=(4, -4), textcoords='offset points')
if swap_samples:
ax.annotate(str(round(swap_samples[0], 2)), xy=(dates[0],
swap_samples[0]), xytext=(4, 4), textcoords='offset points')
ax.annotate(str(round(swap_samples[-1], 2)), xy=(dates[-1],
swap_samples[-1]), xytext=(4, -4), textcoords='offset points')
datefmt = mdates.DateFormatter('%m-%d %H-%M')
ax.xaxis.set_major_formatter(datefmt)
ax.grid(True)
plt.legend(loc='upper center', bbox_to_anchor=(1.2, 0.1), fancybox=True)
fig.autofmt_xdate()
plt.savefig(str(file_name), bbox_inches='tight')
plt.close()
timediff = time.time() - starttime
logger.info(f'Plotted Same Type/Process Memory in: {timediff}')
def summary_csv_measurement_dump(csv_file, process_results, measurement):
csv_file.write('---------------------------------------------\n')
csv_file.write(f'Per Process {measurement.upper()} Memory Usage\n')
csv_file.write('---------------------------------------------\n')
csv_file.write('Process/Worker Type,PID,Start of test,End of test\n')
for ordered_name in process_order:
if ordered_name in process_results:
for process_pid in sorted(process_results[ordered_name]):
start = list(process_results[ordered_name][process_pid].keys())[0]
end = list(process_results[ordered_name][process_pid].keys())[-1]
csv_file.write('{},{},{},{}\n'.format(ordered_name, process_pid,
round(process_results[ordered_name][process_pid][start][measurement], 2),
round(process_results[ordered_name][process_pid][end][measurement], 2)))
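# ---------------------------------------------------------------------------
# A minimal, self-contained sketch (not part of the original module) of the
# nested layout the graphing and CSV helpers above assume for
# ``process_results``:
#     {process_name: {pid: {timestamp: {'rss': .., 'pss': .., 'uss': ..,
#                                       'vss': .., 'swap': ..}}}}
# The worker name, PID, timestamps and memory values below are made up purely
# for illustration.
def _example_process_results():
    from datetime import datetime, timedelta
    base = datetime(2020, 1, 1, 12, 0, 0)
    samples = {}
    for i in range(3):
        samples[base + timedelta(minutes=i)] = {
            'rss': 100.0 + i, 'pss': 90.0 + i, 'uss': 80.0 + i,
            'vss': 500.0 + i, 'swap': 0.0}
    return {'ExampleWorker': {12345: samples}}
if __name__ == '__main__':
    example = _example_process_results()
    pid_data = example['ExampleWorker'][12345]
    # the same per-metric extraction performed by the plotting helpers above
    rss_series = [pid_data[ts]['rss'] for ts in pid_data]
    print(f'RSS samples: {rss_series}')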
|
gpl-2.0
|
lordkman/burnman
|
contrib/tutorial/step_2.py
|
4
|
4765
|
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU GPL v2 or later.
# Ian Rose ian.r.rose@gmail.com
"""
CIDER 2014 BurnMan Tutorial --- step 2
--------------------------------------
In this second part of the tutorial we try to get a closer fit to our
1D seismic reference model. In the simple Mg, Si, and O model that
we used in step 1 there was one free parameter, namely phase_1_fraction,
which goes between zero and one.
In this script we want to explore how good of a fit to PREM we can get
by varying this fraction. We create a simple function that calculates
a misfit between PREM and our mineral model as a function of phase_1_fraction,
and then plot this misfit function to try to find a best model.
This script may be run by typing
python step_2.py
"""
from __future__ import absolute_import
from __future__ import print_function
# The imports here are identical to those from step 1
import numpy as np
import matplotlib.pyplot as plt
# hack to allow scripts to be placed in subdirectories next to burnman:
import os
import sys
if not os.path.exists('burnman') and os.path.exists('../../burnman'):
sys.path.insert(1, os.path.abspath('../..'))
import burnman
from burnman import minerals
if __name__ == '__main__':
# Again, we load the PREM seismic model and query it for pressure,
# density, and elastic properties at lower mantle depths. This, too,
# is identical to step 1
n_depths = 20
min_depth = 850.e3
max_depth = 2800.e3
depths = np.linspace(min_depth, max_depth, n_depths)
seismic_model = burnman.seismic.PREM()
pressure, seis_rho, seis_vphi, seis_vs = seismic_model.evaluate(
['pressure', 'density', 'v_phi', 'v_s'], depths)
temperature = burnman.geotherm.brown_shankland(depths)
"""
This is the main workhorse of this script. We define the function ``misfit''
which takes a single parameter phase_1_fraction. We create the rock we want,
calculate its elastic properties, and then calculate an L2 misfit between
the calculated profiles for Vs, Vphi, and density and those for PREM.
Here again is our model with stishovite and wuestite. Instead of that, you
will want to copy the ``rock'' you created in step 1. If you experimented with
rocks other than those with Mg perovskite and periclase, you can also try those.
"""
def misfit(phase_1_fraction):
# Here we define the rock as before.
phase_2_fraction = 1.0 - phase_1_fraction
rock = burnman.Composite(
[minerals.SLB_2011.stishovite(), minerals.SLB_2011.wuestite()], [phase_1_fraction, phase_2_fraction])
# Just as in step 1, we want to set which equation of state we use,
# then call burnman.velocities_from_rock, which evaluates the
# elastic properties and seismic velocities at the predefined
# pressures and temperatures
rock.set_method('slb3')
density, vphi, vs = rock.evaluate(
['density', 'v_phi', 'v_s'], pressure, temperature)
# Since we will call this misfit function many times, we may be interested
# in a status report. These lines print some debug output so we
# can keep track of what the script is doing.
print("Calculations are done for:")
rock.debug_print()
# Here we integrate an L2 difference with depth between our calculated seismic
# profiles and PREM. We then return those misfits.
[vs_err, vphi_err, rho_err] = burnman.compare_l2(
depths, [vs, vphi, density], [seis_vs, seis_vphi, seis_rho])
return vs_err, vphi_err, rho_err
"""
With the misfit function now defined, we can call it many times for different
phase_1_fraction values, and see how good of a fit we can get.
"""
# We create the array ``fraction'', which has 101 fractions between
# zero and one, and call our misfit function for each of those fractions.
fraction = np.linspace(0.0, 1.0, 101)
errs = np.array([misfit(f) for f in fraction])
vs_misfit = errs[:, 0]
vphi_misfit = errs[:, 1]
rho_misfit = errs[:, 2]
# Finally, we plot the misfits against the phase_1_fraction. You will probably
# find that it is difficult to fit shear wave speed, bulk sound speed, and
# density all at the same time.
plt.plot(fraction, vs_misfit, "r-x", label=("Vs misfit"))
plt.plot(fraction, vphi_misfit, "b-x", label=("Vphi misfit"))
plt.plot(fraction, rho_misfit, "g-x", label=("Density misfit"))
plt.yscale('log')
plt.xlabel('Fraction Phase 1')
plt.ylabel('Misfit')
plt.legend()
plt.show()
|
gpl-2.0
|
ldirer/scikit-learn
|
sklearn/tests/test_config.py
|
29
|
2476
|
from sklearn import get_config, set_config, config_context
from sklearn.utils.testing import assert_equal, assert_raises
def test_config_context():
assert_equal(get_config(), {'assume_finite': False})
# Not using as a context manager affects nothing
config_context(assume_finite=True)
assert_equal(get_config(), {'assume_finite': False})
with config_context(assume_finite=True):
assert_equal(get_config(), {'assume_finite': True})
assert_equal(get_config(), {'assume_finite': False})
with config_context(assume_finite=True):
with config_context(assume_finite=None):
assert_equal(get_config(), {'assume_finite': True})
assert_equal(get_config(), {'assume_finite': True})
with config_context(assume_finite=False):
assert_equal(get_config(), {'assume_finite': False})
with config_context(assume_finite=None):
assert_equal(get_config(), {'assume_finite': False})
# global setting will not be retained outside of context that
# did not modify this setting
set_config(assume_finite=True)
assert_equal(get_config(), {'assume_finite': True})
assert_equal(get_config(), {'assume_finite': False})
assert_equal(get_config(), {'assume_finite': True})
assert_equal(get_config(), {'assume_finite': False})
# No positional arguments
assert_raises(TypeError, config_context, True)
# No unknown arguments
assert_raises(TypeError, config_context(do_something_else=True).__enter__)
def test_config_context_exception():
assert_equal(get_config(), {'assume_finite': False})
try:
with config_context(assume_finite=True):
assert_equal(get_config(), {'assume_finite': True})
raise ValueError()
except ValueError:
pass
assert_equal(get_config(), {'assume_finite': False})
def test_set_config():
assert_equal(get_config(), {'assume_finite': False})
set_config(assume_finite=None)
assert_equal(get_config(), {'assume_finite': False})
set_config(assume_finite=True)
assert_equal(get_config(), {'assume_finite': True})
set_config(assume_finite=None)
assert_equal(get_config(), {'assume_finite': True})
set_config(assume_finite=False)
assert_equal(get_config(), {'assume_finite': False})
# No unknown arguments
assert_raises(TypeError, set_config, do_something_else=True)
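# A minimal usage sketch (not part of the test module), restating what the
# tests above exercise: set_config flips the global flag for the rest of the
# session, while config_context changes it only inside the ``with`` block.
if __name__ == '__main__':
    set_config(assume_finite=True)
    print(get_config())                  # {'assume_finite': True}
    with config_context(assume_finite=False):
        print(get_config())              # {'assume_finite': False}
    print(get_config())                  # {'assume_finite': True}
    set_config(assume_finite=False)      # restore the default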
|
bsd-3-clause
|
sdsc/xsede_stats
|
tacc_stats/site/comet/views.py
|
1
|
20771
|
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render_to_response, render
from django.views.generic import DetailView, ListView
from django.db.models import Q
import os,sys,pwd
from tacc_stats.analysis import exam
from tacc_stats.site.comet.models import Job, Host, Libraries, TestInfo
from tacc_stats.site.xalt.models import run, join_run_object, lib
import tacc_stats.cfg as cfg
import tacc_stats.analysis.plot as plots
from tacc_stats.analysis.gen import lariat_utils
from tacc_stats.pickler import job_stats, batch_acct
# Compatibility with old pickle versions
sys.modules['pickler.job_stats'] = job_stats
sys.modules['pickler.batch_acct'] = batch_acct
sys.modules['job_stats'] = job_stats
sys.modules['batch_acct'] = batch_acct
import cPickle as pickle
import time,pytz
from datetime import datetime
import numpy as np
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from django.core.cache import cache,get_cache
import traceback
def update_comp_info(thresholds = None):
schema_map = {'HighCPI' : ['cpi','>',1.5],
'HighCPLD' : ['cpld','>',1.5],
'Load_L1Hits' : ['Load_L1Hits','>',1.5],
'Load_L2Hits' : ['Load_L2Hits','>',1.5],
'Load_LLCHits' : ['Load_LLCHits','>',1.5],
'MemBw' : ['mbw', '<', 1.0 ],
'Catastrophe' : ['cat', '<',0.01] ,
'MemUsage' : ['mem','>',31],
'PacketRate' : ['packetrate','>',0],
'PacketSize' : ['packetsize','>',0],
'Idle' : ['idle','>',0.99],
'LowFLOPS' : ['flops','<',10],
'VecPercent' : ['VecPercent','<',0.05],
'GigEBW' : ['GigEBW','>',1e7],
'CPU_Usage' : ['CPU_Usage','<',800],
'MIC_Usage' : ['MIC_Usage','>',0.0],
'Load_All' : ['Load_All','<',1e7],
}
if thresholds:
for key,val in thresholds.iteritems():
schema_map[key][1:3] = val
for name in schema_map:
if TestInfo.objects.filter(test_name = name).exists():
TestInfo.objects.filter(test_name = name).delete()
obj = TestInfo(test_name = name,
field_name = schema_map[name][0],
comparator = schema_map[name][1],
threshold = schema_map[name][2])
obj.save()
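# A minimal sketch (not from the original code) of the optional ``thresholds``
# argument: each value replaces the [comparator, threshold] pair of the named
# test before the TestInfo rows are rebuilt, e.g.
#
#     update_comp_info(thresholds={'HighCPI': ['>', 2.0],
#                                  'LowFLOPS': ['<', 5]})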
def update(date,rerun=False):
tz = pytz.timezone('US/Pacific')
pickle_dir = os.path.join(cfg.pickles_dir,date)
ctr = 0
for root, directory, pickle_files in os.walk(pickle_dir):
num_files = len(pickle_files)
print "Number of pickle files in",root,'=',num_files
for pickle_file in sorted(pickle_files):
ctr += 1
print pickle_file
try:
if rerun: pass
elif Job.objects.filter(id = pickle_file).exists():
continue
except:
print pickle_file,"doesn't look like a pickled job"
continue
try:
pickle_path = os.path.join(root,str(pickle_file))
with open(pickle_path, 'rb') as f:
data = np.load(f)
json = data.acct
hosts = data.hosts.keys()
del json['yesno']
utc_start = datetime.utcfromtimestamp(
json['start_time']).replace(tzinfo=pytz.utc)
utc_end = datetime.utcfromtimestamp(
json['end_time']).replace(tzinfo=pytz.utc)
json['run_time'] = json['end_time'] - json['start_time']
if json.has_key('unknown'):
json['requested_time'] = json['unknown']*60
del json['unknown']
else: json['requested_time'] = json['requested_time']*60
json['start_epoch'] = json['start_time']
json['end_epoch'] = json['end_time']
json['start_time'] = utc_start.astimezone(tz)
json['end_time'] = utc_end.astimezone(tz)
json['date'] = json['end_time'].date()
json['name'] = json['name'][0:128]
json['wayness'] = json['cores']/json['nodes']
try: json['user']=pwd.getpwuid(int(json['uid']))[0]
except: json['user']='unknown'
### If xalt is available add data to the DB
xd = None
try:
xd = run.objects.using('xalt').filter(job_id = json['id'])[0]
json['user'] = xd.user
json['exe'] = xd.exec_path.split('/')[-1][0:128]
json['exec_path'] = xd.exec_path
json['cwd'] = xd.cwd[0:128]
json['threads'] = xd.num_threads
except: xd = False
obj, created = Job.objects.update_or_create(**json)
for host_name in hosts:
h = Host(name=host_name)
h.save()
h.jobs.add(obj)
if xd:
for join in join_run_object.objects.using('xalt').filter(run_id = xd.run_id):
try:
object_path = lib.objects.using('xalt').get(obj_id = join.obj_id).object_path
module_name = lib.objects.using('xalt').get(obj_id = join.obj_id).module_name
if not module_name: module_name = 'none'
library = Libraries(object_path = object_path, module_name = module_name)
library.save()
library.jobs.add(obj)
except: pass
except:
print pickle_file,'failed'
print traceback.format_exc()
print date
print "Percentage Completed =",100*float(ctr)/num_files
def update_metric_fields(date,rerun=False):
update_comp_info()
aud = exam.Auditor(processes=4)
aud.stage(exam.GigEBW, ignore_qs=[], min_time = 0)
aud.stage(exam.HighCPI, ignore_qs=[], min_time = 0)
aud.stage(exam.HighCPLD, ignore_qs=[], min_time = 0)
aud.stage(exam.Load_L1Hits, ignore_qs=[], min_time = 0)
aud.stage(exam.Load_L2Hits, ignore_qs=[], min_time = 0)
aud.stage(exam.Load_LLCHits, ignore_qs=[], min_time = 0)
aud.stage(exam.MemBw, ignore_qs=[], min_time = 0)
aud.stage(exam.Catastrophe, ignore_qs=[], min_time = 0)
aud.stage(exam.MemUsage, ignore_qs=[], min_time = 0)
aud.stage(exam.PacketRate, ignore_qs=[], min_time = 0)
aud.stage(exam.PacketSize, ignore_qs=[], min_time = 0)
aud.stage(exam.Idle, ignore_qs=[], min_time = 0)
aud.stage(exam.LowFLOPS, ignore_qs=[], min_time = 0)
aud.stage(exam.VecPercent, ignore_qs=[], min_time = 0)
aud.stage(exam.CPU_Usage, ignore_qs = [], min_time = 0)
aud.stage(exam.MIC_Usage, ignore_qs = [], min_time = 0)
aud.stage(exam.Load_All, ignore_qs = [], min_time = 0)
print 'Run the following tests for:',date
for name, test in aud.measures.iteritems():
print name
obj = TestInfo.objects.get(test_name = name)
print obj.field_name,obj.threshold,obj.comparator
jobs_list = Job.objects.filter(date = date).exclude(run_time__lt = 0)
# Use mem to see if job was tested. It will always exist
#if not rerun:
#jobs_list = jobs_list.filter(Load_L1Hits = None)
paths = []
for job in jobs_list:
paths.append(os.path.join(cfg.pickles_dir,
job.date.strftime('%Y-%m-%d'),
str(job.id)))
num_jobs = jobs_list.count()
print '# Jobs to be tested:',num_jobs
if num_jobs == 0 : return
aud.run(paths)
print 'finished computing metrics'
for name, results in aud.metrics.iteritems():
obj = TestInfo.objects.get(test_name = name)
for jobid in results.keys():
try:
jobs_list.filter(id = jobid).update(**{ obj.field_name : results[jobid]})
except:
pass
def sys_plot(request, pk):
racks = []
nodes = []
clust_name = ""
for host in Host.objects.values_list('name',flat=True).distinct():
clust_name,r,n=host.split('-')
racks.append("{0:02d}".format(int(r)))
nodes.append(n)
racks = sorted(set(racks))
nodes = sorted(set(nodes))
job = Job.objects.get(id=pk)
hosts = job.host_set.all().values_list('name',flat=True)
x = np.zeros((len(nodes),len(racks)))
for r in range(len(racks)):
for n in range(len(nodes)):
name = clust_name+'-'+str(racks[r])+'-'+str(nodes[n])
if name in hosts: x[n][r] = 1.0
fig = Figure(figsize=(17,5))
ax=fig.add_subplot(1,1,1)
fig.tight_layout()
ax.set_yticks(range(len(nodes)))
ax.set_yticklabels(nodes,fontsize=6)
ax.set_xticks(range(len(racks)))
ax.set_xticklabels(racks,fontsize=6,rotation=90)
pcm = ax.pcolor(np.array(range(len(racks)+1)),np.array(range(len(nodes)+1)),x)
canvas = FigureCanvas(fig)
response = HttpResponse(content_type='image/png')
response['Content-Disposition'] = "attachment; filename="+pk+"-sys.png"
fig.savefig(response, format='png')
return response
def dates(request, error = False):
month_dict ={}
dates = Job.objects.dates('date','day')
for date in dates:
y,m,d = date.strftime('%Y-%m-%d').split('-')
key = y+'-'+m
month_dict.setdefault(key, [])
month_dict[key].append((y+'-'+m+'-'+d, d))
field = {}
field["machine_name"] = cfg.host_name_ext
field['date_list'] = sorted(month_dict.iteritems())[::-1]
field['error'] = error
return render_to_response("comet/search.html", field)
def search(request):
if 'jobid' in request.GET:
try:
job = Job.objects.get(id = request.GET['jobid'])
return HttpResponseRedirect("/comet/job/"+str(job.id)+"/")
except: pass
try:
fields = request.GET.dict()
new_fields = {k:v for k,v in fields.items() if v}
fields = new_fields
if 'opt_field0' in fields.keys() and 'value0' in fields.keys():
fields[fields['opt_field0']] = fields['value0']
del fields['opt_field0'], fields['value0']
if 'opt_field1' in fields.keys() and 'value1' in fields.keys():
fields[fields['opt_field1']] = fields['value1']
del fields['opt_field1'], fields['value1']
if 'opt_field2' in fields.keys() and 'value2' in fields.keys():
fields[fields['opt_field2']] = fields['value2']
del fields['opt_field2'], fields['value2']
print 'search', fields
return index(request, **fields)
except: pass
return dates(request, error = True)
def index(request, **field):
print 'index',field
name = ''
for key, val in field.iteritems():
name += '['+key+'='+val+']-'
if 'run_time__gte' in field: pass
else: field['run_time__gte'] = 60
order_key = '-id'
if 'order_key' in field:
order_key = field['order_key']
del field['order_key']
if field.has_key('date'):
date = field['date'].split('-')
if len(date) == 2:
field['date__year'] = date[0]
field['date__month'] = date[1]
del field['date']
job_list = Job.objects.filter(**field).order_by(order_key)
field['name'] = name + 'search'
field['histograms'] = hist_summary(job_list)
field['job_list'] = job_list
field['nj'] = job_list.count()
# Computed Metrics
field['cat_job_list'] = job_list.filter(Q(cat__lte = 0.001) | Q(cat__gte = 1000)).exclude(cat = None)
completed_list = job_list.exclude(status__in=['CANCELLED','FAILED']).order_by('-id')
field['idle_job_list'] = completed_list.filter(idle__gte = 0.99)
field['mem_job_list'] = completed_list.filter(mem__lte = 30, queue = 'largemem')
field['cpi_thresh'] = 1.5
field['cpi_job_list'] = completed_list.exclude(cpi = None).filter(cpi__gte = field['cpi_thresh'])
field['cpi_per'] = 100*field['cpi_job_list'].count()/float(completed_list.count())
field['gigebw_thresh'] = 2**20
field['gigebw_job_list'] = completed_list.exclude(GigEBW = None).filter(GigEBW__gte = field['gigebw_thresh'])
field['idle_job_list'] = list_to_dict(field['idle_job_list'],'idle')
field['cat_job_list'] = list_to_dict(field['cat_job_list'],'cat')
field['cpi_job_list'] = list_to_dict(field['cpi_job_list'],'cpi')
field['mem_job_list'] = list_to_dict(field['mem_job_list'],'mem')
field['gigebw_job_list'] = list_to_dict(field['gigebw_job_list'],'GigEBW')
return render_to_response("comet/index.html", field)
def list_to_dict(job_list,metric):
job_dict={}
for job in job_list:
job_dict.setdefault(job.user,[]).append((job.id,round(job.__dict__[metric],3)))
return job_dict
def hist_summary(job_list):
fig = Figure(figsize=(16,6))
# Runtimes
jobs = np.array(job_list.values_list('run_time',flat=True))/3600.
ax = fig.add_subplot(221)
bins = np.linspace(0, max(jobs), max(5, 5*np.log(len(jobs))))
ax.hist(jobs, bins = bins, log=True)
ax.set_ylabel('# of jobs')
ax.set_xlabel('hrs')
ax.set_title('Runtime')
# Nodes
jobs = np.array(job_list.values_list('nodes',flat=True))
ax = fig.add_subplot(222)
bins = np.linspace(0, max(jobs), max(5, 5*np.log(len(jobs))))
ax.hist(jobs, bins = bins, log=True)
ax.set_title('Size')
ax.set_xlabel('nodes')
# Queue Wait Time
jobs = (np.array(job_list.values_list('start_epoch',flat=True))-np.array(job_list.values_list('queue_time',flat=True)))/3600.
ax = fig.add_subplot(223)
bins = np.linspace(0, max(jobs), max(5, 5*np.log(len(jobs))))
ax.hist(jobs, bins = bins, log=True)
ax.set_ylabel('# of jobs')
ax.set_title('Queue Wait Time')
ax.set_xlabel('hrs')
jobs = np.array(job_list.filter(status = "FAILED").values_list('nodes',flat=True))
ax = fig.add_subplot(224)
try:
bins = np.linspace(0, max(jobs), max(5, 5*np.log(len(jobs))))
ax.hist(jobs, bins = bins, log=True)
except: pass
ax.set_title('Failed Jobs')
ax.set_xlabel('nodes')
fig.subplots_adjust(hspace=0.5)
canvas = FigureCanvas(fig)
import StringIO,base64,urllib
imgdata = StringIO.StringIO()
fig.savefig(imgdata, format='png')
imgdata.seek(0)
response = "data:image/png;base64,%s" % base64.b64encode(imgdata.buf)
return response
def figure_to_response(p):
response = HttpResponse(content_type='image/png')
response['Content-Disposition'] = "attachment; filename="+p.fname+".png"
p.fig.savefig(response, format='png')
return response
def get_data(pk):
if cache.has_key(pk):
data = cache.get(pk)
else:
job = Job.objects.get(pk = pk)
with open(os.path.join(cfg.pickles_dir,job.date.strftime('%Y-%m-%d'),str(job.id)),'rb') as f:
data = pickle.load(f)
cache.set(job.id, data)
return data
def master_plot(request, pk):
data = get_data(pk)
mp = plots.MasterPlot(lariat_data="pass")
mp.plot(pk,job_data=data)
return figure_to_response(mp)
def heat_map(request, pk):
data = get_data(pk)
hm = plots.HeatMap(k1={'intel_snb' : ['intel_snb','intel_snb'],
'intel_hsw' : ['intel_hsw','intel_hsw'],
'intel_pmc3' : ['intel_pmc3','intel_pmc3']
},
k2={'intel_snb' : ['CLOCKS_UNHALTED_REF',
'INSTRUCTIONS_RETIRED'],
'intel_hsw' : ['CLOCKS_UNHALTED_REF',
'INSTRUCTIONS_RETIRED'],
'intel_pmc3' : ['CLOCKS_UNHALTED_REF',
'INSTRUCTIONS_RETIRED']
},
lariat_data="pass")
hm.plot(pk,job_data=data)
return figure_to_response(hm)
def build_schema(data,name):
schema = []
for key,value in data.get_schema(name).iteritems():
if value.unit:
schema.append(value.key + ','+value.unit)
else: schema.append(value.key)
return schema
class JobDetailView(DetailView):
model = Job
def get_context_data(self, **kwargs):
context = super(JobDetailView, self).get_context_data(**kwargs)
job = context['job']
data = get_data(job.id)
import operator
        comp = {'>': operator.gt, '>=': operator.ge,
                '<': operator.lt, '<=': operator.le,
                '==': operator.eq}
print ">>>>>>>>>>>>>>>>>>>>>>>>"
testinfo_dict = {}
for obj in TestInfo.objects.all():
            print obj.test_name
test_type = getattr(sys.modules[exam.__name__],obj.test_name)
test = test_type(min_time=0,ignore_qs=[])
try:
metric = test.test(job.path,data)
print metric
if not metric: continue
setattr(job,obj.field_name,metric)
result = comp[obj.comparator](metric, obj.threshold)
if result: string = 'Failed'
else: string = 'Passed'
testinfo_dict[obj.test_name] = (metric,obj.threshold,string)
except: continue
context['testinfo_dict'] = testinfo_dict
proc_list = []
type_list = []
host_list = []
for host_name, host in data.hosts.iteritems():
if host.stats.has_key('proc'):
proc_list += host.stats['proc']
proc_list = list(set(proc_list))
host_list.append(host_name)
if len(host_list) != job.nodes:
job.status = str(job.nodes-len(host_list))+"_NODES_MISSING"
host0=data.hosts.values()[0]
for type_name, type in host0.stats.iteritems():
schema = ' '.join(build_schema(data,type_name))
type_list.append( (type_name, schema[0:200]) )
type_list = sorted(type_list, key = lambda type_name: type_name[0])
context['proc_list'] = proc_list
context['host_list'] = host_list
context['type_list'] = type_list
urlstring="https://scribe.tacc.utexas.edu:8000/en-US/app/search/search?q=search%20kernel:"
hoststring=urlstring+"%20host%3D"+host_list[0]
serverstring=urlstring+"%20mds*%20OR%20%20oss*"
for host in host_list[1:]:
hoststring+="%20OR%20%20host%3D"+host
hoststring+="&earliest="+str(job.start_epoch)+"&latest="+str(job.end_epoch)+"&display.prefs.events.count=50"
serverstring+="&earliest="+str(job.start_epoch)+"&latest="+str(job.end_epoch)+"&display.prefs.events.count=50"
context['client_url'] = hoststring
context['server_url'] = serverstring
return context
def type_plot(request, pk, type_name):
data = get_data(pk)
schema = build_schema(data,type_name)
schema = [x.split(',')[0] for x in schema]
k1 = {'intel_snb' : [type_name]*len(schema),
'intel_hsw' : [type_name]*len(schema),
'intel_pmc3' : [type_name]*len(schema)
}
k2 = {'intel_snb': schema,
'intel_hsw': schema,
'intel_pmc3': schema
}
tp = plots.DevPlot(k1=k1,k2=k2,lariat_data='pass')
tp.plot(pk,job_data=data)
return figure_to_response(tp)
def proc_detail(data):
print data.get_schema('proc').keys()
for host_name, host in data.hosts.iteritems():
for proc_name, proc in host.stats['proc'].iteritems():
print proc_name, proc[-1]
def type_detail(request, pk, type_name):
data = get_data(pk)
if type_name == 'proc':
proc_detail(data)
else: pass
schema = build_schema(data,type_name)
raw_stats = data.aggregate_stats(type_name)[0]
stats = []
scale = 1.0
for t in range(len(raw_stats)):
temp = []
times = data.times-data.times[0]
for event in range(len(raw_stats[t])):
temp.append(raw_stats[t,event]*scale)
stats.append((times[t],temp))
return render_to_response("comet/type_detail.html",{"type_name" : type_name, "jobid" : pk, "stats_data" : stats, "schema" : schema})
|
lgpl-2.1
|
meduz/scikit-learn
|
examples/applications/plot_model_complexity_influence.py
|
323
|
6372
|
"""
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
|
bsd-3-clause
|
xclxxl414/rqalpha
|
rqalpha/data/trading_dates_mixin.py
|
1
|
3184
|
# -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import pandas as pd
from rqalpha.utils.py2 import lru_cache
def _to_timestamp(d):
return pd.Timestamp(d).replace(hour=0, minute=0, second=0, microsecond=0)
class TradingDatesMixin(object):
def __init__(self, dates):
self._dates = dates
def get_trading_dates(self, start_date, end_date):
        # only the date part is needed
start_date = _to_timestamp(start_date)
end_date = _to_timestamp(end_date)
left = self._dates.searchsorted(start_date)
right = self._dates.searchsorted(end_date, side='right')
return self._dates[left:right]
def get_previous_trading_date(self, date, n=1):
date = _to_timestamp(date)
pos = self._dates.searchsorted(date)
if pos >= n:
return self._dates[pos - n]
else:
return self._dates[0]
def get_next_trading_date(self, date, n=1):
date = _to_timestamp(date)
pos = self._dates.searchsorted(date, side='right')
if pos + n > len(self._dates):
return self._dates[-1]
else:
return self._dates[pos + n - 1]
def is_trading_date(self, date):
date = _to_timestamp(date)
pos = self._dates.searchsorted(date)
return pos < len(self._dates) and self._dates[pos] == date
@lru_cache(512)
def _get_future_trading_date(self, dt):
dt1 = dt - datetime.timedelta(hours=4)
td = pd.Timestamp(dt1.date())
pos = self._dates.searchsorted(td)
if self._dates[pos] != td:
raise RuntimeError('invalid future calendar datetime: {}'.format(dt))
if dt1.hour >= 16:
return self._dates[pos + 1]
return td
def get_trading_dt(self, calendar_dt):
trading_date = self.get_future_trading_date(calendar_dt)
return datetime.datetime.combine(trading_date, calendar_dt.time())
def get_future_trading_date(self, dt):
return self._get_future_trading_date(dt.replace(minute=0, second=0, microsecond=0))
get_nth_previous_trading_date = get_previous_trading_date
def get_n_trading_dates_until(self, dt, n):
date = _to_timestamp(dt)
pos = self._dates.searchsorted(date, side='right')
if pos >= n:
return self._dates[pos - n:pos]
return self._dates[:pos]
def count_trading_dates(self, start_date, end_date):
start_date = _to_timestamp(start_date)
end_date = _to_timestamp(end_date)
return self._dates.searchsorted(end_date, side='right') - self._dates.searchsorted(start_date)
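# A minimal usage sketch (not part of the original module); it assumes the
# mixin is constructed with a sorted pandas.DatetimeIndex of trading days.
if __name__ == "__main__":
    calendar = pd.DatetimeIndex(['2017-01-03', '2017-01-04', '2017-01-05',
                                 '2017-01-06', '2017-01-09'])
    mixin = TradingDatesMixin(calendar)
    print(mixin.get_previous_trading_date('2017-01-09'))   # 2017-01-06
    print(mixin.get_next_trading_date('2017-01-06'))       # 2017-01-09
    print(mixin.is_trading_date('2017-01-07'))             # False, not a trading day
    print(mixin.count_trading_dates('2017-01-03', '2017-01-09'))  # 5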
|
apache-2.0
|
herilalaina/scikit-learn
|
examples/exercises/plot_iris_exercise.py
|
44
|
1690
|
"""
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(np.float)
X_train = X[:int(.9 * n_sample)]
y_train = y[:int(.9 * n_sample)]
X_test = X[int(.9 * n_sample):]
y_test = y[int(.9 * n_sample):]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired,
edgecolor='k', s=20)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none',
zorder=10, edgecolor='k')
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'],
linestyles=['--', '-', '--'], levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
|
bsd-3-clause
|
IssamLaradji/scikit-learn
|
benchmarks/bench_lasso.py
|
297
|
3305
|
"""
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
|
bsd-3-clause
|
HealthCatalystSLC/healthcareai-py
|
setup.py
|
4
|
2465
|
# -*- coding: utf-8 -*-
# from __future__ import unicode_literals
from setuptools import setup, find_packages
def readme():
# I really prefer Markdown to reStructuredText. PyPi does not. This allows me
# to have things how I'd like, but not throw complaints when people are trying
# to install the package and they don't have pypandoc or the README in the
# right place.
# From https://coderwall.com/p/qawuyq/use-markdown-readme-s-in-python-modules
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except (IOError, ImportError):
with open('README.md') as f:
return f.read()
else:
return long_description
setup(name='healthcareai',
version='1.0',
maintainer='Levi Thatcher',
maintainer_email='levi.thatcher@healthcatalyst.com',
license='MIT',
description='Tools for healthcare machine learning',
keywords='machine learning healthcare data science',
long_description=readme(),
url='http://healthcare.ai',
packages=find_packages(),
install_requires=[
'matplotlib>=1.5.3',
'numpy>=1.11.2',
'pandas>=0.20.0',
'tabulate==0.7.7',
# 'pyodbc>=3.0.10',
'scipy>=0.18.1',
'scikit-learn>=0.18',
'imbalanced-learn>=0.2.1',
'sqlalchemy>=1.1.5', 'sklearn'
],
package_data={
'examples': ['*.py', '*.ipynb']
},
tests_require=[
'nose',
],
test_suite='nose.collector',
zip_safe=False,
classifiers=[
"Development Status :: 1 - Planning",
"Intended Audience :: Healthcare Industry",
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Software Development :: Libraries :: Python Modules",
],
include_package_data=True)
|
mit
|
mojoboss/scikit-learn
|
examples/applications/svm_gui.py
|
287
|
11161
|
"""
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by pointing and clicking, and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
|
bsd-3-clause
|
stuart-knock/bokeh
|
examples/charts/file/boxplot.py
|
37
|
1117
|
from collections import OrderedDict
import pandas as pd
from bokeh.charts import BoxPlot, output_file, show
from bokeh.sampledata.olympics2014 import data
# create a DataFrame with the sample data
df = pd.io.json.json_normalize(data['data'])
# filter by countries with at least one medal and sort
df = df[df['medals.total'] > 0]
df = df.sort("medals.total", ascending=False)
# get the countries and group the data by medal type
countries = df.abbr.values.tolist()
gold = df['medals.gold'].astype(float).values
silver = df['medals.silver'].astype(float).values
bronze = df['medals.bronze'].astype(float).values
# build a dict containing the grouped data
medals = OrderedDict(bronze=bronze, silver=silver, gold=gold)
# any of the following commented are valid BoxPlot inputs
#medals = pd.DataFrame(medals)
#medals = list(medals.values())
#medals = tuple(medals.values())
#medals = np.array(list(medals.values()))
output_file("boxplot.html")
boxplot = BoxPlot(
medals, marker='circle', outliers=True, title="boxplot test",
xlabel="medal type", ylabel="medal count", width=800, height=600)
show(boxplot)
|
bsd-3-clause
|
mclumd/swarm-simulator
|
swarm/distribute.py
|
1
|
3667
|
# sarsim.distribute
# distribute random points in a geometric space
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Tue Apr 22 09:47:47 2014 -0400
#
# Copyright (C) 2014 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: distribute.py [] benjamin@bengfort.com $
"""
This module helps distribute points in a geometric space. There are two
supported distributions currently:
1. Generate points evenly (or randomly) along a line with a length, a slope,
and a y-intercept value.
2. Generate random points in a circle with a given radius and center point.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
##########################################################################
## Linear helper functions
##########################################################################
def linear_distribute(num=50, l=100, m=1, b=0, rand=False):
"""
    Distribute num points randomly along a line with slope m and
    y-intercept b. The l parameter determines how far out the line will go.
"""
xvals = np.linspace(0, l, num)
if rand:
xvals = xvals * np.random.rand((num))
yvals = xvals * m + b
return xvals, yvals
def linear_line(num=50, l=100, m=1, b=0):
"""
A helper function for drawing a boundary line with the given length,
slope, and y-intercept.
"""
xvals = np.linspace(0, l, num)
yvals = xvals * m + b
return xvals, yvals
def linear_graph(num=50, l=100, m=1, b=0):
"""
Visualize the distribution of the points along a line.
"""
try:
import pylab as plt
except ImportError:
print "Must have pylab/matplotlib installed to graph"
return
plt.figure(figsize=(7,6))
plt.plot(*linear_line(num,l,m,b), linestyle='-', linewidth=2, label='Circle')
plt.plot(*linear_distribute(num,l,m,b), marker='o', linestyle='.', label='Samples')
plt.grid()
plt.legend(loc='upper right')
plt.show(block=True)
##########################################################################
## Circular helper functions
##########################################################################
def circular_distribute(num=50, r=100, center=(0,0)):
"""
    Distribute num points randomly around a center point with a particular
radius. Used to deploy particles around their home position.
"""
theta = np.linspace(0, 2*np.pi, num)
rands = np.random.rand((num))
xvals = r * rands * np.cos(theta) + center[0]
yvals = r * rands * np.sin(theta) + center[1]
return xvals, yvals
def circular_line(num=50, r=100, center=(0,0)):
"""
A helper function for drawing a boundary line around the center with
the given radius. Used to visualize how particles are being deployed.
"""
theta = np.linspace(0, 2*np.pi, num)
return r * np.cos(theta) + center[0], r * np.sin(theta) + center[1]
def circular_graph(num=50, r=100, center=(0,0)):
"""
Visualize the distribution of the points inside of a circle.
"""
try:
import pylab as plt
except ImportError:
print "Must have pylab/matplotlib installed to graph"
return
plt.figure(figsize=(7,6))
plt.plot(*circular_line(num,r,center), linestyle='-', linewidth=2, label='Circle')
plt.plot(*circular_distribute(num,r,center), marker='o', linestyle='.', label='Samples')
plt.grid()
plt.legend(loc='upper right')
plt.show(block=True)
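##########################################################################
## Example
##########################################################################
def _distribution_example():
    """
    A small, self-contained sketch (not part of the original module) of the
    two distributions described in the module docstring; the numbers are
    illustrative only.
    """
    xs, ys = linear_distribute(num=5, l=10, m=2, b=1)
    assert np.allclose(ys, xs * 2 + 1)             # points lie on y = 2x + 1
    xs, ys = circular_distribute(num=5, r=3, center=(1, 1))
    assert np.all(np.hypot(xs - 1, ys - 1) <= 3)   # points fall inside the circle
    return xs, ys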
if __name__ == "__main__":
#circular_graph(50, 280, (629, 1283))
linear_graph(50, 100, 3, 70)
|
mit
|
raghavrv/scikit-learn
|
sklearn/svm/tests/test_sparse.py
|
63
|
13366
|
import numpy as np
from scipy import sparse
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
from sklearn import datasets, svm, linear_model, base
from sklearn.datasets import make_classification, load_digits, make_blobs
from sklearn.svm.tests import test_svm
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.extmath import safe_sparse_dot
from sklearn.utils.testing import (assert_raises, assert_true, assert_false,
assert_warns, assert_raise_message,
ignore_warnings)
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
X_sp = sparse.lil_matrix(X)
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2
X2 = np.array([[0, 0, 0], [1, 1, 1], [2, 0, 0, ],
[0, 0, 2], [3, 3, 3]])
X2_sp = sparse.dok_matrix(X2)
Y2 = [1, 2, 2, 2, 3]
T2 = np.array([[-1, -1, -1], [1, 1, 1], [2, 2, 2]])
true_result2 = [1, 2, 3]
iris = datasets.load_iris()
# permute
rng = np.random.RandomState(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# sparsify
iris.data = sparse.csr_matrix(iris.data)
def check_svm_model_equal(dense_svm, sparse_svm, X_train, y_train, X_test):
dense_svm.fit(X_train.toarray(), y_train)
if sparse.isspmatrix(X_test):
X_test_dense = X_test.toarray()
else:
X_test_dense = X_test
sparse_svm.fit(X_train, y_train)
assert_true(sparse.issparse(sparse_svm.support_vectors_))
assert_true(sparse.issparse(sparse_svm.dual_coef_))
assert_array_almost_equal(dense_svm.support_vectors_,
sparse_svm.support_vectors_.toarray())
assert_array_almost_equal(dense_svm.dual_coef_, sparse_svm.dual_coef_.toarray())
if dense_svm.kernel == "linear":
assert_true(sparse.issparse(sparse_svm.coef_))
assert_array_almost_equal(dense_svm.coef_, sparse_svm.coef_.toarray())
assert_array_almost_equal(dense_svm.support_, sparse_svm.support_)
assert_array_almost_equal(dense_svm.predict(X_test_dense), sparse_svm.predict(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test))
assert_array_almost_equal(dense_svm.decision_function(X_test_dense),
sparse_svm.decision_function(X_test_dense))
if isinstance(dense_svm, svm.OneClassSVM):
msg = "cannot use sparse input in 'OneClassSVM' trained on dense data"
else:
assert_array_almost_equal(dense_svm.predict_proba(X_test_dense),
sparse_svm.predict_proba(X_test), 4)
msg = "cannot use sparse input in 'SVC' trained on dense data"
if sparse.isspmatrix(X_test):
assert_raise_message(ValueError, msg, dense_svm.predict, X_test)
def test_svc():
"""Check that sparse SVC gives the same result as SVC"""
# many class dataset:
X_blobs, y_blobs = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, Y, T], [X2_sp, Y2, T2],
[X_blobs[:80], y_blobs[:80], X_blobs[80:]],
[iris.data, iris.target, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
sp_clf = svm.SVC(kernel=kernel, probability=True, random_state=0,
decision_function_shape='ovo')
check_svm_model_equal(clf, sp_clf, *dataset)
def test_unsorted_indices():
# test that the result with sorted and unsorted indices in csr is the same
# we use a subset of digits as iris, blobs or make_classification didn't
# show the problem
digits = load_digits()
X, y = digits.data[:50], digits.target[:50]
X_test = sparse.csr_matrix(digits.data[50:100])
X_sparse = sparse.csr_matrix(X)
coef_dense = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X, y).coef_
sparse_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse, y)
coef_sorted = sparse_svc.coef_
# make sure dense and sparse SVM give the same result
assert_array_almost_equal(coef_dense, coef_sorted.toarray())
X_sparse_unsorted = X_sparse[np.arange(X.shape[0])]
X_test_unsorted = X_test[np.arange(X_test.shape[0])]
# make sure we scramble the indices
assert_false(X_sparse_unsorted.has_sorted_indices)
assert_false(X_test_unsorted.has_sorted_indices)
unsorted_svc = svm.SVC(kernel='linear', probability=True,
random_state=0).fit(X_sparse_unsorted, y)
coef_unsorted = unsorted_svc.coef_
# make sure unsorted indices give same result
assert_array_almost_equal(coef_unsorted.toarray(), coef_sorted.toarray())
assert_array_almost_equal(sparse_svc.predict_proba(X_test_unsorted),
sparse_svc.predict_proba(X_test))
def test_svc_with_custom_kernel():
kfunc = lambda x, y: safe_sparse_dot(x, y.T)
clf_lin = svm.SVC(kernel='linear').fit(X_sp, Y)
clf_mylin = svm.SVC(kernel=kfunc).fit(X_sp, Y)
assert_array_equal(clf_lin.predict(X_sp), clf_mylin.predict(X_sp))
def test_svc_iris():
# Test the sparse SVC with the iris dataset
for k in ('linear', 'poly', 'rbf'):
sp_clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
clf = svm.SVC(kernel=k).fit(iris.data.toarray(), iris.target)
assert_array_almost_equal(clf.support_vectors_,
sp_clf.support_vectors_.toarray())
assert_array_almost_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
if k == 'linear':
assert_array_almost_equal(clf.coef_, sp_clf.coef_.toarray())
def test_sparse_decision_function():
#Test decision_function
#Sanity check, test that decision_function implemented in python
#returns the same as the one in libsvm
# multi class:
svc = svm.SVC(kernel='linear', C=0.1, decision_function_shape='ovo')
clf = svc.fit(iris.data, iris.target)
dec = safe_sparse_dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
        clf.classes_[(clf.decision_function(X) > 0).astype(int).ravel()])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
def test_error():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X_sp, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X_sp, Y2)
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict(T), true_result)
def test_linearsvc():
# Similar to test_SVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
sp_clf = svm.LinearSVC(random_state=0).fit(X_sp, Y)
assert_true(sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
assert_array_almost_equal(clf.predict(X), sp_clf.predict(X_sp))
clf.fit(X2, Y2)
sp_clf.fit(X2_sp, Y2)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=4)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=4)
def test_linearsvc_iris():
# Test the sparse LinearSVC with the iris dataset
sp_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
clf = svm.LinearSVC(random_state=0).fit(iris.data.toarray(), iris.target)
assert_equal(clf.fit_intercept, sp_clf.fit_intercept)
assert_array_almost_equal(clf.coef_, sp_clf.coef_, decimal=1)
assert_array_almost_equal(clf.intercept_, sp_clf.intercept_, decimal=1)
assert_array_almost_equal(
clf.predict(iris.data.toarray()), sp_clf.predict(iris.data))
# check decision_function
pred = np.argmax(sp_clf.decision_function(iris.data), 1)
assert_array_almost_equal(pred, clf.predict(iris.data.toarray()))
# sparsify the coefficients on both models and check that they still
# produce the same results
clf.sparsify()
assert_array_equal(pred, clf.predict(iris.data))
sp_clf.sparsify()
assert_array_equal(pred, sp_clf.predict(iris.data))
def test_weight():
# Test class weights
X_, y_ = make_classification(n_samples=200, n_features=100,
weights=[0.833, 0.167], random_state=0)
X_ = sparse.csr_matrix(X_)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0),
svm.SVC()):
clf.set_params(class_weight={0: 5})
clf.fit(X_[:180], y_[:180])
y_pred = clf.predict(X_[180:])
assert_true(np.sum(y_pred == y_[180:]) >= 11)
def test_sample_weights():
# Test weights on individual samples
clf = svm.SVC()
clf.fit(X_sp, Y)
assert_array_equal(clf.predict([X[2]]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X_sp, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict([X[2]]), [2.])
def test_sparse_liblinear_intercept_handling():
# Test that sparse liblinear honours intercept_scaling param
test_svm.test_dense_liblinear_intercept_handling(svm.LinearSVC)
def test_sparse_oneclasssvm():
"""Check that sparse OneClassSVM gives the same result as dense OneClassSVM"""
# many class dataset:
X_blobs, _ = make_blobs(n_samples=100, centers=10, random_state=0)
X_blobs = sparse.csr_matrix(X_blobs)
datasets = [[X_sp, None, T], [X2_sp, None, T2],
[X_blobs[:80], None, X_blobs[80:]],
[iris.data, None, iris.data]]
kernels = ["linear", "poly", "rbf", "sigmoid"]
for dataset in datasets:
for kernel in kernels:
clf = svm.OneClassSVM(kernel=kernel, random_state=0)
sp_clf = svm.OneClassSVM(kernel=kernel, random_state=0)
check_svm_model_equal(clf, sp_clf, *dataset)
def test_sparse_realdata():
# Test on a subset from the 20newsgroups dataset.
# This catches some bugs if input is not correctly converted into
# sparse format or weights are not correctly initialized.
data = np.array([0.03771744, 0.1003567, 0.01174647, 0.027069])
indices = np.array([6, 5, 35, 31])
indptr = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 4, 4, 4])
X = sparse.csr_matrix((data, indices, indptr))
y = np.array(
[1., 0., 2., 2., 1., 1., 1., 2., 2., 0., 1., 2., 2.,
0., 2., 0., 3., 0., 3., 0., 1., 1., 3., 2., 3., 2.,
0., 3., 1., 0., 2., 1., 2., 0., 1., 0., 2., 3., 1.,
3., 0., 1., 0., 0., 2., 0., 1., 2., 2., 2., 3., 2.,
0., 3., 2., 1., 2., 3., 2., 2., 0., 1., 0., 1., 2.,
3., 0., 0., 2., 2., 1., 3., 1., 1., 0., 1., 2., 1.,
1., 3.])
clf = svm.SVC(kernel='linear').fit(X.toarray(), y)
sp_clf = svm.SVC(kernel='linear').fit(sparse.coo_matrix(X), y)
assert_array_equal(clf.support_vectors_, sp_clf.support_vectors_.toarray())
assert_array_equal(clf.dual_coef_, sp_clf.dual_coef_.toarray())
def test_sparse_svc_clone_with_callable_kernel():
# Test that the "dense_fit" is called even though we use sparse input
# meaning that everything works fine.
a = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0)
b = base.clone(a)
b.fit(X_sp, Y)
pred = b.predict(X_sp)
b.predict_proba(X_sp)
dense_svm = svm.SVC(C=1, kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0)
pred_dense = dense_svm.fit(X, Y).predict(X)
assert_array_equal(pred_dense, pred)
# b.decision_function(X_sp) # XXX : should be supported
def test_timeout():
sp = svm.SVC(C=1, kernel=lambda x, y: x * y.T, probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, sp.fit, X_sp, Y)
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
with ignore_warnings(category=ConvergenceWarning):
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
with ignore_warnings(category=ConvergenceWarning):
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
|
bsd-3-clause
|
datapythonista/pandas
|
pandas/tests/io/parser/test_encoding.py
|
3
|
7326
|
"""
Tests encoding functionality during parsing
for all of the parsers defined in parsers.py
"""
from io import BytesIO
import os
import tempfile
import numpy as np
import pytest
from pandas import (
DataFrame,
read_csv,
)
import pandas._testing as tm
def test_bytes_io_input(all_parsers):
encoding = "cp1255"
parser = all_parsers
data = BytesIO("שלום:1234\n562:123".encode(encoding))
result = parser.read_csv(data, sep=":", encoding=encoding)
expected = DataFrame([[562, 123]], columns=["שלום", "1234"])
tm.assert_frame_equal(result, expected)
def test_read_csv_unicode(all_parsers):
parser = all_parsers
data = BytesIO("\u0141aski, Jan;1".encode())
result = parser.read_csv(data, sep=";", encoding="utf-8", header=None)
expected = DataFrame([["\u0141aski, Jan", 1]])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("sep", [",", "\t"])
@pytest.mark.parametrize("encoding", ["utf-16", "utf-16le", "utf-16be"])
def test_utf16_bom_skiprows(all_parsers, sep, encoding):
# see gh-2298
parser = all_parsers
data = """skip this
skip this too
A,B,C
1,2,3
4,5,6""".replace(
",", sep
)
path = f"__{tm.rands(10)}__.csv"
kwargs = {"sep": sep, "skiprows": 2}
utf8 = "utf-8"
with tm.ensure_clean(path) as path:
from io import TextIOWrapper
bytes_data = data.encode(encoding)
with open(path, "wb") as f:
f.write(bytes_data)
bytes_buffer = BytesIO(data.encode(utf8))
bytes_buffer = TextIOWrapper(bytes_buffer, encoding=utf8)
result = parser.read_csv(path, encoding=encoding, **kwargs)
expected = parser.read_csv(bytes_buffer, encoding=utf8, **kwargs)
bytes_buffer.close()
tm.assert_frame_equal(result, expected)
def test_utf16_example(all_parsers, csv_dir_path):
path = os.path.join(csv_dir_path, "utf16_ex.txt")
parser = all_parsers
result = parser.read_csv(path, encoding="utf-16", sep="\t")
assert len(result) == 50
def test_unicode_encoding(all_parsers, csv_dir_path):
path = os.path.join(csv_dir_path, "unicode_series.csv")
parser = all_parsers
result = parser.read_csv(path, header=None, encoding="latin-1")
result = result.set_index(0)
got = result[1][1632]
expected = "\xc1 k\xf6ldum klaka (Cold Fever) (1994)"
assert got == expected
@pytest.mark.parametrize(
"data,kwargs,expected",
[
# Basic test
("a\n1", {}, DataFrame({"a": [1]})),
# "Regular" quoting
('"a"\n1', {"quotechar": '"'}, DataFrame({"a": [1]})),
# Test in a data row instead of header
("b\n1", {"names": ["a"]}, DataFrame({"a": ["b", "1"]})),
# Test in empty data row with skipping
("\n1", {"names": ["a"], "skip_blank_lines": True}, DataFrame({"a": [1]})),
# Test in empty data row without skipping
(
"\n1",
{"names": ["a"], "skip_blank_lines": False},
DataFrame({"a": [np.nan, 1]}),
),
],
)
def test_utf8_bom(all_parsers, data, kwargs, expected):
# see gh-4793
parser = all_parsers
bom = "\ufeff"
utf8 = "utf-8"
def _encode_data_with_bom(_data):
bom_data = (bom + _data).encode(utf8)
return BytesIO(bom_data)
result = parser.read_csv(_encode_data_with_bom(data), encoding=utf8, **kwargs)
tm.assert_frame_equal(result, expected)
def test_read_csv_utf_aliases(all_parsers, utf_value, encoding_fmt):
# see gh-13549
expected = DataFrame({"mb_num": [4.8], "multibyte": ["test"]})
parser = all_parsers
encoding = encoding_fmt.format(utf_value)
data = "mb_num,multibyte\n4.8,test".encode(encoding)
result = parser.read_csv(BytesIO(data), encoding=encoding)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"file_path,encoding",
[
(("io", "data", "csv", "test1.csv"), "utf-8"),
(("io", "parser", "data", "unicode_series.csv"), "latin-1"),
(("io", "parser", "data", "sauron.SHIFT_JIS.csv"), "shiftjis"),
],
)
def test_binary_mode_file_buffers(
all_parsers, csv_dir_path, file_path, encoding, datapath
):
# gh-23779: Python csv engine shouldn't error on files opened in binary.
# gh-31575: Python csv engine shouldn't error on files opened in raw binary.
parser = all_parsers
fpath = datapath(*file_path)
expected = parser.read_csv(fpath, encoding=encoding)
with open(fpath, encoding=encoding) as fa:
result = parser.read_csv(fa)
assert not fa.closed
tm.assert_frame_equal(expected, result)
with open(fpath, mode="rb") as fb:
result = parser.read_csv(fb, encoding=encoding)
assert not fb.closed
tm.assert_frame_equal(expected, result)
with open(fpath, mode="rb", buffering=0) as fb:
result = parser.read_csv(fb, encoding=encoding)
assert not fb.closed
tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize("pass_encoding", [True, False])
def test_encoding_temp_file(all_parsers, utf_value, encoding_fmt, pass_encoding):
# see gh-24130
parser = all_parsers
encoding = encoding_fmt.format(utf_value)
expected = DataFrame({"foo": ["bar"]})
with tm.ensure_clean(mode="w+", encoding=encoding, return_filelike=True) as f:
f.write("foo\nbar")
f.seek(0)
result = parser.read_csv(f, encoding=encoding if pass_encoding else None)
tm.assert_frame_equal(result, expected)
def test_encoding_named_temp_file(all_parsers):
# see gh-31819
parser = all_parsers
encoding = "shift-jis"
if parser.engine == "python":
pytest.skip("NamedTemporaryFile does not work with Python engine")
title = "てすと"
data = "こむ"
expected = DataFrame({title: [data]})
with tempfile.NamedTemporaryFile() as f:
f.write(f"{title}\n{data}".encode(encoding))
f.seek(0)
result = parser.read_csv(f, encoding=encoding)
tm.assert_frame_equal(result, expected)
assert not f.closed
@pytest.mark.parametrize(
"encoding", ["utf-8", "utf-16", "utf-16-be", "utf-16-le", "utf-32"]
)
def test_parse_encoded_special_characters(encoding):
# GH16218 Verify parsing of data with encoded special characters
# Data contains a Unicode 'FULLWIDTH COLON' (U+FF1A) at position (0,"a")
data = "a\tb\n:foo\t0\nbar\t1\nbaz\t2"
encoded_data = BytesIO(data.encode(encoding))
result = read_csv(encoded_data, delimiter="\t", encoding=encoding)
expected = DataFrame(data=[[":foo", 0], ["bar", 1], ["baz", 2]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("encoding", ["utf-8", None, "utf-16", "cp1255", "latin-1"])
def test_encoding_memory_map(all_parsers, encoding):
# GH40986
parser = all_parsers
expected = DataFrame(
{
"name": ["Raphael", "Donatello", "Miguel Angel", "Leonardo"],
"mask": ["red", "purple", "orange", "blue"],
"weapon": ["sai", "bo staff", "nunchunk", "katana"],
}
)
with tm.ensure_clean() as file:
expected.to_csv(file, index=False, encoding=encoding)
df = parser.read_csv(file, encoding=encoding, memory_map=True)
tm.assert_frame_equal(df, expected)
|
bsd-3-clause
|
spectralDNS/shenfun
|
demo/NavierStokesPC.py
|
1
|
7513
|
# 2nd order rotational pressure correction for Navier-Stokes equation
# Author: Shashank Jaiswal, jaiswal0@purdue.edu
import numpy as np
from sympy import symbols, sin, cos, lambdify
from shenfun import *
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
# pylint: disable=multiple-statements
from mpltools import annotation
pa = {'fill': False, 'edgecolor': 'black'}
ta = {'fontsize': 10}
pex = lambda *args: print(*args) + exit(0)
x, y, t = symbols("x, y, t", real=True)
# Define the initial solution
uex = (sin(np.pi*x)**2)*sin(2*np.pi*y)*sin(t)
uey = -sin(2*np.pi*x)*(sin(np.pi*y)**2)*sin(t)
pe = cos(np.pi*x)*cos(np.pi*y)*sin(t)
fex = -uex.diff(x, 2) - uex.diff(y, 2) + pe.diff(x, 1) + uex.diff(t, 1) \
+ uex*uex.diff(x, 1) + uey*uex.diff(y, 1)
fey = -uey.diff(x, 2) - uey.diff(y, 2) + pe.diff(y, 1) + uey.diff(t, 1) \
+ uex*uey.diff(x, 1) + uey*uey.diff(y, 1)
he = uex.diff(x, 1) + uey.diff(y, 1)
uexf, ueyf, pef, fexf, feyf = map(lambda v: lambdify((x, y, t), v),
(uex, uey, pe, fex, fey))
def main(n):
# number of modes in x and y direction
N = (32, 32)
# basis function for velocity components in x and y directions: P_{N}
D0X = FunctionSpace(N[0], 'Legendre', quad='GL', dtype='d', bc=(0, 0))
D0Y = FunctionSpace(N[1], 'Legendre', quad='GL', dtype='d', bc=(0, 0))
# basis function for pressure: P_{N-2}
PX = FunctionSpace(N[0], 'Legendre', quad='GL')
PY = FunctionSpace(N[1], 'Legendre', quad='GL')
PX.slice = lambda: slice(0, N[0]-2)
PY.slice = lambda: slice(0, N[1]-2)
# define a multi-dimensional tensor product basis
Vs = TensorProductSpace(comm, (D0X, D0Y))
Ps = TensorProductSpace(comm, (PX, PY), modify_spaces_inplace=True)
# Create vector space for velocity
Ws = VectorSpace([Vs, Vs])
Cs = TensorSpace([Ws, Ws]) # cauchy stress tensor
# Create test and trial spaces for velocity and pressure
u = TrialFunction(Ws); v = TestFunction(Ws)
p = TrialFunction(Ps); q = TestFunction(Ps)
X = Vs.local_mesh(True)
# Define the initial solution on quadrature points at t=0
U = Array(Ws, buffer=(uex.subs(t, 0), uey.subs(t, 0)))
P = Array(Ps); P.fill(0)
F = Array(Ws, buffer=(fex.subs(t, 0), fey.subs(t, 0)))
U0 = U.copy()
# Define the coefficient vector
U_hat = Function(Ws); U_hat = Ws.forward(U, U_hat)
P_hat = Function(Ps); P_hat = Ps.forward(P, P_hat)
F_hat = Function(Ws); F_hat = Ws.forward(F, F_hat)
# Initial time, time step, final time
ti, dt, tf = 0., 5e-3/n, 5e-2
    nsteps = int(np.ceil((tf - ti)/dt))
dt = (tf - ti)/nsteps
X = Ws.local_mesh(True)
# Define the implicit operator for BDF-2
Lb1 = BlockMatrix(inner(v, u*(1.5/dt)) + inner(grad(v), grad(u)))
Lb2 = BlockMatrix(inner(-grad(q), grad(p)))
# Define the implicit operator for Euler
Le1 = BlockMatrix(inner(v, u*(1./dt)) + inner(grad(v), grad(u)))
Le2 = BlockMatrix(inner(-grad(q), grad(p)))
# Define the implicit operator for updating
Lu1 = BlockMatrix([inner(v, u)])
Lu2 = BlockMatrix([inner(q, p)])
# temporary storage
rhsU, rhsP = Function(Ws), Function(Ps)
U0_hat = Function(Ws); U0_hat = Ws.forward(U, U0_hat)
Ut_hat = Function(Ws); Ut_hat = Ws.forward(U, Ut_hat)
P0_hat = Function(Ps); P0_hat = Ps.forward(P, P0_hat)
Phi_hat = Function(Ps); Phi_hat = Ps.forward(P, Phi_hat)
# Create work arrays for nonlinear part
UiUj = Array(Cs)
UiUj_hat = Function(Cs)
# integrate in time
time = ti
# storage
rhsU, rhsP = rhsU, rhsP
u_hat, p_hat = U_hat, P_hat
u0_hat, p0_hat = U0_hat, P0_hat
ut_hat, phi_hat = Ut_hat, Phi_hat
# Euler time-step
# evaluate the forcing function
F[0] = fexf(X[0], X[1], time+dt)
F[1] = feyf(X[0], X[1], time+dt)
# Solve (9.102)
rhsU.fill(0)
rhsU += -inner(v, grad(p_hat))
rhsU += inner(v, F)
rhsU += inner(v, u_hat/dt)
U = Ws.backward(U_hat, U)
UiUj = outer(U, U, UiUj)
UiUj_hat = UiUj.forward(UiUj_hat)
rhsU += -inner(v, div(UiUj_hat))
ut_hat = Le1.solve(rhsU, u=ut_hat)
# Solve (9.107)
rhsP.fill(0)
rhsP += (1/dt)*inner(q, div(ut_hat))
phi_hat = Le2.solve(rhsP, u=phi_hat, constraints=((0, 0, 0),))
# Update for next time step
u0_hat[:] = u_hat; p0_hat[:] = p_hat
# Update (9.107)
rhsU.fill(0)
rhsU += inner(v, ut_hat) - inner(v, dt*grad(phi_hat))
u_hat = Lu1.solve(rhsU, u=u_hat)
# Update (9.105)
rhsP.fill(0)
rhsP += inner(q, phi_hat) + inner(q, p_hat) - inner(q, div(ut_hat))
p_hat = Lu2.solve(rhsP, u=p_hat, constraints=((0, 0, 0),))
time += dt
# BDF time step
for step in range(2, nsteps+1):
# evaluate the forcing function
F[0] = fexf(X[0], X[1], time+dt)
F[1] = feyf(X[0], X[1], time+dt)
# Solve (9.102)
rhsU.fill(0)
rhsU += -inner(v, grad(p_hat))
rhsU += inner(v, F)
rhsU += inner(v, u_hat*2/dt) - inner(v, u0_hat*0.5/dt)
U = Ws.backward(U_hat, U)
UiUj = outer(U, U, UiUj)
UiUj_hat = UiUj.forward(UiUj_hat)
rhsU += -2*inner(v, div(UiUj_hat))
U0 = Ws.backward(U0_hat, U0)
UiUj = outer(U0, U0, UiUj)
UiUj_hat = UiUj.forward(UiUj_hat)
rhsU += inner(v, div(UiUj_hat))
ut_hat = Lb1.solve(rhsU, u=ut_hat)
# Solve (9.107)
rhsP.fill(0)
rhsP += 1.5/dt*inner(q, div(ut_hat))
phi_hat = Lb2.solve(rhsP, u=phi_hat, constraints=((0, 0, 0),))
# update for next time step
u0_hat[:] = u_hat; p0_hat[:] = p_hat
# Update (9.107, 9.105)
rhsU.fill(0)
rhsU += inner(v, ut_hat) - inner(v, ((2.*dt/3))*grad(phi_hat))
u_hat = Lu1.solve(rhsU, u=u_hat)
rhsP.fill(0)
rhsP += inner(q, phi_hat) + inner(q, p_hat) - inner(q, div(ut_hat))
p_hat = Lu2.solve(rhsP, u=p_hat, constraints=((0, 0, 0),))
# increment time
time += dt
# Transform the solution to physical space
UP = [*U_hat.backward(U), P_hat.backward(P)]
# compute error
Ue = Array(Ws, buffer=(uex.subs(t, tf), uey.subs(t, tf)))
Pe = Array(Ps, buffer=(pe.subs(t, tf)))
UPe = [*Ue, Pe]
l2_error = list(map(np.linalg.norm, [u-ue for u, ue in zip(UP, UPe)]))
return l2_error
if __name__ == "__main__":
N = 2**np.arange(0, 4)
E = np.zeros((3, len(N)))
for (j, n) in enumerate(N):
E[:, j] = main(n)
fig = plt.figure(figsize=(5.69, 4.27))
ax = plt.gca()
marks = ('or', '-g', '-ob')
vars = (r'$u_x$', r'$u_y$', r'$p$')
for i in range(3):
plt.loglog(N, E[i, :], marks[i], label=vars[i])
slope, intercept = np.polyfit(np.log(N[-2:]), np.log(E[i, -2:]), 1)
if i != 1:
annotation.slope_marker((N[-2], E[i, -2]), ("{0:.2f}".format(slope), 1),
ax=ax, poly_kwargs=pa, text_kwargs=ta)
plt.text(N[0], 2e-5, r"$\Delta t=5 \times 10^{-3},\; N=32^2$")
plt.text(N[0], 1e-5, r"Final Time = $5 \times 10^{-2}$")
plt.title(r"Navier-Stokes: $2^{nd}$-order Rotational Pressure-Correction")
plt.legend(); plt.autoscale()
plt.ylabel(r'$|Error|_{L^2}$')
plt.xticks(N)
ax.get_xaxis().set_minor_formatter(NullFormatter())
fmt = lambda v: r"$\Delta t/{0}$".format(v) if v!=1 else r"$\Delta t$"
plt.gca().set_xticklabels(list(map(fmt, N)))
#plt.savefig("navier-stokes.pdf", orientation='portrait')
plt.show()
|
bsd-2-clause
|
robcarver17/pysystemtrade
|
sysquant/estimators/correlation_over_time.py
|
1
|
2230
|
import pandas as pd
from syscore.genutils import progressBar
from sysquant.estimators.correlation_estimator import correlationEstimator
from sysquant.fitting_dates import generate_fitting_dates
from sysquant.estimators.correlations import CorrelationList
def correlation_over_time_for_returns(returns_for_correlation: pd.DataFrame,
frequency="W",
forward_fill_price_index=True,
**kwargs
) -> CorrelationList:
index_prices_for_correlation = returns_for_correlation.cumsum()
if forward_fill_price_index:
index_prices_for_correlation = index_prices_for_correlation.ffill()
index_prices_for_correlation = index_prices_for_correlation.resample(frequency).last()
returns_for_correlation = index_prices_for_correlation.diff()
correlation_list = correlation_over_time(returns_for_correlation,
**kwargs)
return correlation_list
def correlation_over_time(data_for_correlation: pd.DataFrame,
date_method="expanding",
rollyears=20,
interval_frequency: str = "12M",
**kwargs
) -> CorrelationList:
column_names = list(data_for_correlation.columns)
# Generate time periods
fit_dates = generate_fitting_dates(
data_for_correlation, date_method=date_method, rollyears=rollyears,
interval_frequency = interval_frequency
)
progress = progressBar(len(fit_dates), "Estimating correlations")
correlation_estimator_for_one_period = correlationEstimator(
data_for_correlation, **kwargs
)
corr_list = []
# Now for each time period, estimate correlation
for fit_period in fit_dates:
progress.iterate()
corrmat = correlation_estimator_for_one_period.calculate_estimate_for_period(
fit_period)
corr_list.append(corrmat)
correlation_list = CorrelationList(corr_list = corr_list, column_names = column_names,
fit_dates=fit_dates)
return correlation_list
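if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): estimate expanding-window
    # correlations for a small random returns DataFrame. Assumes pysystemtrade
    # and its sysquant dependencies are installed; the instrument names, sizes
    # and parameters below are made up for demonstration.
    import numpy as np
    dates = pd.date_range("2010-01-01", periods=2500, freq="B")
    rng = np.random.default_rng(0)
    returns = pd.DataFrame(rng.normal(0.0, 0.01, size=(2500, 3)),
                           index=dates, columns=["SP500", "US10", "GOLD"])
    corr_list = correlation_over_time_for_returns(returns, frequency="W")
    print(corr_list)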
|
gpl-3.0
|
jgliss/pyplis
|
pyplis/dilutioncorr.py
|
1
|
25064
|
# -*- coding: utf-8 -*-
#
# Pyplis is a Python library for the analysis of UV SO2 camera data
# Copyright (C) 2017 Jonas Gliss (jonasgliss@gmail.com)
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Pyplis module for image based correction of the signal dilution effect."""
from __future__ import (absolute_import, division)
from numpy import asarray, linspace, exp, ones, nan
from scipy.ndimage.filters import median_filter
from matplotlib.pyplot import subplots, rcParams
from collections import OrderedDict as od
from pandas import Series, DataFrame
import six
from pyplis import logger, print_log
from .utils import LineOnImage
from .image import Img
from .optimisation import dilution_corr_fit
from .model_functions import dilutioncorr_model
from .geometry import MeasGeometry
from .helpers import check_roi, isnum
from .imagelists import ImgList
from .exceptions import ImgModifiedError
LABEL_SIZE = rcParams["font.size"] + 2
class DilutionCorr(object):
r"""Class for management of dilution correction.
The class provides functionality to retrieve topographic distances from
meas geometry, to manage lines in the image used for the retrieval, to
perform the actual dilution fit (i.e. retrieval of atmospheric scattering
coefficients) and to apply the dilution correction.
This class does not store any results related to individual images.
Parameters
----------
lines : list
optional, list containing :class:`LineOnImage` objects used to
retrieve terrain distances for the dilution fit
meas_geometry : MeasGeometry
optional, measurement geometry (required for terrain distance
retrieval)
**settings :
settings for terrain distance retrieval:
- skip_pix: specify pixel step on line for which topo \
intersections are searched
- min_slope_angle: minimum slope of topography in order to be \
considered for topo distance retrieval
- topo_res_m: interpolation resolution applied to \
:class:`ElevationProfile` objects used to find intersections \
of pixel viewing direction with topography
"""
def __init__(self, lines=None, meas_geometry=None, **settings):
if lines is None:
lines = []
elif isinstance(lines, LineOnImage):
lines = [lines]
if not isinstance(lines, list):
raise TypeError("Invalid input type for parameter lines, need "
"LineOnGrid class or a python list containing "
"LineOnGrid objects")
if not isinstance(meas_geometry, MeasGeometry):
meas_geometry = MeasGeometry()
self.meas_geometry = meas_geometry
self.lines = od()
self.settings = {"skip_pix": 5,
"min_slope_angle": 5.0,
"topo_res_m": 5.0}
self._masks_lines = od()
self._dists_lines = od()
# additional retrieval points that were added manually using
# method add_retrieval_point
self._add_points = []
self._skip_pix = od()
self._geopoints = od()
self._geopoints["add_points"] = []
for line in lines:
self.lines[line.line_id] = line
self.update_settings(**settings)
@property
def line_ids(self):
"""Get IDs of all :class:`LineOnImage` objects for distance retrieval.
"""
return list(self.lines.keys())
def update_settings(self, **settings):
"""Update settings dict for topo distance retrieval."""
for k, v in six.iteritems(settings):
if k in self.settings:
self.settings[k] = v
def add_retrieval_line(self, line):
"""Add one topography retrieval line."""
if not isinstance(line, LineOnImage):
raise TypeError("Need LineOnImage object")
if line.line_id in self.line_ids:
raise KeyError("A line with ID %s is already assigned to Dilution "
"correction engine" % line.line_id)
self.lines[line.line_id] = line
def add_retrieval_point(self, pos_x_abs, pos_y_abs, dist=None):
"""Add a distinct pixel with known distance to image.
Parameters
----------
pos_x_abs : int
x-pixel position of point in image in absolute coordinate (i.e.
pyramid level 0 and not cropped)
pos_y_abs : int
y-pixel position of point in image in absolute coordinate (i.e.
pyramid level 0 and not cropped)
dist : :obj:`float`, optional
distance to feature in image in m. If None (default), the distance
will be estimated
"""
if not isnum(dist):
logger.info("Input distance for point unspecified, trying automatic "
"access")
(dist,
derr,
p) = self.meas_geometry.get_topo_distance_pix(pos_x_abs,
pos_y_abs)
self._geopoints["add_points"].append(p)
dist *= 1000.0
self._add_points.append((pos_x_abs, pos_y_abs, dist))
def det_topo_dists_all_lines(self, **settings):
"""Estimate distances to topo distances to all assigned lines.
Parameters
----------
**settings
keyword args passed to update search settings (:attr:`settings`)
and passed to
:func:`get_topo_distances_line` in :class:`MeasGeometry`
"""
for lid, line in six.iteritems(self.lines):
self.det_topo_dists_line(lid, **settings)
def det_topo_dists_line(self, line_id, **settings):
"""Estimate distances to pixels on current lines.
Retrieves distances to all :class:`LineOnImage` objects in
``self.lines`` using ``self.meas_geometry`` (i.e. camera position
and viewing direction).
Parameters
----------
line_id : str
ID of line
**settings :
additional key word args used to update search settings (passed to
:func:`get_topo_distances_line` in :class:`MeasGeometry`)
Returns
-------
array
retrieved distances
"""
if line_id not in self.lines.keys():
raise KeyError("No line with ID %s available" % line_id)
logger.info("Searching topo distances for pixels on line %s" % line_id)
self.update_settings(**settings)
l = self.lines[line_id]
res = self.meas_geometry.get_topo_distances_line(l, **self.settings)
dists = res["dists"] * 1000. # convert to m
self._geopoints[line_id] = res["geo_points"]
self._dists_lines[line_id] = dists
self._masks_lines[line_id] = res["ok"]
self._skip_pix[line_id] = self.settings["skip_pix"]
return dists
def get_radiances(self, img, line_ids=None):
"""Get radiances for dilution fit along terrain lines.
The data is only extracted along specified input lines. The terrain
        distance retrieval :func:`det_topo_dists_line` must have been
performed for that.
Parameters
----------
img : Img
vignetting corrected plume image from which the radiances are
extracted
line_ids : list
if desired, the data can also be accessed for specified line ids,
which have to be provided in a list. If empty (default), all lines
assigned to this class are considered
"""
if line_ids is None:
line_ids = []
if not isinstance(img, Img) or not img.edit_log["vigncorr"]:
raise ValueError("Invalid input, need Img class and Img needs to "
"be corrected for vignetting")
if img.is_cropped or img.is_resized:
raise ImgModifiedError("Image must not be cropped or rescaled")
if len(line_ids) == 0:
line_ids = self.line_ids
dists, rads = [], []
for line_id in line_ids:
if line_id in self._dists_lines:
skip = int(self._skip_pix[line_id])
l = self.lines[line_id]
mask = self._masks_lines[line_id]
dists.extend(self._dists_lines[line_id][mask])
rads.extend(l.get_line_profile(img)[::skip][mask])
else:
print_log.warning("Distances to line %s not available, please apply "
"distance retrieval first using class method "
"det_topo_dists_line")
for x, y, dist in self._add_points:
dists.append(dist)
rads.append(img.img[y, x])
return asarray(dists), asarray(rads)
def apply_dilution_fit(self, img, rad_ambient, i0_guess=None,
i0_min=0, i0_max=None, ext_guess=1e-4, ext_min=0,
ext_max=1e-3, line_ids=None, plot=True, **kwargs):
r"""Perform dilution correction fit to retrieve extinction coefficient.
Uses :func:`dilution_corr_fit` of :mod:`optimisation` which is a
bounded least square fit based on the following model function
.. math::
I_{meas}(\lambda) = I_0(\lambda)e^{-\epsilon(\lambda)d} +
I_A(\lambda)(1-e^{-\epsilon(\lambda)d})
Parameters
----------
img : Img
vignetting corrected image for radiance extraction
rad_ambient : float
ambient intensity (:math:`I_A` in model)
i0_guess : float
optional: guess value for initial intensity of topographic
features, i.e. the reflected radiation before entering scattering
medium (:math:`I_0` in model, if None, then it is set 5% of the
ambient intensity ``rad_ambient``)
i0_min : float
optional: minimum initial intensity of topographic features
i0_max : float
optional: maximum initial intensity of topographic features
ext_guess : float
guess value for atm. extinction coefficient
(:math:`\epsilon` in model)
ext_min : float
minimum value for atm. extinction coefficient
ext_max : float
maximum value for atm. extinction coefficient
line_ids : list
if desired, the data can also be accessed for specified line ids,
which have to be provided in a list. If empty (default), all lines
are considered
plot : bool
if True, the result is plotted
**kwargs :
additional keyword args passed to plotting function (e.g. to
pass an axes object)
Returns
-------
tuple
4-element tuple containing
- retrieved extinction coefficient
- retrieved initial intensity
- fit result object
- axes instance or None (dependent on :param:`plot`)
"""
if line_ids is None:
line_ids = []
dists, rads = self.get_radiances(img, line_ids)
fit_res = dilution_corr_fit(rads, dists, rad_ambient, i0_guess,
i0_min, i0_max, ext_guess,
ext_min, ext_max)
i0, ext = fit_res.x
ax = None
if plot:
ax = self.plot_fit_result(dists, rads, rad_ambient, i0, ext,
**kwargs)
return ext, i0, fit_res, ax
def get_ext_coeffs_imglist(self, lst, roi_ambient=None, apply_median=5,
**kwargs):
"""Apply dilution fit to all images in an :class:`ImgList`.
Parameters
----------
lst : ImgList
image list for which the coefficients are supposed to be retrieved
roi_ambient : list
            region of interest used to estimate ambient intensity, if None
            (default), use :attr:`scale_rect` of :class:`PlumeBackgroundModel`
of the input list
apply_median : int
if > 0, then a median filter of provided width is applied to
the result time series (ext. coeffs and initial intensities)
**kwargs :
additional keyword args passed to dilution fit method
:func:`apply_dilution_fit`.
Returns
-------
DataFrame
pandas data frame containing time series of retrieved extinction
coefficients and initial intensities as well as the ambient
intensities used, access keys are:
- ``coeffs``: retrieved extinction coefficients
- ``i0``: retrieved initial intensities
- ``ia``: retrieved ambient intensities
"""
if not isinstance(lst, ImgList):
raise ValueError("Invalid input type for param lst, need ImgList")
lst.vigncorr_mode = True
if not check_roi(roi_ambient):
try:
roi_ambient = lst.bg_model.scale_rect
except BaseException:
pass
if not check_roi(roi_ambient):
raise ValueError("Input parameter roi_ambient is not a valied"
"ROI and neither is scale_rect in background "
"model of input image list...")
cfn = lst.cfn
lst.goto_img(0)
nof = lst.nof
times = lst.acq_times
coeffs = []
i0s = []
ias = []
for k in range(nof):
img = lst.current_img()
try:
ia = img.crop(roi_ambient, True).mean()
ext, i0, _, _ = self.apply_dilution_fit(img=img,
rad_ambient=ia,
plot=False,
**kwargs)
coeffs.append(ext)
i0s.append(i0)
ias.append(ia)
except BaseException:
coeffs.append(nan)
i0s.append(nan)
ias.append(nan)
lst.goto_next()
lst.goto_img(cfn)
if apply_median > 0:
coeffs = median_filter(coeffs, apply_median)
i0s = median_filter(i0s, apply_median)
ias = median_filter(ias, apply_median)
return DataFrame(dict(coeffs=coeffs, i0=i0s, ia=ias), index=times)
def correct_img(self, plume_img, ext, plume_bg_img, plume_dists,
plume_pix_mask):
"""Perform dilution correction for a plume image.
Note
-----
See :func:`correct_img` for description
Returns
-------
Img
dilution corrected image
"""
return correct_img(plume_img, ext, plume_bg_img, plume_dists,
plume_pix_mask)
def plot_fit_result(self, dists, rads, rad_ambient, i0, ext, ax=None):
"""Plot result of dilution fit."""
if ax is None:
fig, ax = subplots(1, 1)
x = linspace(0, dists.max(), 100)
ints = dilutioncorr_model(x, rad_ambient, i0, ext)
ax.plot(dists / 1000.0, rads, " x", label="Data")
ext_perkm = ext * 1000
lbl_fit = (r"Fit: $I_0$=%.1f DN, $\epsilon$ = %.4f km$^{-1}$"
% (i0, ext_perkm))
ax.plot(x / 1000.0, ints, "--c", label=lbl_fit)
ax.set_xlabel("Distance [km]", fontsize=LABEL_SIZE)
ax.set_ylabel("Radiances [DN]", fontsize=LABEL_SIZE)
ax.set_title(r"$I_A$ = %.1f" % rad_ambient, fontsize=LABEL_SIZE + 2)
ax.grid()
# ax = rotate_ytick_labels(ax, deg=45, va="center")
ax.legend(loc="best", fancybox=True, framealpha=0.5, fontsize=13)
return ax
def get_extinction_coeffs_imglist(self, imglist, ambient_roi_abs,
darkcorr=True, line_ids=None,
**fit_settings):
"""Retrieve extinction coefficients for all imags in list.
.. note::
Alpha version: not yet tested
"""
if line_ids is None:
line_ids = []
imglist.aa_mode = False
imglist.tau_mode = False
imglist.auto_reload = False
imglist.darkcorr_mode = True
if imglist.gaussian_blurring and imglist.pyrlevel == 0:
logger.info("Adding gaussian blurring of 2 for topographic radiance "
"retrieval")
imglist.gaussian_blurring = 2
if imglist.pyrlevel != list(self.lines.values())[0].pyrlevel:
raise ValueError("Mismatch in pyramid level of lines and imglist")
if len(line_ids) == 0:
line_ids = self.line_ids
imglist.vigncorr_mode = True
imglist.goto_img(0)
imglist.auto_reload = True
num = imglist.nof
i0s, exts, acq_times = ones(num) * nan, ones(num) * nan, [nan] * num
for k in range(num):
img = imglist.current_img()
rad_ambient = img.crop(ambient_roi_abs, True).mean()
ext, i0, _, _ = self.apply_dilution_fit(img, rad_ambient,
line_ids=line_ids,
plot=False,
**fit_settings)
acq_times[k] = img.meta["start_acq"]
i0s[k] = i0
exts[k] = ext
return Series(exts, acq_times), Series(i0s, acq_times)
def plot_distances_3d(self, draw_cam=1, draw_source=1, draw_plume=0,
draw_fov=0, cmap_topo="Oranges",
contour_color="#708090", contour_antialiased=True,
contour_lw=0.2, axis_off=True, line_ids=None,
**kwargs):
"""Draw 3D map of scene including geopoints of distance retrievals.
Parameters
----------
draw_cam : bool
insert camera position into map
draw_source : bool
insert source position into map
draw_plume : bool
insert plume vector into map
draw_fov : bool
insert camera FOV (az range) into map
cmap_topo : str
string specifying colormap for topography surface plot defaults to
"Oranges"
contour_color : str
string specifying color of contour lines colors of topo contour
lines (default: "#708090")
contour_antialiased : bool
            apply antialiasing to surface plot of topography, defaults to True
contour_lw :
            width of drawn contour lines, defaults to 0.2, use 0 if you do not
want contour lines inserted
axis_off : bool
if True, then the rendering of axes is excluded
line_ids : list
if desired, the data can also be accessed for specified line ids,
which have to be provided in a list. If empty (default), all topo
lines are drawn
Returns
-------
Map
plotted map instance (is of type Basemap)
"""
if line_ids is None:
line_ids = []
map3d = self.meas_geometry.draw_map_3d(
draw_cam, draw_source,
draw_plume, draw_fov,
cmap_topo,
contour_color=contour_color,
contour_antialiased=contour_antialiased,
contour_lw=contour_lw)
if len(line_ids) == 0:
line_ids = self.line_ids
        for line_id in line_ids:
if line_id in self._dists_lines:
line = self.lines[line_id]
mask = self._masks_lines[line_id]
pts = self._geopoints[line_id][mask]
map3d.add_geo_points_3d(pts, color=line.color, **kwargs)
for pt in self._geopoints["add_points"]:
map3d.draw_geo_point_3d(pt, color="r")
if axis_off:
map3d.ax.set_axis_off()
return map3d
def correct_img(plume_img, ext, plume_bg_img, plume_dists, plume_pix_mask):
"""Perform dilution correction for a plume image.
    Corresponds to Eq. 4 in `Campion et al., 2015 <http://
www.sciencedirect.com/science/article/pii/S0377027315000189>`_.
Parameters
----------
plume_img : Img
vignetting corrected plume image
ext : float
atmospheric extinction coefficient
plume_bg_img : Img
vignetting corrected plume background image (can be, for instance,
retrieved using :mod:`plumebackground`)
plume_dists : :obj:`array`, :obj:`Img`, :obj:`float`
        plume distance(s) in m. If the input is a numpy array or :class:`Img`,
        then it must have the same shape as :param:`plume_img`
plume_pix_mask : ndarray
mask specifying plume pixels (only those are corrected), can also be
type :class:`Img`
Returns
-------
Img
dilution corrected image
"""
for im in [plume_img, plume_bg_img]:
if not isinstance(im, Img) or im.edit_log["vigncorr"] is False:
raise ValueError("Plume and background image need to be Img "
"objects and vignetting corrected")
try:
plume_dists = plume_dists.img
except BaseException:
pass
try:
plume_pix_mask = plume_pix_mask.img
except BaseException:
pass
dists = plume_pix_mask * plume_dists
plume_img.img = ((plume_img.img - plume_bg_img.img *
(1 - exp(-ext * dists))) / exp(-ext * dists))
plume_img.edit_log["dilcorr"] = True
return plume_img
def get_topo_dists_lines(lines, geom, img=None, skip_pix=5, topo_res_m=5.0,
min_slope_angle=5.0, plot=False, line_color="lime"):
if isinstance(lines, LineOnImage):
lines = [lines]
ax = None
map3d = None
pts, dists, mask = [], [], []
for line in lines:
l = line.to_list() # line coords as list
res = geom.get_topo_distances_line(l, skip_pix, topo_res_m,
min_slope_angle)
pts.extend(res["geo_points"])
dists.extend(res["dists"])
mask.extend(res["ok"])
pts, dists = asarray(pts), asarray(dists) * 1000.
if plot:
if isinstance(img, Img):
ax = img.show()
h, w = img.img.shape
for line in lines:
line.plot_line_on_grid(ax=ax, color=line_color, marker="")
ax.set_xlim([0, w - 1])
ax.set_ylim([h - 1, 0])
map3d = geom.draw_map_3d(0, 0, 0, 0, cmap_topo="gray")
# insert camera position into 3D map
map3d.add_geo_points_3d(pts, color=line_color)
geom.cam_pos.plot_3d(map=map3d, add_name=True, dz_text=40)
map3d.ax.set_axis_off()
return dists, asarray(mask), map3d, ax
def perform_dilution_correction(plume_img, ext, plume_bg_img, plume_dist_img,
plume_pix_mask):
dists = plume_pix_mask * plume_dist_img
return ((plume_img - plume_bg_img *
(1 - exp(-ext * dists))) / exp(-ext * dists))
def get_extinction_coeff(rads, dists, rad_ambient, plot=True, **kwargs):
"""Perform dilution correction fit to retrieve extinction coefficient.
:param ndarray rads: radiances retrieved for topographic features
:param ndarray dists: distances corresponding to ``rads``
:param rad_ambient: ambient sky intensity
:param bool plot: if True, the result is plotted
:param **kwargs: additional keyword arguments for fit settings (passed
to :func:`dilution_corr_fit` of module :mod:`optimisation`)
"""
fit_res = dilution_corr_fit(rads, dists, rad_ambient, **kwargs)
i0, ext = fit_res.x
ax = None
if plot:
x = linspace(0, dists.max(), 100)
ints = dilutioncorr_model(x, rad_ambient, i0, ext)
fig, ax = subplots(1, 1)
ax.plot(dists, rads, " x", label="Data")
lbl_fit = r"Fit result: $I_0$=%.1f DN, $\epsilon$ = %.2e" % (i0, ext)
ax.plot(x, ints, "--c", label=lbl_fit)
ax.set_xlabel("Distance [m]")
ax.set_ylabel("Radiances [DN]")
ax.legend(loc="best", fancybox=True, framealpha=0.5, fontsize=12)
return ext, i0, fit_res, ax
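if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the library API):
    # recover an extinction coefficient from synthetic radiances generated
    # with the dilution model used throughout this module,
    #     I(d) = I0 * exp(-ext * d) + I_A * (1 - exp(-ext * d)).
    # All numbers below are made-up demonstration values; assumes pyplis and
    # its optimisation module are installed.
    from numpy.random import normal
    dists = linspace(200.0, 8000.0, 25)  # distances to terrain features [m]
    i0_true, ext_true, rad_ambient = 1500.0, 8e-5, 3500.0
    rads = dilutioncorr_model(dists, rad_ambient, i0_true, ext_true)
    rads = rads + normal(0, 20.0, rads.shape)  # add some measurement noise
    ext, i0, fit_res, _ = get_extinction_coeff(rads, dists, rad_ambient,
                                               plot=False)
    logger.info("Retrieved ext=%.2e m-1 (true %.2e), i0=%.1f (true %.1f)"
                % (ext, ext_true, i0, i0_true))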
|
gpl-3.0
|
alexsavio/scikit-learn
|
sklearn/tree/export.py
|
35
|
16873
|
"""
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Trevor Stephens <trev.stephens@gmail.com>
# License: BSD 3 clause
import numpy as np
import warnings
from ..externals import six
from . import _criterion
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
class Sentinel(object):
    def __repr__(self):
return '"tree.dot"'
SENTINEL = Sentinel()
def export_graphviz(decision_tree, out_file=SENTINEL, max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default='tree.dot')
Handle or name of the output file. If ``None``, the result is
        returned as a string. This will be the default from version 0.20.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Returns
-------
dot_data : string
String representation of the input tree in GraphViz dot format.
Only returned if ``out_file`` is None.
.. versionadded:: 0.18
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
if len(sorted_values) == 1:
alpha = 0
else:
alpha = int(np.round(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]), 0))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(np.round(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] -
colors['bounds'][0])), 0))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1 and len(np.unique(tree.value)) != 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
            out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
return_string = False
try:
if out_file == SENTINEL:
warnings.warn("out_file can be set to None starting from 0.18. "
"This will be the default in 0.20.",
DeprecationWarning)
out_file = "tree.dot"
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
if out_file is None:
return_string = True
out_file = six.StringIO()
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
if return_string:
return out_file.getvalue()
finally:
if own_file:
out_file.close()
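# A minimal usage sketch (illustration only) of the exporter above, assuming it
# is exposed as sklearn.tree.export_graphviz.  Passing out_file=None exercises
# the return_string branch and hands back the DOT source as a string; the
# filled/rounded flags drive the node-style attributes written near the top of
# the graph body.
def _demo_export_graphviz():
    from sklearn.datasets import load_iris
    from sklearn.tree import DecisionTreeClassifier, export_graphviz

    iris = load_iris()
    clf = DecisionTreeClassifier(max_depth=2, random_state=0)
    clf.fit(iris.data, iris.target)
    # out_file=None returns the DOT source instead of writing tree.dot
    dot_source = export_graphviz(clf, out_file=None, filled=True, rounded=True,
                                 feature_names=iris.feature_names)
    return dot_source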
|
bsd-3-clause
|
eranchetz/nupic
|
examples/opf/tools/testDiagnostics.py
|
58
|
1606
|
import numpy as np
def printMatrix(inputs, spOutput):
  ''' The (i, j)-th cell of the diff matrix holds the number of input pairs whose
  encodings differ by i bits and whose sets of activated cells differ at j places.
Parameters:
--------------------------------------------------------------------
inputs: the input encodings
spOutput: the coincidences activated in response to each input
'''
from pylab import matplotlib as mat
w=len(np.nonzero(inputs[0])[0])
numActive=len(np.nonzero(spOutput[0])[0])
matrix = np.zeros([2*w+1,2*numActive+1])
for x in xrange(len(inputs)):
i = [_hammingDistance(inputs[x], z) for z in inputs[x:]]
j = [_hammingDistance(spOutput[x], a) for a in spOutput[x:]]
for p, q in zip(i,j):
matrix[p,q]+=1
for y in xrange(len(matrix)) :
matrix[y]=[max(10*x, 100) if (x<100 and x>0) else x for x in matrix[y]]
cdict = {'red':((0.0,0.0,0.0),(0.01,0.7,0.5),(0.3,1.0,0.7),(1.0,1.0,1.0)),\
'green': ((0.0,0.0,0.0),(0.01,0.7,0.5),(0.3,1.0,0.0),(1.0,1.0,1.0)),\
'blue': ((0.0,0.0,0.0),(0.01,0.7,0.5),(0.3,1.0,0.0),(1.0,0.5,1.0))}
my_cmap = mat.colors.LinearSegmentedColormap('my_colormap',cdict,256)
pyl=mat.pyplot
pyl.matshow(matrix, cmap = my_cmap)
pyl.colorbar()
pyl.ylabel('Number of bits by which the inputs differ')
pyl.xlabel('Number of cells by which input and output differ')
pyl.title('The difference matrix')
pyl.show()
def _hammingDistance(s1, s2):
"""Hamming distance between two numpy arrays s1 and s2"""
return sum(abs(s1-s2))
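# A small driver sketch for printMatrix above.  The toy arrays are made up for
# illustration and simply assume what the docstring implies: both arguments are
# 2-D arrays of 0/1 rows with a fixed number of active bits per row.
def _demoPrintMatrix():
  import numpy as np
  toyInputs = np.array([[1, 1, 0, 0, 0],
                        [1, 0, 1, 0, 0],
                        [0, 1, 1, 0, 0]])
  toySpOutput = np.array([[1, 0, 0, 1],
                          [1, 0, 1, 0],
                          [0, 1, 1, 0]])
  # every row has w=2 active input bits and numActive=2 active cells, matching
  # the quantities printMatrix reads from the first row
  printMatrix(toyInputs, toySpOutput)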
|
agpl-3.0
|
rgommers/pywt
|
doc/source/pyplots/plot_2d_bases.py
|
3
|
1568
|
from itertools import product
import numpy as np
from matplotlib import pyplot as plt
from pywt._doc_utils import (wavedec_keys, wavedec2_keys, draw_2d_wp_basis,
draw_2d_fswavedecn_basis)
shape = (512, 512)
max_lev = 4 # how many levels of decomposition to draw
label_levels = 2 # how many levels to explicitly label on the plots
if False:
fig, axes = plt.subplots(1, 4, figsize=[16, 4])
axes = axes.ravel()
else:
fig, axes = plt.subplots(2, 2, figsize=[8, 8])
axes = axes.ravel()
# plot a standard DWT (wavedec2) basis with max_lev levels of decomposition
draw_2d_wp_basis(shape, wavedec2_keys(max_lev), ax=axes[0],
label_levels=label_levels)
axes[0].set_title('wavedec2 ({} level)'.format(max_lev))
# plot for the fully separable case
draw_2d_fswavedecn_basis(shape, max_lev, ax=axes[1], label_levels=label_levels)
axes[1].set_title('fswavedecn ({} level)'.format(max_lev))
# get all keys corresponding to a full wavelet packet decomposition
wp_keys = list(product(['a', 'd', 'h', 'v'], repeat=max_lev))
draw_2d_wp_basis(shape, wp_keys, ax=axes[2])
axes[2].set_title('wavelet packet\n(full: {} level)'.format(max_lev))
# plot an example of a custom wavelet packet basis
keys = ['aaaa', 'aaad', 'aaah', 'aaav', 'aad', 'aah', 'aava', 'aavd',
'aavh', 'aavv', 'ad', 'ah', 'ava', 'avd', 'avh', 'avv', 'd', 'h',
'vaa', 'vad', 'vah', 'vav', 'vd', 'vh', 'vv']
draw_2d_wp_basis(shape, keys, ax=axes[3], label_levels=label_levels)
axes[3].set_title('wavelet packet\n(custom)')
plt.tight_layout()
plt.show()
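# Illustration sketch (not part of the original figure): the "full wavelet
# packet" key list used above is nothing more than every length-max_lev
# combination of the subband letters 'a', 'd', 'h', 'v'.  The script passes the
# raw tuples to draw_2d_wp_basis; they are joined into strings here only to
# make the enumeration easy to read.
def _demo_wp_keys(levels=2):
    from itertools import product
    keys = [''.join(k) for k in product(['a', 'd', 'h', 'v'], repeat=levels)]
    # 4**levels keys in total, e.g. 'aa', 'ad', ..., 'vv' for levels=2
    return keys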
|
mit
|
mikebenfield/scikit-learn
|
examples/decomposition/plot_pca_3d.py
|
354
|
2432
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
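# A hedged sanity-check sketch (not part of the original example) backing the
# SVD comment inside plot_figs: on the centered data, the squared singular
# values reproduce explained_variance_ratio_ and the rows of Vt match
# pca.components_ up to a per-row sign flip.  It reuses the a, b, c arrays
# defined above.
def _check_pca_against_svd():
    from scipy import linalg
    Y = np.c_[a, b, c]
    Yc = Y - Y.mean(axis=0)
    _, s, Vt = linalg.svd(Yc, full_matrices=False)
    svd_ratio = s ** 2 / np.sum(s ** 2)
    pca = PCA(n_components=3).fit(Y)
    assert np.allclose(svd_ratio, pca.explained_variance_ratio_)
    # component directions agree up to sign
    assert np.allclose(np.abs(Vt), np.abs(pca.components_))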
|
bsd-3-clause
|
sknepneklab/SAMoS
|
analysis/plot_analysis_polar/plot_profiles_polar_v2_nuslices.py
|
1
|
6735
|
# ################################################################
#
# Active Particles on Curved Spaces (APCS)
#
# Author: Silke Henkes
#
# ICSMB, Department of Physics
# University of Aberdeen
#
# (c) 2013, 2014
#
# This program cannot be used, copied, or modified without
# explicit permission of the author.
#
# ################################################################
#! /usr/bin/python
import sys, os, glob
import cPickle as pickle
import numpy as np
import scipy as sp
from scipy.io import savemat
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
#from matplotlib import rc
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
# setting global parameters
#matplotlib.rcParams['text.usetex'] = 'true'
matplotlib.rcParams['lines.linewidth'] = 2
matplotlib.rcParams['axes.linewidth'] = 2
matplotlib.rcParams['xtick.major.size'] = 8
matplotlib.rcParams['ytick.major.size'] = 8
matplotlib.rcParams['font.size']=20
matplotlib.rcParams['legend.fontsize']=14
cdict = {'red': [(0.0, 0.75, 0.75),
(0.3, 1.0, 1.0),
(0.5, 0.4, 0.0),
(1.0, 0.0, 0.0)],
'green': [(0.0, 0.0, 0.0),
(0.25, 0.0, 0.5),
(0.5, 1.0, 1.0),
(0.75, 0.5, 0.0),
(1.0, 0.0, 0.0)],
'blue': [(0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(0.7, 1.0, 1.0),
(1.0, 0.25, 0.25)]}
# This is the structured data file hierarchy. Replace as appropriate (do not go the Yaouen way and fully automatize ...)
#basedir='/media/drogon/Documents/Curved/Runs_nuslices_phi0.5/'
basedir='/media/drogon/home/silke/Documents/Curved/Runs_nuslices/'
JList=['1']
#vList=[ '0.01', '0.1','0.5']
vList=['0.5']
mList=['s','o']
#vList=[ '0.005', '0.01', '0.02', '0.05', '0.1', '0.2', '0.5']
#JList=['10']
#vList=['1']
nbin=180
rval=28.2094791
#RList=['8','12','16','20','40','60']
nuList=['0.001', '0.1', '0.2', '0.3', '0.5', '0.7', '1', '1.5', '2', '2.5','3','4','5']
testmap=LinearSegmentedColormap('test',cdict,N=len(vList))
testmap2=LinearSegmentedColormap('test',cdict,N=len(nuList))
nstep=20000000
nsave=10000
nsnap=int(nstep/nsave)
skip=int(nsnap/3)
dt=0.001
# Profiles
# Set column to plot
usecolumn=4
profList=[r'$\theta$',r'$\rho$',r'$\sqrt{\langle v^2 \rangle}/v_0$','energy','pressure',r'$\Sigma_{\theta \theta}$',r'$\Sigma_{\theta \phi}$',r'$\Sigma_{\phi \theta}$',r'$\Sigma_{\phi \phi}$',r'$\alpha$',r'$\alpha_v$']
profName=['theta','rho','vrms','energy','pressure','stt','stp','spt','spp','alpha','alpha_v']
for i in range(len(vList)):
plt.figure(figsize=(10,7),linewidth=2.0)
for j in range(len(nuList)):
print vList[i],nuList[j]
ax=plt.gca()
outfile=basedir+'/profiles_v0' + vList[i] + '_nu' + nuList[j] + '.dat'
outfile2=basedir + '/axis_v0' + vList[i] + '_nu' + nuList[j] + '.dat'
# header='theta rho vel energy pressure alpha alpha_v'
profiles=sp.loadtxt(outfile, unpack=True)[:,:]
isdata=[index for index,value in enumerate(profiles[1,:]) if (value >0)]
# Corrected
## Forgot the normalization of rho by the angle band width
#if usecolumn==1:
#normz=2*np.pi*rval*abs(np.cos(profiles[0,:]))
#profiles[1,isdata]=profiles[1,isdata]/normz[isdata]
#profiles[1,:]/=np.mean(profiles[1,:])
if usecolumn==2:
plt.plot(profiles[0,isdata],profiles[usecolumn,isdata]/float(vList[i]),color=testmap2(j), linestyle='solid',label=nuList[j])
else:
plt.plot(profiles[0,isdata],profiles[usecolumn,isdata],color=testmap2(j), linestyle='solid',label=nuList[j])
#if usecolumn<=8:
#plt.ylim(0,1.25*profiles[usecolumn,nbin/2])
if usecolumn==9:
plt.plot(2*profiles[0,isdata],0.45*2*profiles[0,isdata],'k--')
plt.plot(2*profiles[0,isdata],0.0*2*profiles[0,isdata],'k--')
plt.text(0.0,0.25,'slope 0.45')
#if j==1:
#plt.plot(2*profiles[0,isdata],1.25*2*profiles[0,isdata],'k--')
#plt.text(0.5,0.75,'slope 1.25')
#if j==0:
#plt.plot(2*profiles[0,isdata],0.1*2*profiles[0,isdata],'k--')
#plt.text(0.5,0.3,'slope 0.1')
#plt.ylim(-0.4,0.4)
plt.xlim(-np.pi/2,np.pi/2)
#plt.xlim(-0.9,0.9)
#plt.ylim(-0.35,0.35)
plt.xlabel(profList[0])
plt.ylabel(profList[usecolumn])
plt.legend(loc=2,ncol=2)
#plt.title('Velocity' + r'$v_0=$' + vList[i])
#filename=picsfolder + '/profile_' + profName[usecolumn] + '_J' + JList[j] +'.pdf'
#plt.savefig(filename)
## Order parameter
#plt.figure(figsize=(10,7),linewidth=2.0)
#corr=0.01 # 1/effective correlation time: number of independent samples (fudge factor, honestly)
#for i in range(len(vList)):
#orderpar=np.zeros((len(nuList),))
#dorder=np.zeros((len(nuList),))
#nuval=np.zeros((len(nuList),))
#for j in range(len(nuList)):
#print vList[i],nuList[j]
#outfile2=basedir + '/axis_v0' + vList[i] + '_nu' + nuList[j] + '.dat'
#axis=sp.loadtxt(outfile2, unpack=True)[:,:]
#orderpar0=np.sqrt(axis[3,:]**2+axis[4,:]**2+axis[5,:]**2)
#orderpar[j]=np.mean(orderpar0)
#dorder[j]=np.std(orderpar0)/np.sqrt(corr*len(orderpar0))
#nuval[j]=float(nuList[j])
#plt.errorbar(nuval,orderpar,yerr=dorder,color=testmap(i), linestyle='solid',marker=mList[i],markersize=10,label=r'$v_0=$' + vList[i])
##hmm=np.linspace(-3,1,10)
##plt.plot(hmm,hmm/hmm,linestyle='--',color='k')
#plt.xlabel(r'$\nu_r$')
#plt.ylabel('p')
##plt.ylim(0,1.1)
##plt.xlim(0,2.2)
#plt.legend()
##plt.title('Order parameter')
## Defect statistics ...
## Defect statistics ...
#plt.figure(figsize=(10,7),linewidth=2.0)
#for i in range(len(vList)):
#argh=[]
#mdefects=np.empty((len(nuList),2))
#for j in range(len(nuList)):
#print vList[i],nuList[j]
##ax=plt.gca()
#outfile=basedir+'/defects_nu_' + nuList[j] + 'v0_'+ vList[i] +'_polar.dat'
#ndefects=sp.loadtxt(outfile, unpack=True)[0:2,:]
#print ndefects
#mdefects[j,0]=np.mean(ndefects[0,:])
#mdefects[j,1]=np.mean(ndefects[1,:])
#argh.append(2.0)
#plt.semilogy(nuList,mdefects[:,1],color=testmap(i),marker=mList[i],markersize=10,linestyle='solid',label=r'$v_0=$' + vList[i])
#plt.semilogy(nuList,mdefects[:,0],color=testmap(i),marker=mList[i],markersize=10,linestyle='--')
#plt.semilogy(nuList,argh,color='k',linestyle='-.')
#plt.xlabel(r'$\nu_r$')
#plt.ylabel('defect #')
##plt.title('Defects')
#plt.legend()
plt.show()
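# Illustration sketch (not part of the analysis above): the cdict defined near
# the top is ordinary matplotlib segment data.  Calling the resulting
# LinearSegmentedColormap with an integer below N (as testmap2(j) does inside
# the plotting loop) picks one RGBA entry per curve, while a float in [0, 1]
# samples the continuous map.
def _demo_colormap(n_curves=5):
    demo_map = LinearSegmentedColormap('demo', cdict, N=n_curves)
    rgba_by_index = [demo_map(j) for j in range(n_curves)]  # discrete, one colour per curve
    rgba_midpoint = demo_map(0.5)                           # continuous sampling
    return rgba_by_index, rgba_midpoint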
|
gpl-3.0
|
dagbldr/dagbldr
|
examples/mnist_vae/sample_mnist_vae.py
|
5
|
2193
|
import argparse
import numpy as np
import os
from dagbldr.datasets import fetch_binarized_mnist
from dagbldr.utils import load_checkpoint, make_gif, interpolate_between_points
parser = argparse.ArgumentParser()
parser.add_argument("saved_functions_file",
help="Saved pickle file from vae training")
parser.add_argument("--seed", "-s",
help="random seed for path calculation",
action="store", default=1979, type=int)
args = parser.parse_args()
if not os.path.exists(args.saved_functions_file):
raise ValueError("Please provide a valid path for saved pickle file!")
checkpoint_dict = load_checkpoint(args.saved_functions_file)
encode_function = checkpoint_dict["encode_function"]
decode_function = checkpoint_dict["decode_function"]
random_state = np.random.RandomState(args.seed)
mnist = fetch_binarized_mnist()
# visualize against validation so we aren't cheating
valid_indices = mnist["valid_indices"]
X = mnist["data"][valid_indices]
# number of samples
n_plot_samples = 5
# MNIST dimensions
width = 28
height = 28
# Get random data samples
ind = np.arange(len(X))
random_state.shuffle(ind)
sample_X = X[ind[:n_plot_samples]]
def gen_samples(arr):
mu, log_sig = encode_function(arr)
# No noise at test time
out, = decode_function(mu + np.exp(log_sig))
return out
# VAE specific plotting
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
samples = gen_samples(sample_X)
f, axarr = plt.subplots(n_plot_samples, 2)
for n, (X_i, s_i) in enumerate(zip(sample_X, samples)):
axarr[n, 0].matshow(X_i.reshape(width, height), cmap="gray")
axarr[n, 1].matshow(s_i.reshape(width, height), cmap="gray")
axarr[n, 0].axis('off')
axarr[n, 1].axis('off')
plt.savefig('vae_reconstruction.png')
plt.close()
# Calculate linear path between points in space
mus, log_sigmas = encode_function(sample_X)
mu_path = interpolate_between_points(mus)
log_sigma_path = interpolate_between_points(log_sigmas)
# Path across space from one point to another
path = mu_path + np.exp(log_sigma_path)
out, = decode_function(path)
make_gif(out, "vae_code.gif", width, height, delay=1, grayscale=True)
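# A hedged, numpy-only illustration of the "linear path between points" step
# above.  This is NOT dagbldr's interpolate_between_points (whose exact
# behaviour is not shown here); it only walks a straight line between two
# latent codes, which is the idea the mu_path / log_sigma_path computation
# relies on.
def _linear_path(start_code, end_code, n_steps=8):
    import numpy as np
    alphas = np.linspace(0.0, 1.0, n_steps)[:, None]
    # row k is (1 - alpha_k) * start + alpha_k * end
    return (1.0 - alphas) * start_code[None, :] + alphas * end_code[None, :]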
|
bsd-3-clause
|
tswast/google-cloud-python
|
bigquery_storage/docs/conf.py
|
2
|
10632
|
# -*- coding: utf-8 -*-
#
# google-cloud-bigquerystorage documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(".."))
__version__ = "0.1.0"
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.intersphinx",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
]
# autodoc/autosummary flags
autoclass_content = "both"
autodoc_default_flags = ["members"]
autosummary_generate = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"google-cloud-bigquerystorage"
copyright = u"2017, Google"
author = u"Google APIs"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version, including alpha/beta/rc tags.
release = __version__
# The short X.Y version.
version = ".".join(release.split(".")[0:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "alabaster"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "google-cloud-bigquerystorage-doc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"google-cloud-bigquerystorage.tex",
u"google-cloud-bigquerystorage Documentation",
author,
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(
master_doc,
"google-cloud-bigquerystorage",
u"google-cloud-bigquerystorage Documentation",
[author],
1,
)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"google-cloud-bigquerystorage",
u"google-cloud-bigquerystorage Documentation",
author,
"google-cloud-bigquerystorage",
"GAPIC library for the {metadata.shortName} v1beta1 service",
"APIs",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
"python": ("http://python.readthedocs.org/en/latest/", None),
"gax": ("https://gax-python.readthedocs.org/en/latest/", None),
"fastavro": ("https://fastavro.readthedocs.io/en/stable/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
}
# Napoleon settings
napoleon_google_docstring = True
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
|
apache-2.0
|
terkkila/scikit-learn
|
sklearn/cluster/tests/test_mean_shift.py
|
121
|
3429
|
"""
Testing for mean shift clustering methods
"""
import numpy as np
import warnings
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
# Test estimate_bandwidth
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
# Test MeanShift algorithm
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_meanshift_predict():
# Test MeanShift.predict
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_meanshift_all_orphans():
# init away from the data, crash with a sensible warning
ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
msg = "No point was within bandwidth=0.1"
assert_raise_message(ValueError, msg, ms.fit, X,)
def test_unfitted():
    # Non-regression: before fit, there should be no fitted attributes.
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
# Test the bin seeding technique which can be used in the mean shift
# algorithm
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
# we bail and use the whole data here.
with warnings.catch_warnings(record=True):
test_bins = get_bin_seeds(X, 0.01, 1)
assert_array_equal(test_bins, X)
# tight clusters around [0, 0] and [1, 1], only get two bins
X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]],
cluster_std=0.1, random_state=0)
test_bins = get_bin_seeds(X, 1)
assert_array_equal(test_bins, [[0, 0], [1, 1]])
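# A short usage sketch (not a test) of the workflow exercised above: estimate a
# bandwidth from the data, fit MeanShift with bin seeding, and read off the
# discovered centers and labels.  Wrapped in a helper so it never runs during
# test collection.
def _demo_mean_shift_usage():
    bandwidth = estimate_bandwidth(X, n_samples=200)
    ms = MeanShift(bandwidth=bandwidth, bin_seeding=True).fit(X)
    return ms.cluster_centers_, ms.labels_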
|
bsd-3-clause
|
bundgus/python-playground
|
matplotlib-playground/examples/api/two_scales.py
|
1
|
1264
|
#!/usr/bin/env python
"""
Demonstrate how to do two plots on the same axes with different left and
right y-axis scales.
The trick is to use *2 different axes*. Turn the axes rectangular
frame off on the 2nd axes to keep it from obscuring the first.
Manually set the tick locs and labels as desired. You can use
separate matplotlib.ticker formatters and locators as desired since
the two axes are independent.
This is achieved in the following example by calling the Axes.twinx()
method, which performs this work. See the source of twinx() in
axes.py for an example of how to do it for different x scales. (Hint:
use the xaxis instance and call tick_bottom and tick_top in place of
tick_left and tick_right.)
The twinx and twiny methods are also exposed as pyplot functions.
"""
import numpy as np
import matplotlib.pyplot as plt
fig, ax1 = plt.subplots()
t = np.arange(0.01, 10.0, 0.01)
s1 = np.exp(t)
ax1.plot(t, s1, 'b-')
ax1.set_xlabel('time (s)')
# Make the y-axis label and tick labels match the line color.
ax1.set_ylabel('exp', color='b')
for tl in ax1.get_yticklabels():
tl.set_color('b')
ax2 = ax1.twinx()
s2 = np.sin(2*np.pi*t)
ax2.plot(t, s2, 'r.')
ax2.set_ylabel('sin', color='r')
for tl in ax2.get_yticklabels():
tl.set_color('r')
plt.show()
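# A minimal sketch of the twiny() analogue hinted at in the docstring above:
# two independent x scales sharing one y axis.  Kept in a function so it does
# not interfere with the figure already shown; the millisecond axis is purely
# illustrative.
def _demo_twiny():
    fig, ax_bottom = plt.subplots()
    x = np.linspace(0.01, 10.0, 500)
    ax_bottom.plot(x, np.exp(x), 'b-')
    ax_bottom.set_xlabel('time (s)', color='b')
    ax_top = ax_bottom.twiny()  # second x axis drawn along the top
    ax_top.plot(x * 1000.0, np.exp(x), 'r--')
    ax_top.set_xlabel('time (ms)', color='r')
    return fig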
|
mit
|
brooksandrew/postman_problems
|
postman_problems/examples/star/rpp_star.py
|
1
|
6614
|
import pkg_resources
import logging
import string
import networkx as nx
from postman_problems.tests.utils import create_mock_csv_from_dataframe
from postman_problems.stats import calculate_postman_solution_stats
from postman_problems.solver import rpp, cpp
def create_star_graph(n_nodes=10, ring=True):
"""
    Create a star graph, optionally with the outer points connected by a low-weight ring.
Args:
n_nodes (int): number of nodes in graph (max 26)
ring (Boolean): add ring around the border with low (distance=2) weights
Returns:
        networkx MultiGraph in the shape of a star
"""
graph = nx.MultiGraph()
node_names = list(string.ascii_lowercase)[:n_nodes]
graph.add_star(node_names)
nx.set_edge_attributes(graph, 10, 'distance')
nx.set_edge_attributes(graph, 1, 'required')
nx.set_edge_attributes(graph, 'solid', 'style')
if ring:
for e in list(zip(node_names[1:-1] + [node_names[1]], node_names[2:] + [node_names[-1]])):
graph.add_edge(e[0], e[1], distance=2, required=0, style='dashed')
return graph
def main():
"""Solve the RPP and save visualizations of the solution"""
# PARAMS / DATA ---------------------------------------------------------------------
# inputs
START_NODE = 'a'
N_NODES = 13
# filepaths
CPP_REQUIRED_SVG_FILENAME = pkg_resources.resource_filename('postman_problems',
'examples/star/output/cpp_graph_req')
CPP_OPTIONAL_SVG_FILENAME = pkg_resources.resource_filename('postman_problems',
'examples/star/output/cpp_graph_opt')
RPP_SVG_FILENAME = pkg_resources.resource_filename('postman_problems', 'examples/star/output/rpp_graph')
RPP_BASE_SVG_FILENAME = pkg_resources.resource_filename('postman_problems', 'examples/star/output/base_rpp_graph')
PNG_PATH = pkg_resources.resource_filename('postman_problems', 'examples/star/output/png/')
RPP_GIF_FILENAME = pkg_resources.resource_filename('postman_problems', 'examples/star/output/rpp_graph.gif')
# setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# CREATE GRAPH ----------------------------------------------------------------------
logger.info('Solve RPP')
graph_base = create_star_graph(N_NODES)
edgelist = nx.to_pandas_edgelist(graph_base, source='_node1', target='_node2')
# SOLVE CPP -------------------------------------------------------------------------
# with required edges only
edgelist_file = create_mock_csv_from_dataframe(edgelist)
circuit_cpp_req, graph_cpp_req = cpp(edgelist_file, start_node=START_NODE)
logger.info('Print the CPP solution (required edges only):')
for e in circuit_cpp_req:
logger.info(e)
# with required and optional edges as required
edgelist_all_req = edgelist.copy()
edgelist_all_req.drop(['required'], axis=1, inplace=True)
edgelist_file = create_mock_csv_from_dataframe(edgelist_all_req)
circuit_cpp_opt, graph_cpp_opt = cpp(edgelist_file, start_node=START_NODE)
logger.info('Print the CPP solution (optional and required edges):')
for e in circuit_cpp_opt:
logger.info(e)
# SOLVE RPP -------------------------------------------------------------------------
edgelist_file = create_mock_csv_from_dataframe(edgelist) # need to regenerate
circuit_rpp, graph_rpp = rpp(edgelist_file, start_node=START_NODE)
logger.info('Print the RPP solution:')
for e in circuit_rpp:
logger.info(e)
logger.info('Solution summary stats:')
for k, v in calculate_postman_solution_stats(circuit_rpp).items():
logger.info(str(k) + ' : ' + str(v))
# VIZ -------------------------------------------------------------------------------
try:
from postman_problems.viz import plot_circuit_graphviz, plot_graphviz, make_circuit_images, make_circuit_video
logger.info('Creating single SVG of base graph')
plot_graphviz(graph=graph_base,
filename=RPP_BASE_SVG_FILENAME,
edge_label_attr='distance',
format='svg',
engine='circo',
graph_attr={'label': 'Base Graph: Distances', 'labelloc': 't'}
)
logger.info('Creating single SVG of CPP solution (required edges only)')
plot_circuit_graphviz(circuit=circuit_cpp_req,
graph=graph_cpp_req,
filename=CPP_REQUIRED_SVG_FILENAME,
format='svg',
engine='circo',
graph_attr={
'label': 'Base Graph: Chinese Postman Solution (required edges only)',
'labelloc': 't'}
)
logger.info('Creating single SVG of CPP solution (required & optional edges)')
plot_circuit_graphviz(circuit=circuit_cpp_opt,
graph=graph_cpp_opt,
filename=CPP_OPTIONAL_SVG_FILENAME,
format='svg',
engine='circo',
graph_attr={'label': 'Base Graph: Chinese Postman Solution (required & optional edges)',
'labelloc': 't'}
)
logger.info('Creating single SVG of RPP solution')
plot_circuit_graphviz(circuit=circuit_rpp,
graph=graph_rpp,
filename=RPP_SVG_FILENAME,
format='svg',
engine='circo',
graph_attr={'label': 'Base Graph: Rural Postman Solution', 'labelloc': 't'}
)
logger.info('Creating PNG files for GIF')
make_circuit_images(circuit=circuit_rpp,
graph=graph_rpp,
outfile_dir=PNG_PATH,
format='png',
engine='circo')
logger.info('Creating GIF')
make_circuit_video(infile_dir_images=PNG_PATH,
outfile_movie=RPP_GIF_FILENAME,
fps=1)
    except (FileNotFoundError, OSError) as e:
print(e)
print("Sorry, looks like you don't have all the needed visualization dependencies.")
if __name__ == '__main__':
main()
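# A small inspection sketch (illustration only): build the star graph defined
# above and separate its edges by the 'required' attribute using plain
# networkx accessors, which is a quick way to see the required spokes versus
# the optional low-weight ring the RPP solver can choose to skip.
def _inspect_star_graph(n_nodes=6):
    graph = create_star_graph(n_nodes=n_nodes, ring=True)
    required_edges = [(u, v) for u, v, d in graph.edges(data=True) if d.get('required') == 1]
    optional_edges = [(u, v) for u, v, d in graph.edges(data=True) if d.get('required') == 0]
    return graph.number_of_edges(), required_edges, optional_edges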
|
mit
|
nikitasingh981/scikit-learn
|
sklearn/cluster/tests/test_dbscan.py
|
56
|
13916
|
"""
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster.dbscan_ import DBSCAN
from sklearn.cluster.dbscan_ import dbscan
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
# Tests the DBSCAN algorithm with a similarity array.
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
# Tests the DBSCAN algorithm with a feature vector array.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_sparse():
core_sparse, labels_sparse = dbscan(sparse.lil_matrix(X), eps=.8,
min_samples=10)
core_dense, labels_dense = dbscan(X, eps=.8, min_samples=10)
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_sparse_precomputed():
D = pairwise_distances(X)
nn = NearestNeighbors(radius=.9).fit(X)
D_sparse = nn.radius_neighbors_graph(mode='distance')
# Ensure it is sparse not merely on diagonals:
assert D_sparse.nnz < D.shape[0] * (D.shape[0] - 1)
core_sparse, labels_sparse = dbscan(D_sparse,
eps=.8,
min_samples=10,
metric='precomputed')
core_dense, labels_dense = dbscan(D, eps=.8, min_samples=10,
metric='precomputed')
assert_array_equal(core_dense, core_sparse)
assert_array_equal(labels_dense, labels_sparse)
def test_dbscan_no_core_samples():
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
for X_ in [X, sparse.csr_matrix(X)]:
db = DBSCAN(min_samples=6).fit(X_)
assert_array_equal(db.components_, np.empty((0, X_.shape[1])))
assert_array_equal(db.labels_, -1)
assert_equal(db.core_sample_indices_.shape, (0,))
def test_dbscan_callable():
# Tests the DBSCAN algorithm with a callable metric.
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_metric_params():
# Tests that DBSCAN works with the metrics_params argument.
eps = 0.8
min_samples = 10
p = 1
# Compute DBSCAN with metric_params arg
db = DBSCAN(metric='minkowski', metric_params={'p': p}, eps=eps,
min_samples=min_samples, algorithm='ball_tree').fit(X)
core_sample_1, labels_1 = db.core_sample_indices_, db.labels_
# Test that sample labels are the same as passing Minkowski 'p' directly
db = DBSCAN(metric='minkowski', eps=eps, min_samples=min_samples,
algorithm='ball_tree', p=p).fit(X)
core_sample_2, labels_2 = db.core_sample_indices_, db.labels_
assert_array_equal(core_sample_1, core_sample_2)
assert_array_equal(labels_1, labels_2)
# Minkowski with p=1 should be equivalent to Manhattan distance
db = DBSCAN(metric='manhattan', eps=eps, min_samples=min_samples,
algorithm='ball_tree').fit(X)
core_sample_3, labels_3 = db.core_sample_indices_, db.labels_
assert_array_equal(core_sample_1, core_sample_3)
assert_array_equal(labels_1, labels_3)
def test_dbscan_balltree():
# Tests the DBSCAN algorithm with balltree for neighbor calculation.
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
# DBSCAN.fit should accept a list of lists.
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
# Test bad argument values: these should all raise ValueErrors
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
def test_boundaries():
# ensure min_samples is inclusive of core point
core, _ = dbscan([[0], [1]], eps=2, min_samples=2)
assert_in(0, core)
# ensure eps is inclusive of circumference
core, _ = dbscan([[0], [1], [1]], eps=1, min_samples=2)
assert_in(0, core)
core, _ = dbscan([[0], [1], [1]], eps=.99, min_samples=2)
assert_not_in(0, core)
def test_weighted_dbscan():
# ensure sample_weight is validated
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2])
assert_raises(ValueError, dbscan, [[0], [1]], sample_weight=[2, 3, 4])
# ensure sample_weight has an effect
assert_array_equal([], dbscan([[0], [1]], sample_weight=None,
min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 5],
min_samples=6)[0])
assert_array_equal([0], dbscan([[0], [1]], sample_weight=[6, 5],
min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 6],
min_samples=6)[0])
# points within eps of each other:
assert_array_equal([0, 1], dbscan([[0], [1]], eps=1.5,
sample_weight=[5, 1], min_samples=6)[0])
# and effect of non-positive and non-integer sample_weight:
assert_array_equal([], dbscan([[0], [1]], sample_weight=[5, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[5.9, 0.1],
eps=1.5, min_samples=6)[0])
assert_array_equal([0, 1], dbscan([[0], [1]], sample_weight=[6, 0],
eps=1.5, min_samples=6)[0])
assert_array_equal([], dbscan([[0], [1]], sample_weight=[6, -1],
eps=1.5, min_samples=6)[0])
# for non-negative sample_weight, cores should be identical to repetition
rng = np.random.RandomState(42)
sample_weight = rng.randint(0, 5, X.shape[0])
core1, label1 = dbscan(X, sample_weight=sample_weight)
assert_equal(len(label1), len(X))
X_repeated = np.repeat(X, sample_weight, axis=0)
core_repeated, label_repeated = dbscan(X_repeated)
core_repeated_mask = np.zeros(X_repeated.shape[0], dtype=bool)
core_repeated_mask[core_repeated] = True
core_mask = np.zeros(X.shape[0], dtype=bool)
core_mask[core1] = True
assert_array_equal(np.repeat(core_mask, sample_weight), core_repeated_mask)
# sample_weight should work with precomputed distance matrix
D = pairwise_distances(X)
core3, label3 = dbscan(D, sample_weight=sample_weight,
metric='precomputed')
assert_array_equal(core1, core3)
assert_array_equal(label1, label3)
# sample_weight should work with estimator
est = DBSCAN().fit(X, sample_weight=sample_weight)
core4 = est.core_sample_indices_
label4 = est.labels_
assert_array_equal(core1, core4)
assert_array_equal(label1, label4)
est = DBSCAN()
label5 = est.fit_predict(X, sample_weight=sample_weight)
core5 = est.core_sample_indices_
assert_array_equal(core1, core5)
assert_array_equal(label1, label5)
assert_array_equal(label1, est.labels_)
def test_dbscan_core_samples_toy():
X = [[0], [2], [3], [4], [6], [8], [10]]
n_samples = len(X)
for algorithm in ['brute', 'kd_tree', 'ball_tree']:
# Degenerate case: every sample is a core sample, either with its own
# cluster or including other close core samples.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=1)
assert_array_equal(core_samples, np.arange(n_samples))
assert_array_equal(labels, [0, 1, 1, 1, 2, 3, 4])
# With eps=1 and min_samples=2 only the 3 samples from the denser area
# are core samples. All other points are isolated and considered noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=2)
assert_array_equal(core_samples, [1, 2, 3])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# Only the sample in the middle of the dense area is core. Its two
# neighbors are edge samples. Remaining samples are noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=3)
assert_array_equal(core_samples, [2])
assert_array_equal(labels, [-1, 0, 0, 0, -1, -1, -1])
# It's no longer possible to extract core samples with eps=1:
# everything is noise.
core_samples, labels = dbscan(X, algorithm=algorithm, eps=1,
min_samples=4)
assert_array_equal(core_samples, [])
assert_array_equal(labels, -np.ones(n_samples))
def test_dbscan_precomputed_metric_with_degenerate_input_arrays():
# see https://github.com/scikit-learn/scikit-learn/issues/4641 for
# more details
X = np.eye(10)
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
X = np.zeros((10, 10))
labels = DBSCAN(eps=0.5, metric='precomputed').fit(X).labels_
assert_equal(len(set(labels)), 1)
def test_dbscan_precomputed_metric_with_initial_rows_zero():
# sample matrix with initial two row all zero
ar = np.array([
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.0, 0.0],
[0.0, 0.0, 0.1, 0.1, 0.0, 0.0, 0.3],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1],
[0.0, 0.0, 0.0, 0.0, 0.3, 0.1, 0.0]
])
matrix = sparse.csr_matrix(ar)
labels = DBSCAN(eps=0.2, metric='precomputed',
min_samples=2).fit(matrix).labels_
assert_array_equal(labels, [-1, -1, 0, 0, 0, 1, 1])
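# A compact usage sketch (not a test) of the estimator interface exercised
# above, run on the module-level blobs X.  The eps / min_samples values are
# illustrative, not tuned.
def _demo_dbscan_usage():
    db = DBSCAN(eps=0.8, min_samples=10).fit(X)
    n_found = len(set(db.labels_)) - (1 if -1 in db.labels_ else 0)
    return n_found, db.core_sample_indices_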
|
bsd-3-clause
|
pythonvietnam/scikit-learn
|
examples/linear_model/lasso_dense_vs_sparse_data.py
|
348
|
1862
|
"""
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
|
bsd-3-clause
|
Weihonghao/ECM
|
Vpy34/lib/python3.5/site-packages/pandas/core/indexes/datetimelike.py
|
7
|
28008
|
"""
Base and utility classes for tseries type pandas objects.
"""
import warnings
from datetime import datetime, timedelta
from pandas import compat
from pandas.compat.numpy import function as nv
import numpy as np
from pandas.core.dtypes.common import (
is_integer, is_float,
is_bool_dtype, _ensure_int64,
is_scalar, is_dtype_equal,
is_list_like)
from pandas.core.dtypes.generic import (
ABCIndex, ABCSeries,
ABCPeriodIndex, ABCIndexClass)
from pandas.core.dtypes.missing import isnull
from pandas.core import common as com, algorithms
from pandas.core.algorithms import checked_add_with_arr
from pandas.core.common import AbstractMethodError
import pandas.io.formats.printing as printing
from pandas._libs import (tslib as libts, lib,
Timedelta, Timestamp, iNaT, NaT)
from pandas._libs.period import Period
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.util._decorators import Appender, cache_readonly
import pandas.core.dtypes.concat as _concat
import pandas.tseries.frequencies as frequencies
import pandas.core.indexes.base as ibase
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
class DatelikeOps(object):
""" common ops for DatetimeIndex/PeriodIndex, but not TimedeltaIndex """
def strftime(self, date_format):
return np.asarray(self.format(date_format=date_format),
dtype=compat.text_type)
strftime.__doc__ = """
Return an array of formatted strings specified by date_format, which
supports the same string format as the python standard library. Details
of the string format can be found in `python string format doc <{0}>`__
.. versionadded:: 0.17.0
Parameters
----------
date_format : str
date format string (e.g. "%Y-%m-%d")
Returns
-------
ndarray of formatted strings
""".format("https://docs.python.org/2/library/datetime.html"
"#strftime-and-strptime-behavior")
class TimelikeOps(object):
""" common ops for TimedeltaIndex/DatetimeIndex, but not PeriodIndex """
_round_doc = (
"""
%s the index to the specified freq
Parameters
----------
freq : freq string/object
Returns
-------
index of same type
Raises
------
ValueError if the freq cannot be converted
""")
def _round(self, freq, rounder):
from pandas.tseries.frequencies import to_offset
unit = to_offset(freq).nanos
# round the local times
values = _ensure_datetimelike_to_i8(self)
if unit < 1000 and unit % 1000 != 0:
# for nano rounding, work with the last 6 digits separately
# due to float precision
buff = 1000000
result = (buff * (values // buff) + unit *
(rounder((values % buff) / float(unit))).astype('i8'))
elif unit >= 1000 and unit % 1000 != 0:
msg = 'Precision will be lost using frequency: {}'
warnings.warn(msg.format(freq))
result = (unit * rounder(values / float(unit)).astype('i8'))
else:
result = (unit * rounder(values / float(unit)).astype('i8'))
result = self._maybe_mask_results(result, fill_value=NaT)
attribs = self._get_attributes_dict()
if 'freq' in attribs:
attribs['freq'] = None
if 'tz' in attribs:
attribs['tz'] = None
return self._ensure_localized(
self._shallow_copy(result, **attribs))
@Appender(_round_doc % "round")
def round(self, freq, *args, **kwargs):
return self._round(freq, np.round)
@Appender(_round_doc % "floor")
def floor(self, freq):
return self._round(freq, np.floor)
@Appender(_round_doc % "ceil")
def ceil(self, freq):
return self._round(freq, np.ceil)
class DatetimeIndexOpsMixin(object):
""" common ops mixin to support a unified inteface datetimelike Index """
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, ABCIndexClass):
return False
elif not isinstance(other, type(self)):
try:
other = type(self)(other)
except:
return False
if not is_dtype_equal(self.dtype, other.dtype):
# have different timezone
return False
# ToDo: Remove this when PeriodDtype is added
elif isinstance(self, ABCPeriodIndex):
if not isinstance(other, ABCPeriodIndex):
return False
if self.freq != other.freq:
return False
return np.array_equal(self.asi8, other.asi8)
def __iter__(self):
return (self._box_func(v) for v in self.asi8)
@staticmethod
def _join_i8_wrapper(joinf, dtype, with_indexers=True):
""" create the join wrapper methods """
@staticmethod
def wrapper(left, right):
if isinstance(left, (np.ndarray, ABCIndex, ABCSeries)):
left = left.view('i8')
if isinstance(right, (np.ndarray, ABCIndex, ABCSeries)):
right = right.view('i8')
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view(dtype)
return join_index, left_indexer, right_indexer
return results
return wrapper
def _evaluate_compare(self, other, op):
"""
        We have been called because of a comparison between two
        datetime-like arrays; numpy >= 1.11 will now warn about
        NaT comparisons.
"""
# coerce to a similar object
if not isinstance(other, type(self)):
if not is_list_like(other):
# scalar
other = [other]
elif is_scalar(lib.item_from_zerodim(other)):
# ndarray scalar
other = [other.item()]
other = type(self)(other)
# compare
result = op(self.asi8, other.asi8)
# technically we could support bool dtyped Index
# for now just return the indexing array directly
mask = (self._isnan) | (other._isnan)
if is_bool_dtype(result):
result[mask] = False
return result
try:
result[mask] = iNaT
return Index(result)
except TypeError:
return result
def _ensure_localized(self, result):
"""
ensure that we are re-localized
This is for compat as we can then call this on all datetimelike
indexes generally (ignored for Period/Timedelta)
Parameters
----------
result : DatetimeIndex / i8 ndarray
Returns
-------
localized DTI
"""
# reconvert to local tz
if getattr(self, 'tz', None) is not None:
if not isinstance(result, ABCIndexClass):
result = self._simple_new(result)
result = result.tz_localize(self.tz)
return result
@property
def _box_func(self):
"""
box function to get object from internal representation
"""
raise AbstractMethodError(self)
def _box_values(self, values):
"""
apply box func to passed values
"""
return lib.map_infer(values, self._box_func)
def _format_with_header(self, header, **kwargs):
return header + list(self._format_native_types(**kwargs))
@Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)
def __contains__(self, key):
try:
res = self.get_loc(key)
return is_scalar(res) or type(res) == slice or np.any(res)
except (KeyError, TypeError, ValueError):
return False
contains = __contains__
def __getitem__(self, key):
"""
This getitem defers to the underlying array, which by-definition can
only handle list-likes, slices, and integer scalars
"""
is_int = is_integer(key)
if is_scalar(key) and not is_int:
raise ValueError
getitem = self._data.__getitem__
if is_int:
val = getitem(key)
return self._box_func(val)
else:
if com.is_bool_indexer(key):
key = np.asarray(key)
if key.all():
key = slice(0, None, None)
else:
key = lib.maybe_booleans_to_slice(key.view(np.uint8))
attribs = self._get_attributes_dict()
is_period = isinstance(self, ABCPeriodIndex)
if is_period:
freq = self.freq
else:
freq = None
if isinstance(key, slice):
if self.freq is not None and key.step is not None:
freq = key.step * self.freq
else:
freq = self.freq
attribs['freq'] = freq
result = getitem(key)
if result.ndim > 1:
# To support MPL which performs slicing with 2 dim
# even though it only has 1 dim by definition
if is_period:
return self._simple_new(result, **attribs)
return result
return self._simple_new(result, **attribs)
@property
def freqstr(self):
"""
        Return the frequency object as a string if it is set, otherwise None
"""
if self.freq is None:
return None
return self.freq.freqstr
@cache_readonly
def inferred_freq(self):
"""
        Tries to return a string representing a frequency guess,
generated by infer_freq. Returns None if it can't autodetect the
frequency.
"""
try:
return frequencies.infer_freq(self)
except ValueError:
return None
def _nat_new(self, box=True):
"""
Return Index or ndarray filled with NaT which has the same
length as the caller.
Parameters
----------
box : boolean, default True
- If True returns a Index as the same as caller.
- If False returns ndarray of np.int64.
"""
result = np.zeros(len(self), dtype=np.int64)
result.fill(iNaT)
if not box:
return result
attribs = self._get_attributes_dict()
if not isinstance(self, ABCPeriodIndex):
attribs['freq'] = None
return self._simple_new(result, **attribs)
# Try to run function on index first, and then on elements of index
# Especially important for group-by functionality
def map(self, f):
try:
result = f(self)
# Try to use this result if we can
if isinstance(result, np.ndarray):
                result = self._shallow_copy(result)
if not isinstance(result, Index):
raise TypeError('The map function must return an Index object')
return result
except Exception:
return self.asobject.map(f)
def sort_values(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index
"""
if return_indexer:
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
return sorted_index, _as
else:
sorted_values = np.sort(self._values)
attribs = self._get_attributes_dict()
freq = attribs['freq']
if freq is not None and not isinstance(self, ABCPeriodIndex):
if freq.n > 0 and not ascending:
freq = freq * -1
elif freq.n < 0 and ascending:
freq = freq * -1
attribs['freq'] = freq
if not ascending:
sorted_values = sorted_values[::-1]
return self._simple_new(sorted_values, **attribs)
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = _ensure_int64(indices)
maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
if isinstance(maybe_slice, slice):
return self[maybe_slice]
taken = self._assert_take_fillable(self.asi8, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=iNaT)
# keep freq in PeriodIndex, reset otherwise
freq = self.freq if isinstance(self, ABCPeriodIndex) else None
return self._shallow_copy(taken, freq=freq)
def get_duplicates(self):
values = Index.get_duplicates(self)
return self._simple_new(values)
_can_hold_na = True
_na_value = NaT
"""The expected NA value to use with this index."""
@cache_readonly
def _isnan(self):
""" return if each value is nan"""
return (self.asi8 == iNaT)
@property
def asobject(self):
"""
return object Index which contains boxed values
*this is an internal non-public method*
"""
from pandas.core.index import Index
return Index(self._box_values(self.asi8), name=self.name, dtype=object)
def _convert_tolerance(self, tolerance):
try:
return Timedelta(tolerance).to_timedelta64()
except ValueError:
raise ValueError('tolerance argument for %s must be convertible '
'to Timedelta: %r'
% (type(self).__name__, tolerance))
def _maybe_mask_results(self, result, fill_value=None, convert=None):
"""
Parameters
----------
        result : an ndarray
        convert : string/dtype or None
        Returns
        -------
        result : ndarray with values replaced by the fill_value
        mask the result if needed, and convert to the provided dtype if it is
        not None
This is an internal routine
"""
if self.hasnans:
if convert:
result = result.astype(convert)
if fill_value is None:
fill_value = np.nan
result[self._isnan] = fill_value
return result
def tolist(self):
"""
return a list of the underlying data
"""
return list(self.asobject)
def min(self, axis=None, *args, **kwargs):
"""
Return the minimum value of the Index or minimum along
an axis.
See also
--------
numpy.ndarray.min
"""
nv.validate_min(args, kwargs)
try:
i8 = self.asi8
# quick check
if len(i8) and self.is_monotonic:
if i8[0] != iNaT:
return self._box_func(i8[0])
if self.hasnans:
min_stamp = self[~self._isnan].asi8.min()
else:
min_stamp = i8.min()
return self._box_func(min_stamp)
except ValueError:
return self._na_value
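    # Usage sketch (added for illustration, not in the original source): NaT is
    # skipped when real values are present, and an all-NaT index falls through
    # the ValueError branch above and returns NaT itself.
    # >>> import pandas as pd
    # >>> pd.DatetimeIndex(['2013-01-02', pd.NaT, '2013-01-01']).min()
    # Timestamp('2013-01-01 00:00:00')
    # >>> pd.DatetimeIndex([pd.NaT, pd.NaT]).min() is pd.NaT
    # True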
def argmin(self, axis=None, *args, **kwargs):
"""
Returns the indices of the minimum values along an axis.
See `numpy.ndarray.argmin` for more information on the
`axis` parameter.
See also
--------
numpy.ndarray.argmin
"""
nv.validate_argmin(args, kwargs)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all():
return -1
i8 = i8.copy()
i8[mask] = np.iinfo('int64').max
return i8.argmin()
def max(self, axis=None, *args, **kwargs):
"""
Return the maximum value of the Index or maximum along
an axis.
See also
--------
numpy.ndarray.max
"""
nv.validate_max(args, kwargs)
try:
i8 = self.asi8
# quick check
if len(i8) and self.is_monotonic:
if i8[-1] != iNaT:
return self._box_func(i8[-1])
if self.hasnans:
max_stamp = self[~self._isnan].asi8.max()
else:
max_stamp = i8.max()
return self._box_func(max_stamp)
except ValueError:
return self._na_value
def argmax(self, axis=None, *args, **kwargs):
"""
Returns the indices of the maximum values along an axis.
See `numpy.ndarray.argmax` for more information on the
`axis` parameter.
See also
--------
numpy.ndarray.argmax
"""
nv.validate_argmax(args, kwargs)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all():
return -1
i8 = i8.copy()
i8[mask] = 0
return i8.argmax()
@property
def _formatter_func(self):
raise AbstractMethodError(self)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
attrs = super(DatetimeIndexOpsMixin, self)._format_attrs()
for attrib in self._attributes:
if attrib == 'freq':
freq = self.freqstr
if freq is not None:
freq = "'%s'" % freq
attrs.append(('freq', freq))
return attrs
@cache_readonly
def _resolution(self):
return frequencies.Resolution.get_reso_from_freq(self.freqstr)
@cache_readonly
def resolution(self):
"""
Returns day, hour, minute, second, millisecond or microsecond
"""
return frequencies.Resolution.get_str(self._resolution)
def _convert_scalar_indexer(self, key, kind=None):
"""
we don't allow integer or float indexing on datetime-like when using
loc
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# we don't allow integer/float indexing for loc
# we don't allow float indexing for ix/getitem
if is_scalar(key):
is_int = is_integer(key)
is_flt = is_float(key)
if kind in ['loc'] and (is_int or is_flt):
self._invalid_indexer('index', key)
elif kind in ['ix', 'getitem'] and is_flt:
self._invalid_indexer('index', key)
return (super(DatetimeIndexOpsMixin, self)
._convert_scalar_indexer(key, kind=kind))
def _add_datelike(self, other):
raise AbstractMethodError(self)
def _sub_datelike(self, other):
raise AbstractMethodError(self)
def _sub_period(self, other):
return NotImplemented
@classmethod
def _add_datetimelike_methods(cls):
"""
add in the datetimelike methods (as we may have to override the
superclass)
"""
def __add__(self, other):
from pandas.core.index import Index
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.tseries.offsets import DateOffset
if isinstance(other, TimedeltaIndex):
return self._add_delta(other)
elif isinstance(self, TimedeltaIndex) and isinstance(other, Index):
if hasattr(other, '_add_delta'):
return other._add_delta(self)
raise TypeError("cannot add TimedeltaIndex and {typ}"
.format(typ=type(other)))
elif isinstance(other, Index):
raise TypeError("cannot add {typ1} and {typ2}"
.format(typ1=type(self).__name__,
typ2=type(other).__name__))
elif isinstance(other, (DateOffset, timedelta, np.timedelta64,
Timedelta)):
return self._add_delta(other)
elif is_integer(other):
return self.shift(other)
elif isinstance(other, (Timestamp, datetime)):
return self._add_datelike(other)
else: # pragma: no cover
return NotImplemented
cls.__add__ = __add__
cls.__radd__ = __add__
def __sub__(self, other):
from pandas.core.index import Index
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.timedeltas import TimedeltaIndex
from pandas.tseries.offsets import DateOffset
if isinstance(other, TimedeltaIndex):
return self._add_delta(-other)
elif isinstance(self, TimedeltaIndex) and isinstance(other, Index):
if not isinstance(other, TimedeltaIndex):
raise TypeError("cannot subtract TimedeltaIndex and {typ}"
.format(typ=type(other).__name__))
return self._add_delta(-other)
elif isinstance(other, DatetimeIndex):
return self._sub_datelike(other)
elif isinstance(other, Index):
raise TypeError("cannot subtract {typ1} and {typ2}"
.format(typ1=type(self).__name__,
typ2=type(other).__name__))
elif isinstance(other, (DateOffset, timedelta, np.timedelta64,
Timedelta)):
return self._add_delta(-other)
elif is_integer(other):
return self.shift(-other)
elif isinstance(other, (Timestamp, datetime)):
return self._sub_datelike(other)
elif isinstance(other, Period):
return self._sub_period(other)
else: # pragma: no cover
return NotImplemented
cls.__sub__ = __sub__
def __rsub__(self, other):
return -(self - other)
cls.__rsub__ = __rsub__
cls.__iadd__ = __add__
cls.__isub__ = __sub__
def _add_delta(self, other):
return NotImplemented
def _add_delta_td(self, other):
# add a delta of a timedeltalike
# return the i8 result view
inc = libts._delta_to_nanoseconds(other)
new_values = checked_add_with_arr(self.asi8, inc,
arr_mask=self._isnan).view('i8')
if self.hasnans:
new_values[self._isnan] = iNaT
return new_values.view('i8')
def _add_delta_tdi(self, other):
# add a delta of a TimedeltaIndex
# return the i8 result view
# delta operation
if not len(self) == len(other):
raise ValueError("cannot add indices of unequal length")
self_i8 = self.asi8
other_i8 = other.asi8
new_values = checked_add_with_arr(self_i8, other_i8,
arr_mask=self._isnan,
b_mask=other._isnan)
if self.hasnans or other.hasnans:
mask = (self._isnan) | (other._isnan)
new_values[mask] = iNaT
return new_values.view(self.dtype)
def isin(self, values):
"""
Compute boolean array of whether each index value is found in the
passed set of values
Parameters
----------
values : set or sequence of values
Returns
-------
is_contained : ndarray (boolean dtype)
"""
if not isinstance(values, type(self)):
try:
values = type(self)(values)
except ValueError:
return self.asobject.isin(values)
return algorithms.isin(self.asi8, values.asi8)
def shift(self, n, freq=None):
"""
Specialized shift which produces a DatetimeIndex
Parameters
----------
n : int
Periods to shift by
freq : DateOffset or timedelta-like, optional
Returns
-------
shifted : DatetimeIndex
"""
if freq is not None and freq != self.freq:
if isinstance(freq, compat.string_types):
freq = frequencies.to_offset(freq)
offset = n * freq
result = self + offset
if hasattr(self, 'tz'):
result.tz = self.tz
return result
if n == 0:
# immutable so OK
return self
if self.freq is None:
raise ValueError("Cannot shift with no freq")
start = self[0] + n * self.freq
end = self[-1] + n * self.freq
attribs = self._get_attributes_dict()
attribs['start'] = start
attribs['end'] = end
return type(self)(**attribs)
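    # Usage sketch (added for illustration, not in the original source):
    # >>> import pandas as pd
    # >>> idx = pd.date_range('2013-01-01', periods=3, freq='D')
    # >>> idx.shift(2)
    # DatetimeIndex(['2013-01-03', '2013-01-04', '2013-01-05'], dtype='datetime64[ns]', freq='D')
    # Passing an explicit freq shifts by n * freq instead of n periods of the
    # index's own frequency:
    # >>> list(idx.shift(1, freq='H') == idx + pd.Timedelta(hours=1))
    # [True, True, True]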
def repeat(self, repeats, *args, **kwargs):
"""
Analogous to ndarray.repeat
"""
nv.validate_repeat(args, kwargs)
if isinstance(self, ABCPeriodIndex):
freq = self.freq
else:
freq = None
return self._shallow_copy(self.asi8.repeat(repeats),
freq=freq)
@Appender(_index_shared_docs['where'] % _index_doc_kwargs)
def where(self, cond, other=None):
other = _ensure_datetimelike_to_i8(other)
values = _ensure_datetimelike_to_i8(self)
result = np.where(cond, values, other).astype('i8')
result = self._ensure_localized(result)
return self._shallow_copy(result,
**self._get_attributes_dict())
def summary(self, name=None):
"""
return a summarized representation
"""
formatter = self._formatter_func
if len(self) > 0:
index_summary = ', %s to %s' % (formatter(self[0]),
formatter(self[-1]))
else:
index_summary = ''
if name is None:
name = type(self).__name__
result = '%s: %s entries%s' % (printing.pprint_thing(name),
len(self), index_summary)
if self.freq:
result += '\nFreq: %s' % self.freqstr
# display as values, not quoted
result = result.replace("'", "")
return result
def _append_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class
"""
attribs = self._get_attributes_dict()
attribs['name'] = name
if not isinstance(self, ABCPeriodIndex):
# reset freq
attribs['freq'] = None
if getattr(self, 'tz', None) is not None:
return _concat._concat_datetimetz(to_concat, name)
else:
new_data = np.concatenate([c.asi8 for c in to_concat])
return self._simple_new(new_data, **attribs)
def _ensure_datetimelike_to_i8(other):
""" helper for coercing an input scalar or array to i8 """
if lib.isscalar(other) and isnull(other):
other = iNaT
elif isinstance(other, ABCIndexClass):
# convert tz if needed
if getattr(other, 'tz', None) is not None:
other = other.tz_localize(None).asi8
else:
other = other.asi8
else:
try:
other = np.array(other, copy=False).view('i8')
except TypeError:
            # period array cannot be coerced to int
other = Index(other).asi8
return other
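# Standalone sketch (added for illustration, not part of the pandas source
# above) of the i8 trick that _ensure_datetimelike_to_i8 and where() rely on:
# datetime64 data is viewed as int64 nanoseconds, combined with np.where, and
# then reinterpreted as datetime64 again.
def _i8_where_demo():
    import numpy as np
    stamps = np.array(['2013-01-01', '2013-01-02', 'NaT'], dtype='datetime64[ns]')
    fallback = np.array('2000-01-01', dtype='datetime64[ns]')
    cond = ~np.isnat(stamps)                 # keep valid stamps, replace NaT
    picked = np.where(cond, stamps.view('i8'), fallback.view('i8'))
    return picked.view('datetime64[ns]')     # NaT slot now holds the fallback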
|
agpl-3.0
|
fixbugs/py-fixbugs-tools
|
test/machlearn/7650.py
|
1
|
5032
|
#!/usr/bin/env python
#coding=utf8
import numpy as np
#from scipy import linalg
from scipy.sparse.linalg import svds
from scipy import sparse
#import matplotlib.pyplot as plt
# a = np.random.randn(9, 6) + 1.j*np.random.randn(9, 6)
# print a
# U, s, Vh = linalg.svd(a)
# print U.shape, Vh.shape, s.shape
# U, s, Vh = linalg.svd(a, full_matrices=False)
# print U.shape, Vh.shape, s.shape
# S = linalg.diagsvd(s, 6, 6)
# print S
# print np.allclose(a, np.dot(U, np.dot(S, Vh)))
# s2 = linalg.svd(a, compute_uv=False)
# print np.allclose(s, s2)
def getFileContent(file_path):
import os
if not os.path.exists(file_path):
return False
result = []
    # open before the try block so that `f` is guaranteed to exist in `finally`
    f = open(file_path, 'r')
    try:
        for l in f.readlines():
            l = l.strip("\r\n")
            result.append(l.split(","))
    finally:
        f.close()
return result[1:]
uidArr = []
movieArr = []
def getTrainMaxi(trainarr):
#uidArr = []
totalArr = np.zeros((500, 11000))
totalArr = np.ndarray.tolist(totalArr)
#movieArr = []
for t in trainarr:
if t[0] not in uidArr:
uidArr.append(t[0])
if t[1] not in movieArr:
movieArr.append(t[1])
Uindex = int(uidArr.index(t[0]))
Mindex = int(movieArr.index(t[1]))
totalArr[Uindex][Mindex] = int(t[2])
return totalArr
def vector_to_diagonal(vector):
"""
    Place the given vector on the diagonal of a square diagonal matrix.
:param vector:
:return:
"""
if (isinstance(vector, np.ndarray) and vector.ndim == 1) or \
isinstance(vector, list):
length = len(vector)
diag_matrix = np.zeros((length, length))
np.fill_diagonal(diag_matrix, vector)
return diag_matrix
return None
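# Illustrative helper (my addition, not part of the original script): svds
# returns the singular values as a flat vector, so vector_to_diagonal() is
# what lets us rebuild the rank-k approximation U * diag(s) * VT.
def svd_reconstruction_demo():
    demo = np.array([[5., 5., 0., 1.],
                     [5., 4., 0., 0.],
                     [0., 0., 5., 4.],
                     [0., 1., 5., 5.]])
    U, s, VT = svds(sparse.csr_matrix(demo), k=2)
    approx = np.dot(np.dot(U, vector_to_diagonal(s)), VT)
    # with k=2 the reconstruction is already close to the dense matrix
    assert np.abs(approx - demo).max() < 1.5
    return approx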
def getTestMaxi(trainarr):
#uidArr = []
totalArr = np.zeros((500, 11000))
totalArr = np.ndarray.tolist(totalArr)
#movieArr = []
for t in trainarr:
if t[0] not in uidArr:
uidArr.append(t[0])
if t[1] not in movieArr:
movieArr.append(t[1])
Uindex = int(uidArr.index(t[0]))
Mindex = int(movieArr.index(t[1]))
totalArr[Uindex][Mindex] = int(t[2])
return totalArr
# def hdsp(trains, testarr):
# for t in testarr:
# if t[0] not in uidArr:
# uidArr.append(t[0])
# if t[1] not in movieArr:
# movieArr.append(t[1])
# Uindex = int(uidArr.index(t[0]))
# Mindex = int(movieArr.index(t[1]))
# trains[Uindex][Mindex] = 0
trainarr = getFileContent('7650d/train.txt')
trainres = getTrainMaxi(trainarr)
#print trainres
# testarr = getFileContent('7650d/test.txt')
# print trainres
# exit(0)
#RATE_MATRIX = trainres
# RATE_MATRIX = np.array(
# [[5, 5, 3, 0, 5, 5],
# [5, 0, 4, 0, 4, 4],
# [0, 3, 0, 5, 4, 5],
# [5, 4, 3, 3, 5, 5]]
# )
RATE_MATRIX = np.array(trainres)
RATE_MATRIX = RATE_MATRIX.astype('float')
# from sklearn.metrics.pairwise import pairwise_distances
# user_similarity = pairwise_distances(RATE_MATRIX, metric='cosine')
# print user_similarity
# exit(0)
data_arr_f = RATE_MATRIX
for i in range(300):
U, S, VT = svds(sparse.csr_matrix(data_arr_f), k=15, maxiter=5)
S = vector_to_diagonal(S)
data_arr_f = RATE_MATRIX + np.dot(np.dot(U, S), VT) * (RATE_MATRIX < 1e-6)
# U, S, VT = svds(sparse.csr_matrix(RATE_MATRIX), k=15, maxiter=200)
# S = vector_to_diagonal(S)
print data_arr_f
#exit(0)
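# The loop above is a simple iterative low-rank completion: observed ratings
# stay fixed while the unobserved (zero) cells are repeatedly replaced by the
# rank-k SVD reconstruction. A compact restatement of that idea (my sketch,
# not part of the original pipeline):
def complete_by_svd(rating_matrix, k=15, iterations=30):
    filled = rating_matrix.astype('float')
    unobserved = rating_matrix < 1e-6             # cells with no rating yet
    for _ in range(iterations):
        U, S, VT = svds(sparse.csr_matrix(filled), k=k)
        approx = np.dot(np.dot(U, vector_to_diagonal(S)), VT)
        # keep the known ratings, refresh only the missing cells
        filled = np.where(unobserved, approx, rating_matrix)
    return filled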
# print 'User topic distribution:'
# print U
# print 'Singular values:'
# print S
# print 'Item topic distribution:'
# print VT
# print 'Reconstruct the rating matrix and filter out already-rated items:'
# lastEnd = np.dot(np.dot(U, S), VT) * (RATE_MATRIX < 1e-6)
# print type(lastEnd)
# print lastEnd[0][0]
#print lastEnd
#exit(0)
testarr = getFileContent('7650d/test.txt')
total_sum = 0
total_arr = []
for t in testarr:
us = t[0]
mos = t[1]
uindex = uidArr.index(us)
if mos not in movieArr or us not in uidArr:
sres = '4'
#total_sum += sres
total_arr.append(str(sres))
continue
mindex = movieArr.index(mos)
sres = int(round(data_arr_f[uindex][mindex]))
if sres < 1:
sres = 1
if sres > 5:
sres = 5
# if sres == 0:
# sres = int(RATE_MATRIX[uindex][mindex])
# if sres == 0:
# sres = '*'
total_arr.append(str(sres))
total_sum += sres
#print sres
print "totalnum:", total_sum
res_str = ''.join(total_arr)
print "totalstring:", res_str
file_object = open('result.txt', 'w')
file_object.write(res_str)
file_object.close()
exit(0)
# trainarr = []
# tmp = [1, 2, 5]
# trainarr.append(tmp)
# trainarr.append([1,3,1])
# trainarr.append([2,2,3])
# trainarr.append([2,3,1])
trainarr = getFileContent('7650d/train.txt')
trainres = getTrainMaxi(trainarr)
#print trainres
#testarr = getFileContent('7650d/test.txt')
# first get the train matrix used for the result
# piece for all result
# U, s, Vh = linalg.svd(trainres)
# print U
# print "============"
# print Vh
# print "============="
# print s
#print s
#print testarr
|
gpl-3.0
|
shaowei-su/pyAudioAnalysis
|
data/testComputational.py
|
5
|
3609
|
import sys
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import audioFeatureExtraction
from pyAudioAnalysis import audioTrainTest as aT
from pyAudioAnalysis import audioSegmentation as aS
import matplotlib.pyplot as plt
import time
nExp = 4
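# Note (my addition, not part of the original script): every branch of main()
# below reports the same "x realtime" figure, i.e. how many seconds of audio
# are processed per second of wall-clock time. The pattern in isolation:
def realtime_factor(audio_seconds, work):
    t1 = time.clock()
    work()                        # run the processing step being benchmarked
    t2 = time.clock()
    return audio_seconds / (t2 - t1)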
def main(argv):
if argv[1] == "-shortTerm":
for i in range(nExp):
[Fs, x] = audioBasicIO.readAudioFile("diarizationExample.wav");
duration = x.shape[0] / float(Fs)
t1 = time.clock()
F = audioFeatureExtraction.stFeatureExtraction(x, Fs, 0.050*Fs, 0.050*Fs);
t2 = time.clock()
perTime1 = duration / (t2-t1); print "short-term feature extraction: {0:.1f} x realtime".format(perTime1)
elif argv[1] == "-classifyFile":
for i in range(nExp):
[Fs, x] = audioBasicIO.readAudioFile("diarizationExample.wav");
duration = x.shape[0] / float(Fs)
t1 = time.clock()
aT.fileClassification("diarizationExample.wav", "svmSM","svm")
t2 = time.clock()
perTime1 = duration / (t2-t1); print "Mid-term feature extraction + classification \t {0:.1f} x realtime".format(perTime1)
elif argv[1] == "-mtClassify":
for i in range(nExp):
[Fs, x] = audioBasicIO.readAudioFile("diarizationExample.wav");
duration = x.shape[0] / float(Fs)
t1 = time.clock()
[flagsInd, classesAll, acc] = aS.mtFileClassification("diarizationExample.wav", "svmSM", "svm", False, '')
t2 = time.clock()
perTime1 = duration / (t2-t1); print "Fix-sized classification - segmentation \t {0:.1f} x realtime".format(perTime1)
elif argv[1] == "-hmmSegmentation":
for i in range(nExp):
[Fs, x] = audioBasicIO.readAudioFile("diarizationExample.wav");
duration = x.shape[0] / float(Fs)
t1 = time.clock()
aS.hmmSegmentation('diarizationExample.wav', 'hmmRadioSM', False, '')
t2 = time.clock()
perTime1 = duration / (t2-t1); print "HMM-based classification - segmentation \t {0:.1f} x realtime".format(perTime1)
elif argv[1] == "-silenceRemoval":
for i in range(nExp):
[Fs, x] = audioBasicIO.readAudioFile("diarizationExample.wav");
duration = x.shape[0] / float(Fs)
t1 = time.clock()
[Fs, x] = audioBasicIO.readAudioFile("diarizationExample.wav");
segments = aS.silenceRemoval(x, Fs, 0.050, 0.050, smoothWindow = 1.0, Weight = 0.3, plot = False)
t2 = time.clock()
perTime1 = duration / (t2-t1); print "Silence removal \t {0:.1f} x realtime".format(perTime1)
elif argv[1] == "-thumbnailing":
for i in range(nExp):
[Fs1, x1] = audioBasicIO.readAudioFile("scottish.wav")
duration1 = x1.shape[0] / float(Fs1)
t1 = time.clock()
[A1, A2, B1, B2, Smatrix] = aS.musicThumbnailing(x1, Fs1, 1.0, 1.0, 15.0) # find thumbnail endpoints
t2 = time.clock()
perTime1 = duration1 / (t2-t1); print "Thumbnail \t {0:.1f} x realtime".format(perTime1)
elif argv[1] == "-diarization-noLDA":
for i in range(nExp):
[Fs1, x1] = audioBasicIO.readAudioFile("diarizationExample.wav")
duration1 = x1.shape[0] / float(Fs1)
t1 = time.clock()
aS.speakerDiarization("diarizationExample.wav", 4, LDAdim = 0, PLOT = False)
t2 = time.clock()
perTime1 = duration1 / (t2-t1); print "Diarization \t {0:.1f} x realtime".format(perTime1)
elif argv[1] == "-diarization-LDA":
for i in range(nExp):
[Fs1, x1] = audioBasicIO.readAudioFile("diarizationExample.wav")
duration1 = x1.shape[0] / float(Fs1)
t1 = time.clock()
aS.speakerDiarization("diarizationExample.wav", 4, PLOT = False)
t2 = time.clock()
perTime1 = duration1 / (t2-t1); print "Diarization \t {0:.1f} x realtime".format(perTime1)
if __name__ == '__main__':
main(sys.argv)
|
apache-2.0
|
arabenjamin/scikit-learn
|
examples/svm/plot_separating_hyperplane.py
|
294
|
1273
|
"""
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
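# Side note (my addition, not part of the original scikit-learn example): for
# a hard-margin linear SVM the distance between the two dashed margin lines is
# 2 / ||w||, which can be read straight off the fitted coefficients.
margin_width = 2.0 / np.linalg.norm(w)
print("margin width: %.3f" % margin_width)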
|
bsd-3-clause
|
abergeron/pylearn2
|
pylearn2/cross_validation/tests/test_subset_iterators.py
|
49
|
2411
|
"""
Test subset iterators.
"""
import numpy as np
from pylearn2.testing.skip import skip_if_no_sklearn
def test_validation_k_fold():
"""Test ValidationKFold."""
skip_if_no_sklearn()
from pylearn2.cross_validation.subset_iterators import ValidationKFold
n = 30
# test with indices
cv = ValidationKFold(n)
for train, valid, test in cv:
assert np.unique(np.concatenate((train, valid, test))).size == n
assert valid.size == n / cv.n_folds
assert test.size == n / cv.n_folds
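# Hedged sketch (my addition; the real ValidationKFold lives in
# pylearn2.cross_validation.subset_iterators): a train/valid/test split can be
# built from a plain k-fold partition by holding out one fold for validation
# and another fold for testing, which is the contract the assertions above
# check -- each held-out part has n / n_folds samples and the three parts
# together cover the data.
def three_way_kfold(n, n_folds=5):
    indices = np.arange(n)
    folds = np.array_split(indices, n_folds)
    for i in range(n_folds):
        valid = folds[i]
        test = folds[(i + 1) % n_folds]
        train = np.setdiff1d(indices, np.concatenate([valid, test]))
        yield train, valid, test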
def test_stratified_validation_k_fold():
"""Test StratifiedValidationKFold."""
skip_if_no_sklearn()
from pylearn2.cross_validation.subset_iterators import (
StratifiedValidationKFold)
n = 30
y = np.concatenate((np.zeros(n / 2, dtype=int), np.ones(n / 2, dtype=int)))
# test with indices
cv = StratifiedValidationKFold(y)
for train, valid, test in cv:
assert np.unique(np.concatenate((train, valid, test))).size == n
assert valid.size == n / cv.n_folds
assert test.size == n / cv.n_folds
assert np.count_nonzero(y[valid]) == (n / 2) * (1. / cv.n_folds)
assert np.count_nonzero(y[test]) == (n / 2) * (1. / cv.n_folds)
def test_validation_shuffle_split():
"""Test ValidationShuffleSplit."""
skip_if_no_sklearn()
from pylearn2.cross_validation.subset_iterators import (
ValidationShuffleSplit)
n = 30
# test with indices
cv = ValidationShuffleSplit(n)
for train, valid, test in cv:
assert np.unique(np.concatenate((train, valid, test))).size == n
assert valid.size == n * cv.test_size
assert test.size == n * cv.test_size
def test_stratified_validation_shuffle_split():
"""Test StratifiedValidationShuffleSplit."""
skip_if_no_sklearn()
from pylearn2.cross_validation.subset_iterators import (
StratifiedValidationShuffleSplit)
n = 60
y = np.concatenate((np.zeros(n / 2, dtype=int), np.ones(n / 2, dtype=int)))
# test with indices
cv = StratifiedValidationShuffleSplit(y)
for train, valid, test in cv:
assert np.unique(np.concatenate((train, valid, test))).size == n
assert valid.size == n * cv.test_size
assert test.size == n * cv.test_size
assert np.count_nonzero(y[valid]) == (n / 2) * cv.test_size
assert np.count_nonzero(y[test]) == (n / 2) * cv.test_size
|
bsd-3-clause
|
olivierayache/xuggle-xuggler
|
captive/libsrt/csrc/scripts/changelog/changelog.py
|
3
|
2334
|
import enum
import click
import numpy as np
import pandas as pd
@enum.unique
class Area(enum.Enum):
core = 'core'
tests = 'tests'
build = 'build'
apps = 'apps'
docs = 'docs'
def define_area(msg):
areas = [e.value for e in Area]
for area in areas:
if msg.startswith(f'[{area}] '):
return area
return np.NaN
def delete_prefix(msg):
prefixes = [f'[{e.value}] ' for e in Area]
for prefix in prefixes:
if msg.startswith(prefix):
return msg[len(prefix):]
return msg[:]
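# Quick self-check (my addition, not part of the original script) showing how
# the two helpers above cooperate on a typical commit subject line:
assert define_area('[core] Fix listener backlog') == 'core'
assert delete_prefix('[core] Fix listener backlog') == 'Fix listener backlog'
assert delete_prefix('Bump version') == 'Bump version'   # untouched without a prefix
assert np.isnan(define_area('Bump version'))             # unknown area becomes NaN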
def write_into_changelog(df, f):
f.write('\n')
for _, row in df.iterrows():
f.write(f"\n{row['commit']} {row['message']}")
f.write('\n')
@click.command()
@click.argument(
'git_log',
type=click.Path(exists=True)
)
def main(git_log):
""" Script designed to create changelog out of .csv SRT git log """
df = pd.read_csv(git_log, sep = '|', names = ['commit', 'message', 'author', 'email'])
df['area'] = df['message'].apply(define_area)
df['message'] = df['message'].apply(delete_prefix)
core = df[df['area']=='core']
tests = df[df['area']=='tests']
build = df[df['area']=='build']
apps = df[df['area']=='apps']
docs = df[df['area']=='docs']
other = df[df['area'].isna()]
with open('changelog.md', 'w') as f:
f.write('# Release Notes\n')
f.write('\n## Changelog\n')
f.write('\n<details><summary>Click to expand/collapse</summary>')
f.write('\n<p>')
f.write('\n')
if not core.empty:
f.write('\n### Core Functionality')
write_into_changelog(core, f)
if not tests.empty:
f.write('\n### Unit Tests')
write_into_changelog(tests, f)
if not build.empty:
f.write('\n### Build Scripts (CMake, etc.)')
write_into_changelog(build, f)
if not apps.empty:
f.write('\n### Sample Applications')
write_into_changelog(apps, f)
if not docs.empty:
f.write('\n### Documentation')
write_into_changelog(docs, f)
if not other.empty:
f.write('\n### Other')
write_into_changelog(other, f)
f.write('\n</p>')
f.write('\n</details>')
if __name__ == '__main__':
main()
|
lgpl-3.0
|
amitts/myo-spark
|
myo.py
|
1
|
3637
|
from __future__ import print_function
from collections import Counter, deque
import sys
import time
import numpy as np
from inspect import getmembers
from pprint import pprint
import requests
try:
from sklearn import neighbors, svm
HAVE_SK = True
except ImportError:
HAVE_SK = False
from common import *
from myo_raw import MyoRaw
SUBSAMPLE = 3
K = 15
class NNClassifier(object):
'''A wrapper for sklearn's nearest-neighbor classifier that stores
training data in vals0, ..., vals9.dat.'''
def __init__(self):
for i in range(10):
with open('vals%d.dat' % i, 'ab') as f: pass
self.read_data()
def store_data(self, cls, vals):
with open('vals%d.dat' % cls, 'ab') as f:
f.write(pack('8H', *vals))
self.train(np.vstack([self.X, vals]), np.hstack([self.Y, [cls]]))
def read_data(self):
X = []
Y = []
for i in range(10):
X.append(np.fromfile('vals%d.dat' % i, dtype=np.uint16).reshape((-1, 8)))
Y.append(i + np.zeros(X[-1].shape[0]))
self.train(np.vstack(X), np.hstack(Y))
def train(self, X, Y):
self.X = X
self.Y = Y
if HAVE_SK and self.X.shape[0] >= K * SUBSAMPLE:
self.nn = neighbors.KNeighborsClassifier(n_neighbors=K, algorithm='kd_tree')
self.nn.fit(self.X[::SUBSAMPLE], self.Y[::SUBSAMPLE])
else:
self.nn = None
def nearest(self, d):
dists = ((self.X - d)**2).sum(1)
ind = dists.argmin()
return self.Y[ind]
def classify(self, d):
if self.X.shape[0] < K * SUBSAMPLE: return 0
if not HAVE_SK: return self.nearest(d)
return int(self.nn.predict(d)[0])
class Myo(MyoRaw):
'''Adds higher-level pose classification and handling onto MyoRaw.'''
HIST_LEN = 25
def __init__(self, cls, tty=None):
MyoRaw.__init__(self, tty)
self.cls = cls
self.history = deque([0] * Myo.HIST_LEN, Myo.HIST_LEN)
self.history_cnt = Counter(self.history)
self.add_emg_handler(self.emg_handler)
self.last_pose = None
self.pose_handlers = []
def emg_handler(self, emg, moving):
y = self.cls.classify(emg)
self.history_cnt[self.history[0]] -= 1
self.history_cnt[y] += 1
self.history.append(y)
r, n = self.history_cnt.most_common(1)[0]
if self.last_pose is None or (n > self.history_cnt[self.last_pose] + 5 and n > Myo.HIST_LEN / 2):
self.on_raw_pose(r)
self.last_pose = r
def add_raw_pose_handler(self, h):
self.pose_handlers.append(h)
def on_raw_pose(self, pose):
for h in self.pose_handlers:
h(pose)
def turnthefuckeron():
payload = {'access_token':'922952216c46e9985debbdd58a4e947d7e43d6b8','params':'l1,HIGH'}
r = requests.post("https://api.spark.io/v1/devices/53ff68066667574815402067/led", data=payload)
print(r.text)
def turnthefuckeroff():
payload = {'access_token':'922952216c46e9985debbdd58a4e947d7e43d6b8','params':'l1,LOW'}
r = requests.post("https://api.spark.io/v1/devices/53ff68066667574815402067/led", data=payload)
print(r.text)
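# Minimal standalone sketch (my addition, not part of the original file) of
# the pose-smoothing idea used in Myo.emg_handler above: per-sample
# classifications are pushed into a fixed-length deque and a pose is only
# reported once it clearly dominates the recent history.
def smooth_poses(raw_poses, hist_len=25, margin=5):
    history = deque([0] * hist_len, hist_len)
    counts = Counter(history)
    last = None
    reported = []
    for y in raw_poses:
        counts[history[0]] -= 1      # the oldest entry is about to be evicted
        counts[y] += 1
        history.append(y)
        pose, n = counts.most_common(1)[0]
        if last is None or (n > counts[last] + margin and n > hist_len / 2):
            reported.append(pose)
            last = pose
    return reported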
if __name__ == '__main__':
import subprocess
m = Myo(NNClassifier(), sys.argv[1] if len(sys.argv) >= 2 else None)
m.add_raw_pose_handler(print)
def page(pose):
if pose !=0:
if pose.value == 1:
turnthefuckeron()
elif pose.value == 2:
turnthefuckeroff()
m.add_raw_pose_handler(page)
m.connect()
while True:
m.run()
|
mit
|
jorge2703/scikit-learn
|
sklearn/utils/tests/test_extmath.py
|
130
|
16270
|
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis Engemann <d.engemann@fz-juelich.de>
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import linalg
from scipy import stats
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.extmath import density
from sklearn.utils.extmath import logsumexp
from sklearn.utils.extmath import norm, squared_norm
from sklearn.utils.extmath import randomized_svd
from sklearn.utils.extmath import row_norms
from sklearn.utils.extmath import weighted_mode
from sklearn.utils.extmath import cartesian
from sklearn.utils.extmath import log_logistic
from sklearn.utils.extmath import fast_dot, _fast_dot
from sklearn.utils.extmath import svd_flip
from sklearn.utils.extmath import _batch_mean_variance_update
from sklearn.utils.extmath import _deterministic_vector_sign_flip
from sklearn.datasets.samples_generator import make_low_rank_matrix
def test_density():
rng = np.random.RandomState(0)
X = rng.randint(10, size=(10, 5))
X[1, 2] = 0
X[5, 3] = 0
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_coo = sparse.coo_matrix(X)
X_lil = sparse.lil_matrix(X)
for X_ in (X_csr, X_csc, X_coo, X_lil):
assert_equal(density(X_), density(X))
def test_uniform_weights():
# with uniform weights, results should be identical to stats.mode
rng = np.random.RandomState(0)
x = rng.randint(10, size=(10, 5))
weights = np.ones(x.shape)
for axis in (None, 0, 1):
mode, score = stats.mode(x, axis)
mode2, score2 = weighted_mode(x, weights, axis)
assert_true(np.all(mode == mode2))
assert_true(np.all(score == score2))
def test_random_weights():
# set this up so that each row should have a weighted mode of 6,
# with a score that is easily reproduced
mode_result = 6
rng = np.random.RandomState(0)
x = rng.randint(mode_result, size=(100, 10))
w = rng.random_sample(x.shape)
x[:, :5] = mode_result
w[:, :5] += 1
mode, score = weighted_mode(x, w, axis=1)
assert_array_equal(mode, mode_result)
assert_array_almost_equal(score.ravel(), w[:, :5].sum(1))
def test_logsumexp():
# Try to add some smallish numbers in logspace
x = np.array([1e-40] * 1000000)
logx = np.log(x)
assert_almost_equal(np.exp(logsumexp(logx)), x.sum())
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
def test_randomized_svd_low_rank():
# Check that extmath.randomized_svd is consistent with linalg.svd
n_samples = 100
n_features = 500
rank = 5
k = 10
# generate a matrix X of approximate effective rank `rank` and no noise
# component (very structured signal):
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
U, s, V = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_equal(Ua.shape, (n_samples, k))
assert_equal(sa.shape, (k,))
assert_equal(Va.shape, (k, n_features))
# ensure that the singular values of both methods are equal up to the real
# rank of the matrix
assert_almost_equal(s[:k], sa)
# check the singular vectors too (while not checking the sign)
assert_almost_equal(np.dot(U[:, :k], V[:k, :]), np.dot(Ua, Va))
# check the sparse matrix representation
X = sparse.csr_matrix(X)
# compute the singular values of X using the fast approximate method
Ua, sa, Va = randomized_svd(X, k)
assert_almost_equal(s[:rank], sa[:rank])
def test_norm_squared_norm():
X = np.random.RandomState(42).randn(50, 63)
X *= 100 # check stability
X += 200
assert_almost_equal(np.linalg.norm(X.ravel()), norm(X))
assert_almost_equal(norm(X) ** 2, squared_norm(X), decimal=6)
assert_almost_equal(np.linalg.norm(X), np.sqrt(squared_norm(X)), decimal=6)
def test_row_norms():
X = np.random.RandomState(42).randn(100, 100)
sq_norm = (X ** 2).sum(axis=1)
assert_array_almost_equal(sq_norm, row_norms(X, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(X))
Xcsr = sparse.csr_matrix(X, dtype=np.float32)
assert_array_almost_equal(sq_norm, row_norms(Xcsr, squared=True), 5)
assert_array_almost_equal(np.sqrt(sq_norm), row_norms(Xcsr))
def test_randomized_svd_low_rank_with_noise():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
    # generate a matrix X with structured approximate rank `rank` and an
# important noisy component
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.05)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is helping getting rid of the noise:
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_infinite_rank():
# Check that extmath.randomized_svd can handle noisy matrices
n_samples = 100
n_features = 500
rank = 5
k = 10
# let us try again without 'low_rank component': just regularly but slowly
# decreasing singular values: the rank of the data matrix is infinite
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=1.0,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
# compute the singular values of X using the slow exact method
_, s, _ = linalg.svd(X, full_matrices=False)
# compute the singular values of X using the fast approximate method
# without the iterated power method
_, sa, _ = randomized_svd(X, k, n_iter=0)
# the approximation does not tolerate the noise:
assert_greater(np.abs(s[:k] - sa).max(), 0.1)
# compute the singular values of X using the fast approximate method with
# iterated power method
_, sap, _ = randomized_svd(X, k, n_iter=5)
# the iterated power method is still managing to get most of the structure
# at the requested rank
assert_almost_equal(s[:k], sap, decimal=3)
def test_randomized_svd_transpose_consistency():
    # Check that transposing the design matrix has limited impact
n_samples = 100
n_features = 500
rank = 4
k = 10
X = make_low_rank_matrix(n_samples=n_samples, n_features=n_features,
effective_rank=rank, tail_strength=0.5,
random_state=0)
assert_equal(X.shape, (n_samples, n_features))
U1, s1, V1 = randomized_svd(X, k, n_iter=3, transpose=False,
random_state=0)
U2, s2, V2 = randomized_svd(X, k, n_iter=3, transpose=True,
random_state=0)
U3, s3, V3 = randomized_svd(X, k, n_iter=3, transpose='auto',
random_state=0)
U4, s4, V4 = linalg.svd(X, full_matrices=False)
assert_almost_equal(s1, s4[:k], decimal=3)
assert_almost_equal(s2, s4[:k], decimal=3)
assert_almost_equal(s3, s4[:k], decimal=3)
assert_almost_equal(np.dot(U1, V1), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
assert_almost_equal(np.dot(U2, V2), np.dot(U4[:, :k], V4[:k, :]),
decimal=2)
# in this case 'auto' is equivalent to transpose
assert_almost_equal(s2, s3)
def test_svd_flip():
# Check that svd_flip works in both situations, and reconstructs input.
rs = np.random.RandomState(1999)
n_samples = 20
n_features = 10
X = rs.randn(n_samples, n_features)
# Check matrix reconstruction
U, S, V = linalg.svd(X, full_matrices=False)
U1, V1 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U1 * S, V1), X, decimal=6)
# Check transposed matrix reconstruction
XT = X.T
U, S, V = linalg.svd(XT, full_matrices=False)
U2, V2 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U2 * S, V2), XT, decimal=6)
# Check that different flip methods are equivalent under reconstruction
U_flip1, V_flip1 = svd_flip(U, V, u_based_decision=True)
assert_almost_equal(np.dot(U_flip1 * S, V_flip1), XT, decimal=6)
U_flip2, V_flip2 = svd_flip(U, V, u_based_decision=False)
assert_almost_equal(np.dot(U_flip2 * S, V_flip2), XT, decimal=6)
def test_randomized_svd_sign_flip():
a = np.array([[2.0, 0.0], [0.0, 1.0]])
u1, s1, v1 = randomized_svd(a, 2, flip_sign=True, random_state=41)
for seed in range(10):
u2, s2, v2 = randomized_svd(a, 2, flip_sign=True, random_state=seed)
assert_almost_equal(u1, u2)
assert_almost_equal(v1, v2)
assert_almost_equal(np.dot(u2 * s2, v2), a)
assert_almost_equal(np.dot(u2.T, u2), np.eye(2))
assert_almost_equal(np.dot(v2.T, v2), np.eye(2))
def test_cartesian():
# Check if cartesian product delivers the right results
axes = (np.array([1, 2, 3]), np.array([4, 5]), np.array([6, 7]))
true_out = np.array([[1, 4, 6],
[1, 4, 7],
[1, 5, 6],
[1, 5, 7],
[2, 4, 6],
[2, 4, 7],
[2, 5, 6],
[2, 5, 7],
[3, 4, 6],
[3, 4, 7],
[3, 5, 6],
[3, 5, 7]])
out = cartesian(axes)
assert_array_equal(true_out, out)
# check single axis
x = np.arange(3)
assert_array_equal(x[:, np.newaxis], cartesian((x,)))
def test_logistic_sigmoid():
# Check correctness and robustness of logistic sigmoid implementation
naive_logistic = lambda x: 1 / (1 + np.exp(-x))
naive_log_logistic = lambda x: np.log(naive_logistic(x))
x = np.linspace(-2, 2, 50)
assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))
extreme_x = np.array([-100., 100.])
assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
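# For reference (my addition, not part of the test suite): the naive
# log(1 / (1 + exp(-x))) overflows for large negative x, so a stable
# log-sigmoid is usually computed piecewise; this sketch reproduces the values
# that log_logistic is being tested for above.
def _stable_log_logistic(x):
    x = np.asarray(x, dtype=float)
    out = np.empty_like(x)
    pos = x >= 0
    out[pos] = -np.log1p(np.exp(-x[pos]))            # exp(-x) <= 1 here
    out[~pos] = x[~pos] - np.log1p(np.exp(x[~pos]))  # exp(x) <= 1 here
    return out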
def test_fast_dot():
# Check fast dot blas wrapper function
if fast_dot is np.dot:
return
rng = np.random.RandomState(42)
A = rng.random_sample([2, 10])
B = rng.random_sample([2, 10])
try:
linalg.get_blas_funcs(['gemm'])[0]
has_blas = True
except (AttributeError, ValueError):
has_blas = False
if has_blas:
# Test _fast_dot for invalid input.
# Maltyped data.
for dt1, dt2 in [['f8', 'f4'], ['i4', 'i4']]:
assert_raises(ValueError, _fast_dot, A.astype(dt1),
B.astype(dt2).T)
# Malformed data.
## ndim == 0
E = np.empty(0)
assert_raises(ValueError, _fast_dot, E, E)
## ndim == 1
assert_raises(ValueError, _fast_dot, A, A[0])
## ndim > 2
assert_raises(ValueError, _fast_dot, A.T, np.array([A, A]))
## min(shape) == 1
assert_raises(ValueError, _fast_dot, A, A[0, :][None, :])
# test for matrix mismatch error
assert_raises(ValueError, _fast_dot, A, A)
# Test cov-like use case + dtypes.
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
# col < row
C = np.dot(A.T, A)
C_ = fast_dot(A.T, A)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A, B.T)
C_ = fast_dot(A, B.T)
assert_almost_equal(C, C_, decimal=5)
# Test square matrix * rectangular use case.
A = rng.random_sample([2, 2])
for dtype in ['f8', 'f4']:
A = A.astype(dtype)
B = B.astype(dtype)
C = np.dot(A, B)
C_ = fast_dot(A, B)
assert_almost_equal(C, C_, decimal=5)
C = np.dot(A.T, B)
C_ = fast_dot(A.T, B)
assert_almost_equal(C, C_, decimal=5)
if has_blas:
for x in [np.array([[d] * 10] * 2) for d in [np.inf, np.nan]]:
assert_raises(ValueError, _fast_dot, x, x.T)
def test_incremental_variance_update_formulas():
# Test Youngs and Cramer incremental variance formulas.
# Doggie data from http://www.mathsisfun.com/data/standard-deviation.html
A = np.array([[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300],
[600, 470, 170, 430, 300]]).T
idx = 2
X1 = A[:idx, :]
X2 = A[idx:, :]
old_means = X1.mean(axis=0)
old_variances = X1.var(axis=0)
old_sample_count = X1.shape[0]
final_means, final_variances, final_count = _batch_mean_variance_update(
X2, old_means, old_variances, old_sample_count)
assert_almost_equal(final_means, A.mean(axis=0), 6)
assert_almost_equal(final_variances, A.var(axis=0), 6)
assert_almost_equal(final_count, A.shape[0])
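# A compact reference sketch (my addition, not sklearn's implementation) of
# the batch mean/variance merge that _batch_mean_variance_update is checked
# against above: combine per-batch statistics via the usual
# sum-of-squared-deviations correction term.
def _merge_mean_variance(new_batch, old_mean, old_var, old_count):
    new_count = new_batch.shape[0]
    new_mean = new_batch.mean(axis=0)
    new_var = new_batch.var(axis=0)
    total = old_count + new_count
    delta = new_mean - old_mean
    merged_mean = old_mean + delta * new_count / total
    merged_ssd = (old_var * old_count + new_var * new_count +
                  delta ** 2 * old_count * new_count / total)
    return merged_mean, merged_ssd / total, total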
def test_incremental_variance_ddof():
# Test that degrees of freedom parameter for calculations are correct.
rng = np.random.RandomState(1999)
X = rng.randn(50, 10)
n_samples, n_features = X.shape
for batch_size in [11, 20, 37]:
steps = np.arange(0, X.shape[0], batch_size)
if steps[-1] != X.shape[0]:
steps = np.hstack([steps, n_samples])
for i, j in zip(steps[:-1], steps[1:]):
batch = X[i:j, :]
if i == 0:
incremental_means = batch.mean(axis=0)
incremental_variances = batch.var(axis=0)
# Assign this twice so that the test logic is consistent
incremental_count = batch.shape[0]
sample_count = batch.shape[0]
else:
result = _batch_mean_variance_update(
batch, incremental_means, incremental_variances,
sample_count)
(incremental_means, incremental_variances,
incremental_count) = result
sample_count += batch.shape[0]
calculated_means = np.mean(X[:j], axis=0)
calculated_variances = np.var(X[:j], axis=0)
assert_almost_equal(incremental_means, calculated_means, 6)
assert_almost_equal(incremental_variances,
calculated_variances, 6)
assert_equal(incremental_count, sample_count)
def test_vector_sign_flip():
# Testing that sign flip is working & largest value has positive sign
data = np.random.RandomState(36).randn(5, 5)
max_abs_rows = np.argmax(np.abs(data), axis=1)
data_flipped = _deterministic_vector_sign_flip(data)
max_rows = np.argmax(data_flipped, axis=1)
assert_array_equal(max_abs_rows, max_rows)
signs = np.sign(data[range(data.shape[0]), max_abs_rows])
assert_array_equal(data, data_flipped * signs[:, np.newaxis])
|
bsd-3-clause
|
aabadie/scikit-learn
|
sklearn/ensemble/__init__.py
|
153
|
1382
|
"""
The :mod:`sklearn.ensemble` module includes ensemble-based methods for
classification, regression and anomaly detection.
"""
from .base import BaseEnsemble
from .forest import RandomForestClassifier
from .forest import RandomForestRegressor
from .forest import RandomTreesEmbedding
from .forest import ExtraTreesClassifier
from .forest import ExtraTreesRegressor
from .bagging import BaggingClassifier
from .bagging import BaggingRegressor
from .iforest import IsolationForest
from .weight_boosting import AdaBoostClassifier
from .weight_boosting import AdaBoostRegressor
from .gradient_boosting import GradientBoostingClassifier
from .gradient_boosting import GradientBoostingRegressor
from .voting_classifier import VotingClassifier
from . import bagging
from . import forest
from . import weight_boosting
from . import gradient_boosting
from . import partial_dependence
__all__ = ["BaseEnsemble",
"RandomForestClassifier", "RandomForestRegressor",
"RandomTreesEmbedding", "ExtraTreesClassifier",
"ExtraTreesRegressor", "BaggingClassifier",
"BaggingRegressor", "IsolationForest", "GradientBoostingClassifier",
"GradientBoostingRegressor", "AdaBoostClassifier",
"AdaBoostRegressor", "VotingClassifier",
"bagging", "forest", "gradient_boosting",
"partial_dependence", "weight_boosting"]
|
bsd-3-clause
|
elkingtonmcb/scikit-learn
|
sklearn/manifold/tests/test_locally_linear.py
|
232
|
4761
|
from itertools import product
from nose.tools import assert_true
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy import linalg
from sklearn import neighbors, manifold
from sklearn.manifold.locally_linear import barycenter_kneighbors_graph
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import ignore_warnings
eigen_solvers = ['dense', 'arpack']
#----------------------------------------------------------------------
# Test utility routines
def test_barycenter_kneighbors_graph():
X = np.array([[0, 1], [1.01, 1.], [2, 0]])
A = barycenter_kneighbors_graph(X, 1)
assert_array_almost_equal(
A.toarray(),
[[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.]])
A = barycenter_kneighbors_graph(X, 2)
# check that columns sum to one
assert_array_almost_equal(np.sum(A.toarray(), 1), np.ones(3))
pred = np.dot(A.toarray(), X)
assert_less(linalg.norm(pred - X) / X.shape[0], 1)
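# Intuition check (my addition, not part of the original test module): each
# row of the barycenter graph holds the weights that best reconstruct a sample
# from its k nearest neighbours, constrained to sum to one, so W.dot(X) should
# stay close to X. With a single neighbour the weight is simply 1 on the
# nearest point, which is what the first assertion above verifies.
def _reconstruction_residual(X, n_neighbors):
    W = barycenter_kneighbors_graph(X, n_neighbors).toarray()
    return linalg.norm(np.dot(W, X) - X) / X.shape[0]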
#----------------------------------------------------------------------
# Test LLE by computing the reconstruction error on some manifolds.
def test_lle_simple_grid():
# note: ARPACK is numerically unstable, so this test will fail for
# some random seeds. We choose 2 because the tests pass.
rng = np.random.RandomState(2)
tol = 0.1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(5), repeat=2)))
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
clf = manifold.LocallyLinearEmbedding(n_neighbors=5,
n_components=n_components,
random_state=rng)
tol = 0.1
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X, 'fro')
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
assert_less(reconstruction_error, tol)
assert_almost_equal(clf.reconstruction_error_,
reconstruction_error, decimal=1)
# re-embed a noisy version of X using the transform method
noise = rng.randn(*X.shape) / 100
X_reembedded = clf.transform(X + noise)
assert_less(linalg.norm(X_reembedded - clf.embedding_), tol)
def test_lle_manifold():
rng = np.random.RandomState(0)
# similar test on a slightly more complex manifold
X = np.array(list(product(np.arange(18), repeat=2)))
X = np.c_[X, X[:, 0] ** 2 / 18]
X = X + 1e-10 * rng.uniform(size=X.shape)
n_components = 2
for method in ["standard", "hessian", "modified", "ltsa"]:
clf = manifold.LocallyLinearEmbedding(n_neighbors=6,
n_components=n_components,
method=method, random_state=0)
tol = 1.5 if method == "standard" else 3
N = barycenter_kneighbors_graph(X, clf.n_neighbors).toarray()
reconstruction_error = linalg.norm(np.dot(N, X) - X)
assert_less(reconstruction_error, tol)
for solver in eigen_solvers:
clf.set_params(eigen_solver=solver)
clf.fit(X)
assert_true(clf.embedding_.shape[1] == n_components)
reconstruction_error = linalg.norm(
np.dot(N, clf.embedding_) - clf.embedding_, 'fro') ** 2
details = ("solver: %s, method: %s" % (solver, method))
assert_less(reconstruction_error, tol, msg=details)
assert_less(np.abs(clf.reconstruction_error_ -
reconstruction_error),
tol * reconstruction_error, msg=details)
def test_pipeline():
# check that LocallyLinearEmbedding works fine as a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
from sklearn import pipeline, datasets
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('filter', manifold.LocallyLinearEmbedding(random_state=0)),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
# Test the error raised when the weight matrix is singular
def test_singular_matrix():
from nose.tools import assert_raises
M = np.ones((10, 3))
f = ignore_warnings
assert_raises(ValueError, f(manifold.locally_linear_embedding),
M, 2, 1, method='standard', eigen_solver='arpack')
|
bsd-3-clause
|
dhruvparamhans/zipline
|
tests/history_cases.py
|
2
|
21207
|
"""
Test case definitions for history tests.
"""
import pandas as pd
import numpy as np
from zipline.finance.trading import TradingEnvironment
from zipline.history.history import HistorySpec
from zipline.protocol import BarData
from zipline.utils.test_utils import to_utc
def mixed_frequency_expected_index(count, frequency):
"""
Helper for enumerating expected indices for test_mixed_frequency.
"""
env = TradingEnvironment.instance()
minute = MIXED_FREQUENCY_MINUTES[count]
if frequency == '1d':
return [env.previous_open_and_close(minute)[1], minute]
elif frequency == '1m':
return [env.previous_market_minute(minute), minute]
def mixed_frequency_expected_data(count, frequency):
"""
    Helper for enumerating expected data for test_mixed_frequency.
"""
if frequency == '1d':
# First day of this test is July 3rd, which is a half day.
if count < 210:
return [np.nan, count]
else:
return [209, count]
elif frequency == '1m':
if count == 0:
return [np.nan, count]
else:
return [count - 1, count]
MIXED_FREQUENCY_MINUTES = TradingEnvironment.instance().market_minute_window(
to_utc('2013-07-03 9:31AM'), 600,
)
ONE_MINUTE_PRICE_ONLY_SPECS = [
HistorySpec(1, '1m', 'price', True),
]
DAILY_OPEN_CLOSE_SPECS = [
HistorySpec(3, '1d', 'open_price', False),
HistorySpec(3, '1d', 'close_price', False),
]
ILLIQUID_PRICES_SPECS = [
HistorySpec(3, '1m', 'price', False),
HistorySpec(5, '1m', 'price', True),
]
MIXED_FREQUENCY_SPECS = [
HistorySpec(1, '1m', 'price', False),
HistorySpec(2, '1m', 'price', False),
HistorySpec(2, '1d', 'price', False),
]
MIXED_FIELDS_SPECS = [
HistorySpec(3, '1m', 'price', True),
HistorySpec(3, '1m', 'open_price', True),
HistorySpec(3, '1m', 'close_price', True),
HistorySpec(3, '1m', 'high', True),
HistorySpec(3, '1m', 'low', True),
HistorySpec(3, '1m', 'volume', True),
]
HISTORY_CONTAINER_TEST_CASES = {
# June 2013
# Su Mo Tu We Th Fr Sa
# 1
# 2 3 4 5 6 7 8
# 9 10 11 12 13 14 15
# 16 17 18 19 20 21 22
# 23 24 25 26 27 28 29
# 30
'test one minute price only': {
# A list of HistorySpec objects.
'specs': ONE_MINUTE_PRICE_ONLY_SPECS,
# Sids for the test.
'sids': [1],
# Start date for test.
'dt': to_utc('2013-06-21 9:31AM'),
        # Sequence of updates to the container
'updates': [
BarData(
{
1: {
'price': 5,
'dt': to_utc('2013-06-21 9:31AM'),
},
},
),
BarData(
{
1: {
'price': 6,
'dt': to_utc('2013-06-21 9:32AM'),
},
},
),
],
# Expected results
'expected': {
ONE_MINUTE_PRICE_ONLY_SPECS[0].key_str: [
pd.DataFrame(
data={
1: [5],
},
index=[
to_utc('2013-06-21 9:31AM'),
],
),
pd.DataFrame(
data={
1: [6],
},
index=[
to_utc('2013-06-21 9:32AM'),
],
),
],
},
},
'test daily open close': {
# A list of HistorySpec objects.
'specs': DAILY_OPEN_CLOSE_SPECS,
# Sids for the test.
'sids': [1],
# Start date for test.
'dt': to_utc('2013-06-21 9:31AM'),
# Sequence of updates to the container
'updates': [
BarData(
{
1: {
'open_price': 10,
'close_price': 11,
'dt': to_utc('2013-06-21 10:00AM'),
},
},
),
BarData(
{
1: {
'open_price': 12,
'close_price': 13,
'dt': to_utc('2013-06-21 3:30PM'),
},
},
),
BarData(
{
1: {
'open_price': 14,
'close_price': 15,
# Wait a full market day before the next bar.
# We should end up with nans for Monday the 24th.
'dt': to_utc('2013-06-25 9:31AM'),
},
},
),
],
# Dictionary mapping spec_key -> list of expected outputs
'expected': {
# open
DAILY_OPEN_CLOSE_SPECS[0].key_str: [
pd.DataFrame(
data={
1: [np.nan, np.nan, 10]
},
index=[
to_utc('2013-06-19 4:00PM'),
to_utc('2013-06-20 4:00PM'),
to_utc('2013-06-21 10:00AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, np.nan, 10]
},
index=[
to_utc('2013-06-19 4:00PM'),
to_utc('2013-06-20 4:00PM'),
to_utc('2013-06-21 3:30PM'),
],
),
pd.DataFrame(
data={
1: [10, np.nan, 14]
},
index=[
to_utc('2013-06-21 4:00PM'),
to_utc('2013-06-24 4:00PM'),
to_utc('2013-06-25 9:31AM'),
],
),
],
# close
DAILY_OPEN_CLOSE_SPECS[1].key_str: [
pd.DataFrame(
data={
1: [np.nan, np.nan, 11]
},
index=[
to_utc('2013-06-19 4:00PM'),
to_utc('2013-06-20 4:00PM'),
to_utc('2013-06-21 10:00AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, np.nan, 13]
},
index=[
to_utc('2013-06-19 4:00PM'),
to_utc('2013-06-20 4:00PM'),
to_utc('2013-06-21 3:30PM'),
],
),
pd.DataFrame(
data={
1: [13, np.nan, 15]
},
index=[
to_utc('2013-06-21 4:00PM'),
to_utc('2013-06-24 4:00PM'),
to_utc('2013-06-25 9:31AM'),
],
),
],
},
},
'test illiquid prices': {
# A list of HistorySpec objects.
'specs': ILLIQUID_PRICES_SPECS,
# Sids for the test.
'sids': [1],
# Start date for test.
'dt': to_utc('2013-06-28 9:31AM'),
# Sequence of updates to the container
'updates': [
BarData(
{
1: {
'price': 10,
'dt': to_utc('2013-06-28 9:31AM'),
},
},
),
BarData(
{
1: {
'price': 11,
'dt': to_utc('2013-06-28 9:32AM'),
},
},
),
BarData(
{
1: {
'price': 12,
'dt': to_utc('2013-06-28 9:33AM'),
},
},
),
BarData(
{
1: {
'price': 13,
# Note: Skipping 9:34 to simulate illiquid bar/missing
# data.
'dt': to_utc('2013-06-28 9:35AM'),
},
},
),
],
# Dictionary mapping spec_key -> list of expected outputs
'expected': {
ILLIQUID_PRICES_SPECS[0].key_str: [
pd.DataFrame(
data={
1: [np.nan, np.nan, 10],
},
index=[
to_utc('2013-06-27 3:59PM'),
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, 10, 11],
},
index=[
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
],
),
pd.DataFrame(
data={
1: [10, 11, 12],
},
index=[
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
to_utc('2013-06-28 9:33AM'),
],
),
# Since there's no update for 9:34, this is called at 9:35.
pd.DataFrame(
data={
1: [12, np.nan, 13],
},
index=[
to_utc('2013-06-28 9:33AM'),
to_utc('2013-06-28 9:34AM'),
to_utc('2013-06-28 9:35AM'),
],
),
],
ILLIQUID_PRICES_SPECS[1].key_str: [
pd.DataFrame(
data={
1: [np.nan, np.nan, np.nan, np.nan, 10],
},
index=[
to_utc('2013-06-27 3:57PM'),
to_utc('2013-06-27 3:58PM'),
to_utc('2013-06-27 3:59PM'),
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, np.nan, np.nan, 10, 11],
},
index=[
to_utc('2013-06-27 3:58PM'),
to_utc('2013-06-27 3:59PM'),
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, np.nan, 10, 11, 12],
},
index=[
to_utc('2013-06-27 3:59PM'),
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
to_utc('2013-06-28 9:33AM'),
],
),
# Since there's no update for 9:34, this is called at 9:35.
# The 12 value from 9:33 should be forward-filled.
pd.DataFrame(
data={
1: [10, 11, 12, 12, 13],
},
index=[
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
to_utc('2013-06-28 9:33AM'),
to_utc('2013-06-28 9:34AM'),
to_utc('2013-06-28 9:35AM'),
],
),
],
},
},
'test mixed frequencies': {
# A list of HistorySpec objects.
'specs': MIXED_FREQUENCY_SPECS,
# Sids for the test.
'sids': [1],
# Start date for test.
# July 2013
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6
# 7 8 9 10 11 12 13
# 14 15 16 17 18 19 20
# 21 22 23 24 25 26 27
# 28 29 30 31
'dt': to_utc('2013-07-03 9:31AM'),
# Sequence of updates to the container
'updates': [
BarData(
{
1: {
'price': count,
'dt': dt,
}
}
)
for count, dt in enumerate(MIXED_FREQUENCY_MINUTES)
],
# Dictionary mapping spec_key -> list of expected outputs.
'expected': {
MIXED_FREQUENCY_SPECS[0].key_str: [
pd.DataFrame(
data={
1: [count],
},
index=[minute],
)
for count, minute in enumerate(MIXED_FREQUENCY_MINUTES)
],
MIXED_FREQUENCY_SPECS[1].key_str: [
pd.DataFrame(
data={
1: mixed_frequency_expected_data(count, '1m'),
},
index=mixed_frequency_expected_index(count, '1m'),
)
for count in range(len(MIXED_FREQUENCY_MINUTES))
],
MIXED_FREQUENCY_SPECS[2].key_str: [
pd.DataFrame(
data={
1: mixed_frequency_expected_data(count, '1d'),
},
index=mixed_frequency_expected_index(count, '1d'),
)
for count in range(len(MIXED_FREQUENCY_MINUTES))
]
},
},
'test multiple fields and sids': {
# A list of HistorySpec objects.
'specs': MIXED_FIELDS_SPECS,
# Sids for the test.
'sids': [1, 10],
# Start date for test.
'dt': to_utc('2013-06-28 9:31AM'),
# Sequence of updates to the container
'updates': [
BarData(
{
1: {
'dt': dt,
'price': count,
'open_price': count,
'close_price': count,
'high': count,
'low': count,
'volume': count,
},
10: {
'dt': dt,
'price': count * 10,
'open_price': count * 10,
'close_price': count * 10,
'high': count * 10,
'low': count * 10,
'volume': count * 10,
},
},
)
for count, dt in enumerate([
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
to_utc('2013-06-28 9:33AM'),
# NOTE: No update for 9:34
to_utc('2013-06-28 9:35AM'),
])
],
# Dictionary mapping spec_key -> list of expected outputs
'expected': dict(
# Build a dict from a list of tuples. Doing it this way because
# there are two distinct cases we want to test: forward-fillable
# fields and non-forward-fillable fields.
[
(
# Non forward-fill fields
key,
[
pd.DataFrame(
data={
1: [np.nan, np.nan, 0],
10: [np.nan, np.nan, 0],
},
index=[
to_utc('2013-06-27 3:59PM'),
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
],
# Missing volume data should manifest as 0's rather
# than nans.
).fillna(0 if 'volume' in key else np.nan),
pd.DataFrame(
data={
1: [np.nan, 0, 1],
10: [np.nan, 0, 10],
},
index=[
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
],
).fillna(0 if 'volume' in key else np.nan),
pd.DataFrame(
data={
1: [0, 1, 2],
10: [0, 10, 20],
},
index=[
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
to_utc('2013-06-28 9:33AM'),
],
                            # Note: We call fillna() here even though there are
                            #       no NaNs, because doing so makes it less
                            #       likely that we introduce a bug by
                            #       copy/pasting in the future.
).fillna(0 if 'volume' in key else np.nan),
pd.DataFrame(
data={
1: [2, np.nan, 3],
10: [20, np.nan, 30],
},
index=[
to_utc('2013-06-28 9:33AM'),
to_utc('2013-06-28 9:34AM'),
to_utc('2013-06-28 9:35AM'),
],
).fillna(0 if 'volume' in key else np.nan),
],
)
for key in [spec.key_str for spec in MIXED_FIELDS_SPECS
if spec.field not in HistorySpec.FORWARD_FILLABLE]
]
            + # Concatenate the expected results for non-ffillable fields
              # with the expected results for ffillable fields.
[
(
# Forward-fillable fields
key,
[
pd.DataFrame(
data={
1: [np.nan, np.nan, 0],
10: [np.nan, np.nan, 0],
},
index=[
to_utc('2013-06-27 3:59PM'),
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
],
),
pd.DataFrame(
data={
1: [np.nan, 0, 1],
10: [np.nan, 0, 10],
},
index=[
to_utc('2013-06-27 4:00PM'),
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
],
),
pd.DataFrame(
data={
1: [0, 1, 2],
10: [0, 10, 20],
},
index=[
to_utc('2013-06-28 9:31AM'),
to_utc('2013-06-28 9:32AM'),
to_utc('2013-06-28 9:33AM'),
],
),
pd.DataFrame(
data={
1: [2, 2, 3],
10: [20, 20, 30],
},
index=[
to_utc('2013-06-28 9:33AM'),
to_utc('2013-06-28 9:34AM'),
to_utc('2013-06-28 9:35AM'),
],
),
],
)
for key in [spec.key_str for spec in MIXED_FIELDS_SPECS
if spec.field in HistorySpec.FORWARD_FILLABLE]
]
),
},
}
|
apache-2.0
|
krasch/smart-assistants
|
examples/conflict_uncertainty.py
|
1
|
2452
|
# -*- coding: UTF-8 -*-
"""
Scatter-plot conflict versus uncertainty at different cutoffs to find regions of the uncertainty/conflict space
where the algorithm is more or less successful. Further details for this experiment can be found in the paper in
Section 6.6 and in the dissertation in Section 5.5.7.
"""
import sys
sys.path.append("..")
import pandas
from recsys.classifiers.temporal import TemporalEvidencesClassifier
from recsys.dataset import load_dataset
from evaluation.metrics import results_as_dataframe
from evaluation import plot
import config
#configuration
data = load_dataset("../datasets/houseA.csv", "../datasets/houseA.config")
#run the classifier on the whole dataset
cls = TemporalEvidencesClassifier(data.features, data.target_names)
cls = cls.fit(data.data, data.target)
results = cls.predict(data.data, include_conflict_theta=True)
#extract conflict and uncertainty and convert recommendations to pandas representation
recommendations, conflict, uncertainty = zip(*results)
results = results_as_dataframe(data.target, list(recommendations))
#for each row, mark correct recommendations with "1", false recommendations with "0"
find_matches_in_row = lambda row: [1 if col == row.name else 0 for col in row]
results = results.apply(find_matches_in_row, axis=1)
#set uncertainty and conflict as multi-index
results.index = pandas.MultiIndex.from_tuples(zip(conflict, uncertainty),
names=["Conflict", "Uncertainty"])
#found_within: the correct service was found within X recommendations
#-> apply cumulative sum on each row so that the "1" marker is set for all columns after it first appears
found_within = results.cumsum(axis=1)
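#for example (hypothetical row, not taken from the dataset): a row marked
#[0, 0, 1, 0, 0] (correct service first appears at cutoff 3) becomes
#[0, 0, 1, 1, 1] after the cumulative sum, so column X answers whether the
#correct service was found within the first X recommendations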
#create one plot for each cutoff
conf = plot.plot_config(config.plot_directory, sub_dirs=[data.name, "conflict-uncertainty"],
prefix="found_within_", img_type=config.img_type)
plot.conflict_uncertainty_scatter(found_within, conf)
#not found within: the correct service was not found within X recommendations; this is the reverse of found_within
not_found_within = found_within.apply(lambda col: 1-col)
#create one plot for each cutoff
conf = plot.plot_config(config.plot_directory, sub_dirs=[data.name, "conflict-uncertainty"],
prefix="not_found_within_", img_type=config.img_type)
plot.conflict_uncertainty_scatter(not_found_within, conf)
print "Results can be found in the \"%s\" directory" % config.plot_directory
|
mit
|
AhmedCh/bus-arrival-forcast
|
bus-arrival-forcast/utilities.py
|
1
|
9324
|
import configparser
import os
import sys
import datetime
import pandas as pd
from bokeh.io import output_notebook
from bokeh.models import (
GMapPlot, GMapOptions, ColumnDataSource, Circle, DataRange1d, PanTool, WheelZoomTool, BoxSelectTool)
from geopy.distance import great_circle
def get_google_maps_plot(route_lat=[], route_lon=[], gps_lat=[], gps_lon=[]):
config = configparser.ConfigParser()
config.read('config.ini')
google_api_key = config['DEFAULT']['GoogleMapsApiKey']
map_options = GMapOptions(lat=37.751, lng=-122.42, map_type="roadmap", zoom=13)
plot = GMapPlot(
x_range=DataRange1d(), y_range=DataRange1d(), map_options=map_options,
)
plot.title.text = "San Francisco"
plot.api_key = google_api_key
source_route = ColumnDataSource(
data=dict(
lat=route_lat,
lon=route_lon,
)
)
source_trip = ColumnDataSource(
data=dict(
lat=gps_lat,
lon=gps_lon,
)
)
blue_circle_route = Circle(x="lon", y="lat", size=5, fill_color="blue", fill_alpha=0.8, line_color=None)
red_circle_trip = Circle(x="lon", y="lat", size=5, fill_color="red", fill_alpha=0.8, line_color=None)
plot.add_glyph(source_route, blue_circle_route)
plot.add_glyph(source_trip, red_circle_trip)
plot.add_tools(PanTool(), WheelZoomTool(), BoxSelectTool())
output_notebook(hide_banner=True)
return plot
def distances_between_consecutive_lat_lon_points(lat_long):
distances = [great_circle(t, s).meters for s, t in zip(lat_long, lat_long[1:])]
return distances
def get_distance_between_geo_locations(pt1, pt2):
distance = great_circle(pt1, pt2)
return distance.meters
def get_point_nearest_to(list_of_lat_long_coordinates, lat_long):
smallest_dist = sys.maxsize
nearest_point = None
for i in list_of_lat_long_coordinates:
distance_from_stop1 = get_distance_between_geo_locations(tuple((i[0], i[1])), lat_long)
if distance_from_stop1 < smallest_dist:
smallest_dist = distance_from_stop1
nearest_point = i
return nearest_point, smallest_dist
def get_stop_nearest_to(stops_df, lat_long):
smallest_dist = sys.maxsize
nearest_point = None
for i, row in stops_df.iterrows():
distance_from_stop1 = get_distance_between_geo_locations(tuple((row['stop_lat'], row['stop_lon'])), lat_long)
if distance_from_stop1 < smallest_dist:
smallest_dist = distance_from_stop1
nearest_point = row
return nearest_point, smallest_dist
def add_nearest_stop(row, list_of_stops):
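    """Return `row` augmented with information about the nearest stop.

    Looks up the stop in `list_of_stops` closest to the row's LATITUDE/LONGITUDE
    and records its distance, coordinates, stop id and stop sequence number.
    """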
lat = row['LATITUDE']
lon = row['LONGITUDE']
point, distance = get_stop_nearest_to(list_of_stops, (lat, lon))
row['Nearest_Stop_Distance'] = distance
row['Nearest_Stop_Coordinates'] = point['stop_lat'], point['stop_lon']
row['Nearest_Stop_Id'] = point['stop_id']
row['Nearest_Stop_seq_no'] = point['stop_sequence']
return row
class GtfsStore(object):
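    """Loads the GTFS text files (routes, trips, stop_times, stops, shapes)
    from `dir_path` into pandas DataFrames and provides convenience lookups
    for routes, trips, stops and shape coordinates.
    """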
def __init__(self, dir_path):
self.dir_path = dir_path
self.routes_df = None
self.trips_df = None
self.stop_times_df = None
self.stops_df = None
self.shapes_df = None
def get_data(self):
return self.routes_df, self.shapes_df, self.stop_times_df, self.stops_df, self.trips_df
def init(self):
routes_file_path = os.path.join(self.dir_path, 'routes.txt')
shapes_file_path = os.path.join(self.dir_path, 'shapes.txt')
trips_file_path = os.path.join(self.dir_path, 'trips.txt')
stop_times_file_path = os.path.join(self.dir_path, 'stop_times.txt')
stops_file_path = os.path.join(self.dir_path, 'stops.txt')
self.routes_df = pd.read_csv(routes_file_path, low_memory=False)
self.trips_df = pd.read_csv(trips_file_path, low_memory=False)
self.stop_times_df = pd.read_csv(stop_times_file_path, low_memory=False,
usecols=['trip_id', 'arrival_time', 'departure_time', 'stop_id',
'stop_sequence'])
self.stops_df = pd.read_csv(stops_file_path, low_memory=False,
usecols=['stop_id', 'stop_name', 'stop_lat', 'stop_lon'])
self.shapes_df = pd.read_csv(shapes_file_path, low_memory=False)
def get_route_coordinates_for_a_trip(self, trip_id):
shape_id = self.trips_df[self.trips_df['trip_id'] == trip_id]['shape_id'].values[0]
return self.get_shape_points_for(shape_id)
def get_shape_points_for(self, shape_id):
shape_points = self.shapes_df[self.shapes_df['shape_id'] == shape_id]
route_lat = shape_points['shape_pt_lat'].tolist()
route_lon = shape_points['shape_pt_lon'].tolist()
return route_lat, route_lon
def get_route_id_for(self, route_short_name=None):
return self.routes_df[self.routes_df['route_short_name'] == route_short_name]['route_id'].values[0]
def get_stops_for_trip(self, trip_id):
stops_for_route = self.stop_times_df[self.stop_times_df['trip_id'] == trip_id]
stops_for_route = stops_for_route.merge(self.stops_df, left_on='stop_id', right_on='stop_id')
return stops_for_route
def get_stops_lat_lon_for_trip(self, trip_id):
stops = self.get_stops_for_trip(trip_id)
stop_lat_lon = stops[['stop_lat', 'stop_lon']]
stop_lat_lon = [tuple(x) for x in stop_lat_lon.values]
return stop_lat_lon
def get_distances_between_stops_for_trip(self, trip_id):
stop_lat_long = self.get_stops_lat_lon_for_trip(trip_id)
distances = distances_between_consecutive_lat_lon_points(stop_lat_long)
return distances
def get_coordinates_for_stop(self, stop_id):
var = self.stops_df.loc[self.stops_df['stop_id'] == stop_id, ['stop_lat', 'stop_lon']]
return tuple((var.iloc[0]['stop_lat'], var.iloc[0]['stop_lon']))
def get_nearest_point_to_stop(self, stop_id, gps_points):
stop_coordinates = self.get_coordinates_for_stop(stop_id)
return get_point_nearest_to(gps_points, stop_coordinates)
class RawAVlDataStore(object):
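    """Loads raw AVL (vehicle location) data from a CSV file, dropping rows
    without a TRAIN_ASSIGNMENT, and provides simple GPS trace extraction.
    """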
def __init__(self, file_path):
self.file_path = file_path
self.avl_df = None
def get_data(self):
return self.avl_df
def init(self):
self.avl_df = pd.read_csv(self.file_path, low_memory=False)
self.avl_df.dropna(subset=['TRAIN_ASSIGNMENT'], inplace=True)
        # reorder columns so that LATITUDE comes before LONGITUDE
cols = ['REV', 'REPORT_TIME', 'VEHICLE_TAG', 'SPEED', 'HEADING', 'TRAIN_ASSIGNMENT', 'PREDICTABLE', 'LATITUDE',
'LONGITUDE']
self.avl_df = self.avl_df[cols]
def get_sample_trace_for(self, trip_assignment=None, num_points=10):
df = self.avl_df[self.avl_df['TRAIN_ASSIGNMENT'] == trip_assignment]
df = df.head(num_points)
gps_lat = df['LATITUDE'].tolist()
gps_lon = df['LONGITUDE'].tolist()
gps_report_time = df['REPORT_TIME'].tolist()
return gps_lat, gps_lon, gps_report_time
class DataProcessor(object):
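    """Combines GTFS schedule data with raw AVL GPS traces, e.g. to compute
    observed travel times between stops and to annotate GPS points with
    their nearest scheduled stop.
    """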
def __init__(self, gtfs_store, avl_store):
self.avl_store = avl_store
self.gtfs_store = gtfs_store
def get_actual_travel_time_between(self, stop1_id, stop2_id, gps_points):
closest_to_stop1, dist1 = self.gtfs_store.get_nearest_point_to_stop(stop1_id, gps_points)
closest_to_stop2, dist2 = self.gtfs_store.get_nearest_point_to_stop(stop2_id, gps_points)
time_stamp1 = datetime.datetime.strptime(closest_to_stop1[2], "%m/%d/%Y %H:%M:%S")
time_stamp2 = datetime.datetime.strptime(closest_to_stop2[2], "%m/%d/%Y %H:%M:%S")
travel_time = time_stamp1 - time_stamp2
return closest_to_stop1, closest_to_stop2, travel_time
def add_nearest_stop_data_and_write_to_file(self):
routes_df, shapes_df, stop_times_df, stops_df, trips_df = self.gtfs_store.get_data()
avl_df = self.avl_store.get_data()
route_id_for_bus14 = self.gtfs_store.get_route_id_for(route_short_name='14')
        # Filter GPS data for blocks corresponding to the route we are interested in
        # TODO: remove the service_id == 1 filter to include trips for other service ids
bus14_trips_df = trips_df[(trips_df['route_id'] == route_id_for_bus14) & (trips_df['service_id'] == 1)]
blocks_for_route14 = bus14_trips_df.sort_values(by='block_id')['block_id'].unique()
blocks_for_bus14_as_str = list(map(str, blocks_for_route14))
mask = avl_df['TRAIN_ASSIGNMENT'].isin(blocks_for_bus14_as_str)
gps_data_for_route_df = avl_df[mask]
cols = ['REPORT_TIME', 'TRAIN_ASSIGNMENT', 'LATITUDE', 'LONGITUDE']
gps_data_for_route_df = gps_data_for_route_df[cols]
        # For now, hard-code the trip used for getting the stops; later, use the trips above to build the list of stops
stops_for_selected_trip = self.gtfs_store.get_stops_for_trip(7091318)
gps_data_and_stop_info = gps_data_for_route_df.apply(add_nearest_stop, axis=1,
list_of_stops=stops_for_selected_trip)
gps_data_and_stop_info.to_csv('data/gps_data_with_nearest_stop_info.csv', index=False)
return gps_data_and_stop_info
|
mit
|
kushalbhola/MyStuff
|
Practice/PythonApplication/env/Lib/site-packages/pandas/tests/series/test_block_internals.py
|
2
|
1420
|
import pandas as pd
# Segregated collection of methods that require the BlockManager internal data
# structure
class TestSeriesBlockInternals:
def test_setitem_invalidates_datetime_index_freq(self):
# GH#24096 altering a datetime64tz Series inplace invalidates the
# `freq` attribute on the underlying DatetimeIndex
dti = pd.date_range("20130101", periods=3, tz="US/Eastern")
ts = dti[1]
ser = pd.Series(dti)
assert ser._values is not dti
assert ser._values._data.base is not dti._data._data.base
assert dti.freq == "D"
ser.iloc[1] = pd.NaT
assert ser._values.freq is None
# check that the DatetimeIndex was not altered in place
assert ser._values is not dti
assert ser._values._data.base is not dti._data._data.base
assert dti[1] == ts
assert dti.freq == "D"
def test_dt64tz_setitem_does_not_mutate_dti(self):
# GH#21907, GH#24096
dti = pd.date_range("2016-01-01", periods=10, tz="US/Pacific")
ts = dti[0]
ser = pd.Series(dti)
assert ser._values is not dti
assert ser._values._data.base is not dti._data._data.base
assert ser._data.blocks[0].values is not dti
assert ser._data.blocks[0].values._data.base is not dti._data._data.base
ser[::3] = pd.NaT
assert ser[0] is pd.NaT
assert dti[0] == ts
|
apache-2.0
|
lucidfrontier45/scikit-learn
|
doc/datasets/mldata_fixture.py
|
8
|
1191
|
"""Fixture module to skip the datasets loading when offline
Mock urllib2 access to mldata.org
"""
from os import makedirs
from os.path import join
import numpy as np
import tempfile
import shutil
from sklearn import datasets
from sklearn.utils.testing import mock_urllib2
def globs(globs):
# setup mock urllib2 module to avoid downloading from mldata.org
mock_dataset = {
'mnist-original': {
'data': np.empty((70000, 784)),
'label': np.repeat(np.arange(10, dtype='d'), 7000),
},
'iris': {
'data': np.empty((150, 4)),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
}
global custom_data_home
custom_data_home = tempfile.mkdtemp()
makedirs(join(custom_data_home, 'mldata'))
globs['custom_data_home'] = custom_data_home
global _urllib2_ref
_urllib2_ref = datasets.mldata.urllib2
globs['_urllib2_ref'] = _urllib2_ref
datasets.mldata.urllib2 = mock_urllib2(mock_dataset)
return globs
def teardown_module(module):
datasets.mldata.urllib2 = _urllib2_ref
shutil.rmtree(custom_data_home)
|
bsd-3-clause
|
iABC2XYZ/abc
|
DM_Twiss/TwissTrain7.py
|
1
|
2508
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 20 13:37:16 2017
Author: Peiyong Jiang : jiangpeiyong@impcas.ac.cn
Function:
Train with a single, more complex combined objective; the result is not better.
"""
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
plt.close('all')
emitX=1
alphaX=-15.
betaX=1.
gammaX=(1.+alphaX**2)/betaX
sigmaX=np.array([[betaX,-alphaX],[-alphaX,gammaX]])*emitX;
numPart=np.int32(1e5);
X=np.random.multivariate_normal([0.,0.],sigmaX,numPart).T
plt.figure(1)
plt.plot(X[0,:],X[1,:],'.')
##
def WeightP(shape):
initial=tf.truncated_normal(shape,stddev=0.1)
return tf.Variable(initial)
P_1=WeightP([2,2])
xI=tf.placeholder(tf.float32,[2,None])
xO=tf.matmul(P_1,xI)
x2=tf.reduce_mean(xO[0]**2)
xp2=tf.reduce_mean(xO[1]**2)
xxp=tf.reduce_mean(xO[0]*xO[1])
lossX2=(x2-1.)**2
lossXp2=(xp2-1.)**2
lossXxp=xxp**2
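# lossX2, lossXp2 and lossXxp correspond to a normalized phase space (unit
# second moments <x^2>=<xp^2>=1 and zero <x*xp> correlation); in this version
# of the script they are superseded by the single combined objective
# lossTotal defined below.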
rX2=tf.random_normal([1],mean=1.0)
rXp2=tf.random_normal([1],mean=1.0)
rXxp=tf.random_normal([1],mean=1.0)
#lossTotal=rX2[0]*lossX2+rXp2*lossXp2+lossXxp
#lossTotal=(rX2[0]*lossX2+1.)*(rXp2*lossXp2+1.)*(rXxp*lossXxp+1.)
lossTotal=tf.reduce_mean((xO[0]**2*xO[1]**2-1.)**2)
#xCov=tf.red
rateLearn=5e-4
optTotal=tf.train.AdamOptimizer(rateLearn)
trainTotal=optTotal.minimize(lossTotal)
sess = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
sess.run(tf.global_variables_initializer())
sizeBatch=1024
for _ in xrange(8000):
startBatch=np.random.randint(0,high=numPart-sizeBatch-1)
xFeed=X[:,startBatch:startBatch+sizeBatch:]
sess.run(trainTotal,feed_dict={xI:xFeed})
#print(sess.run(LambdaR))
#print('---------------------------')
print(sess.run(lossTotal,feed_dict={xI:X}),_)
print('_______________________________________________')
'''
if ( _ % 100 ==0):
zReal=sess.run(xO,feed_dict={xI:X})
plt.figure(20)
plt.plot(zReal[0,:],zReal[1,:],'r.')
plt.axis('equal')
plt.pause(0.2)
'''
zReal=sess.run(xO,feed_dict={xI:X})
plt.figure(2)
plt.plot(zReal[0,:],zReal[1,:],'r.')
plt.axis('equal')
plt.figure(10)
plt.hold
plt.plot(zReal[0,:],zReal[1,:],'r.')
plt.plot(X[0,:],X[1,:],'b.')
#plt.plot(zReal[0,:],zReal[1,:],'r.')
plt.axis('equal')
plt.figure(11)
plt.hold
#plt.plot(zReal[0,:],zReal[1,:],'r.')
plt.plot(X[0,:],X[1,:],'b.')
plt.plot(zReal[0,:],zReal[1,:],'r.')
plt.axis('equal')
zRealCov=np.cov(zReal)
emitXReal=np.sqrt(np.linalg.det(zRealCov))
print(emitXReal)
|
gpl-3.0
|
bloyl/mne-python
|
tutorials/time-freq/20_sensors_time_frequency.py
|
10
|
8158
|
"""
.. _tut-sensors-time-freq:
============================================
Frequency and time-frequency sensor analysis
============================================
The objective is to show you how to explore the spectral content
of your data (frequency and time-frequency). Here we'll work on Epochs.
We will use this dataset: :ref:`somato-dataset`. It contains so-called
event-related synchronizations (ERS) / desynchronizations (ERD) in the beta band.
""" # noqa: E501
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
# Richard Höchenberger <richard.hoechenberger@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet, psd_multitaper, psd_welch
from mne.datasets import somato
###############################################################################
# Set parameters
data_path = somato.data_path()
subject = '01'
task = 'somato'
raw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg',
'sub-{}_task-{}_meg.fif'.format(subject, task))
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True, stim=False)
# Construct Epochs
event_id, tmin, tmax = 1, -1., 3.
baseline = (None, 0)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=baseline, reject=dict(grad=4000e-13, eog=350e-6),
preload=True)
epochs.resample(200., npad='auto') # resample to reduce computation time
###############################################################################
# Frequency analysis
# ------------------
#
# We start by exploring the frequency content of our epochs.
###############################################################################
# Let's first check out all channel types by averaging across epochs.
epochs.plot_psd(fmin=2., fmax=40., average=True, spatial_colors=False)
###############################################################################
# Now let's take a look at the spatial distributions of the PSD.
epochs.plot_psd_topomap(ch_type='grad', normalize=True)
###############################################################################
# Alternatively, you can also create PSDs from Epochs objects with functions
# that start with ``psd_`` such as
# :func:`mne.time_frequency.psd_multitaper` and
# :func:`mne.time_frequency.psd_welch`.
f, ax = plt.subplots()
psds, freqs = psd_multitaper(epochs, fmin=2, fmax=40, n_jobs=1)
psds = 10. * np.log10(psds)
psds_mean = psds.mean(0).mean(0)
psds_std = psds.mean(0).std(0)
ax.plot(freqs, psds_mean, color='k')
ax.fill_between(freqs, psds_mean - psds_std, psds_mean + psds_std,
color='k', alpha=.5)
ax.set(title='Multitaper PSD (gradiometers)', xlabel='Frequency (Hz)',
ylabel='Power Spectral Density (dB)')
plt.show()
###############################################################################
# Notably, :func:`mne.time_frequency.psd_welch` supports the keyword argument
# ``average``, which specifies how to estimate the PSD based on the individual
# windowed segments. The default is ``average='mean'``, which simply calculates
# the arithmetic mean across segments. Specifying ``average='median'``, in
# contrast, returns the PSD based on the median of the segments (corrected for
# bias relative to the mean), which is a more robust measure.
# Estimate PSDs based on "mean" and "median" averaging for comparison.
kwargs = dict(fmin=2, fmax=40, n_jobs=1)
psds_welch_mean, freqs_mean = psd_welch(epochs, average='mean', **kwargs)
psds_welch_median, freqs_median = psd_welch(epochs, average='median', **kwargs)
# Convert power to dB scale.
psds_welch_mean = 10 * np.log10(psds_welch_mean)
psds_welch_median = 10 * np.log10(psds_welch_median)
# We will only plot the PSD for a single sensor in the first epoch.
ch_name = 'MEG 0122'
ch_idx = epochs.info['ch_names'].index(ch_name)
epo_idx = 0
_, ax = plt.subplots()
ax.plot(freqs_mean, psds_welch_mean[epo_idx, ch_idx, :], color='k',
ls='-', label='mean of segments')
ax.plot(freqs_median, psds_welch_median[epo_idx, ch_idx, :], color='k',
ls='--', label='median of segments')
ax.set(title='Welch PSD ({}, Epoch {})'.format(ch_name, epo_idx),
xlabel='Frequency (Hz)', ylabel='Power Spectral Density (dB)')
ax.legend(loc='upper right')
plt.show()
###############################################################################
# Lastly, we can also retrieve the unaggregated segments by passing
# ``average=None`` to :func:`mne.time_frequency.psd_welch`. The dimensions of
# the returned array are ``(n_epochs, n_sensors, n_freqs, n_segments)``.
psds_welch_unagg, freqs_unagg = psd_welch(epochs, average=None, **kwargs)
print(psds_welch_unagg.shape)
###############################################################################
# .. _inter-trial-coherence:
#
# Time-frequency analysis: power and inter-trial coherence
# --------------------------------------------------------
#
# We now compute time-frequency representations (TFRs) from our Epochs.
# We'll look at power and inter-trial coherence (ITC).
#
# To do this we'll use the function :func:`mne.time_frequency.tfr_morlet`
# but you can also use :func:`mne.time_frequency.tfr_multitaper`
# or :func:`mne.time_frequency.tfr_stockwell`.
#
# .. note::
# The ``decim`` parameter reduces the sampling rate of the time-frequency
# decomposition by the defined factor. This is usually done to reduce
# memory usage. For more information refer to the documentation of
# :func:`mne.time_frequency.tfr_morlet`.
#
# define frequencies of interest (log-spaced)
freqs = np.logspace(*np.log10([6, 35]), num=8)
n_cycles = freqs / 2. # different number of cycle per frequency
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True,
return_itc=True, decim=3, n_jobs=1)
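# Quick sanity check of the ``decim`` note above (using only objects already
# defined in this tutorial): with ``decim=3`` the TFR should keep roughly
# every third time sample, so its time axis is about a third as long as the
# epochs' time axis.
print(len(epochs.times), power.data.shape[-1])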
###############################################################################
# Inspect power
# -------------
#
# .. note::
# The generated figures are interactive. In the topo you can click
# on an image to visualize the data for one sensor.
# You can also select a portion in the time-frequency plane to
# obtain a topomap for a certain time-frequency region.
power.plot_topo(baseline=(-0.5, 0), mode='logratio', title='Average power')
power.plot([82], baseline=(-0.5, 0), mode='logratio', title=power.ch_names[82])
fig, axis = plt.subplots(1, 2, figsize=(7, 4))
power.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=8, fmax=12,
baseline=(-0.5, 0), mode='logratio', axes=axis[0],
title='Alpha', show=False)
power.plot_topomap(ch_type='grad', tmin=0.5, tmax=1.5, fmin=13, fmax=25,
baseline=(-0.5, 0), mode='logratio', axes=axis[1],
title='Beta', show=False)
mne.viz.tight_layout()
plt.show()
###############################################################################
# Joint Plot
# ----------
# You can also create a joint plot showing both the aggregated TFR
# across channels and topomaps at specific times and frequencies to obtain
# a quick overview regarding oscillatory effects across time and space.
power.plot_joint(baseline=(-0.5, 0), mode='mean', tmin=-.5, tmax=2,
timefreqs=[(.5, 10), (1.3, 8)])
###############################################################################
# Inspect ITC
# -----------
itc.plot_topo(title='Inter-Trial coherence', vmin=0., vmax=1., cmap='Reds')
###############################################################################
# .. note::
# Baseline correction can be applied to power or done in plots.
#     To illustrate the baseline correction in plots, the following call is
#     left commented out: ``power.apply_baseline(baseline=(-0.5, 0), mode='logratio')``
#
# Exercise
# --------
#
# - Visualize the inter-trial coherence values as topomaps as done with
# power.
|
bsd-3-clause
|
gotomypc/scikit-learn
|
sklearn/linear_model/tests/test_logistic.py
|
105
|
26588
|
import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial')]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg and"
" lbfgs solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
        # all solvers except 'newton-cg' and 'lbfgs'
for solver in ['liblinear']:
msg = ("Solver %s does not support a multinomial backend." %
solver)
lr = LR(solver=solver, multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for method in ('lbfgs', 'newton-cg', 'liblinear'):
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
# test for fit_intercept=True
for method in ('lbfgs', 'newton-cg', 'liblinear'):
Cs = [1e3]
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
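        # The fitted slope of grad(w + t * vector) with respect to t is a
        # finite-difference estimate of the Hessian-vector product, which is
        # what hess(vector) computed above, so the two should closely agree.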
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
    # Do not fit intercept. This can be considered equivalent to adding
    # a feature of ones, i.e. a column of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
    # Use a pre-defined fold, since the folds generated would otherwise differ for different y
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
    # Test that liblinear fails when a class_weight dict is provided
    # for a multiclass problem. However, it can handle binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
    # Test that the path gives almost the same results. However, since in
    # this case we take the average of the coefs after fitting across all
    # the folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
|
bsd-3-clause
|
kevin-intel/scikit-learn
|
sklearn/datasets/tests/test_samples_generator.py
|
2
|
23151
|
import re
from collections import defaultdict
from functools import partial
import numpy as np
import pytest
import scipy.sparse as sp
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_moons
from sklearn.datasets import make_circles
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
weights = [0.1, 0.25]
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=weights,
random_state=0)
assert weights == [0.1, 0.25]
assert X.shape == (100, 20), "X shape mismatch"
assert y.shape == (100,), "y shape mismatch"
assert np.unique(y).shape == (3,), "Unexpected number of classes"
assert sum(y == 0) == 10, "Unexpected number of samples in class #0"
assert sum(y == 1) == 25, "Unexpected number of samples in class #1"
assert sum(y == 2) == 65, "Unexpected number of samples in class #2"
# Test for n_features > 30
X, y = make_classification(n_samples=2000, n_features=31, n_informative=31,
n_redundant=0, n_repeated=0, hypercube=True,
scale=0.5, random_state=0)
assert X.shape == (2000, 31), "X shape mismatch"
assert y.shape == (2000,), "y shape mismatch"
assert (np.unique(X.view([('', X.dtype)]*X.shape[1])).view(X.dtype)
.reshape(-1, X.shape[1]).shape[0] == 2000), (
"Unexpected number of unique rows")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10),
(int(64), [1], 1)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert X.shape == (n_samples, n_informative)
assert y.shape == (n_samples,)
            # Cluster by sign: each row of +/-1 signs is viewed as one
            # fixed-width byte string so np.unique can identify distinct
            # sign patterns (i.e. clusters)
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert len(unique_signs) == n_clusters, (
"Wrong number of clusters, or not in distinct quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert len(clusters) == n_clusters_per_class, (
"Wrong number of clusters per class")
assert (len(clusters_by_class) == n_classes), (
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid) / class_sep,
np.ones(n_informative),
decimal=5,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
with pytest.raises(AssertionError):
assert_array_almost_equal(np.abs(centroid) / class_sep,
np.ones(n_informative),
decimal=5,
err_msg="Clusters should "
"not be centered "
"on hypercube "
"vertices")
with pytest.raises(ValueError):
make(n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
with pytest.raises(ValueError):
make(n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
@pytest.mark.parametrize(
'weights, err_type, err_msg',
[
([], ValueError,
"Weights specified but incompatible with number of classes."),
([.25, .75, .1], ValueError,
"Weights specified but incompatible with number of classes."),
(np.array([]), ValueError,
"Weights specified but incompatible with number of classes."),
(np.array([.25, .75, .1]), ValueError,
"Weights specified but incompatible with number of classes."),
(np.random.random(3), ValueError,
"Weights specified but incompatible with number of classes.")
]
)
def test_make_classification_weights_type(weights, err_type, err_msg):
with pytest.raises(err_type, match=err_msg):
make_classification(weights=weights)
@pytest.mark.parametrize("kwargs", [{}, {"n_classes": 3, "n_informative": 3}])
def test_make_classification_weights_array_or_list_ok(kwargs):
X1, y1 = make_classification(weights=[.1, .9],
random_state=0, **kwargs)
X2, y2 = make_classification(weights=np.array([.1, .9]),
random_state=0, **kwargs)
assert_almost_equal(X1, X2)
assert_almost_equal(y1, y2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=100, n_features=20,
n_classes=3, random_state=0,
return_indicator=False,
allow_unlabeled=allow_unlabeled)
assert X.shape == (100, 20), "X shape mismatch"
if not allow_unlabeled:
assert max([max(y) for y in Y]) == 2
assert min([len(y) for y in Y]) == min_length
assert max([len(y) for y in Y]) <= 3
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled)
assert X.shape == (25, 20), "X shape mismatch"
assert Y.shape == (25, 3), "Y shape mismatch"
assert np.all(np.sum(Y, axis=0) > min_length)
# Also test return_distributions and return_indicator with True
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
allow_unlabeled=allow_unlabeled, return_distributions=True)
assert_array_almost_equal(X, X2)
assert_array_equal(Y, Y2)
assert p_c.shape == (3,)
assert_almost_equal(p_c.sum(), 1)
assert p_w_c.shape == (20, 3)
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_multilabel_classification_return_indicator_sparse():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator='sparse',
allow_unlabeled=allow_unlabeled)
assert X.shape == (25, 20), "X shape mismatch"
assert Y.shape == (25, 3), "Y shape mismatch"
assert sp.issparse(Y)
@pytest.mark.parametrize(
"params, err_msg",
[
({"n_classes": 0}, "'n_classes' should be an integer"),
({"length": 0}, "'length' should be an integer")
]
)
def test_make_multilabel_classification_valid_arguments(params, err_msg):
with pytest.raises(ValueError, match=err_msg):
make_multilabel_classification(**params)
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert X.shape == (100, 10), "X shape mismatch"
assert y.shape == (100,), "y shape mismatch"
assert np.unique(y).shape == (2,), "Unexpected number of classes"
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert X.shape == (100, 10), "X shape mismatch"
assert y.shape == (100,), "y shape mismatch"
assert c.shape == (10,), "coef shape mismatch"
assert sum(c != 0.0) == 3, "Unexpected number of informative features"
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert X.shape == (100, 1)
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert X.shape == (100, 10), "X shape mismatch"
assert y.shape == (100, 3), "y shape mismatch"
assert c.shape == (10, 3), "coef shape mismatch"
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
cluster_stds = np.array([0.05, 0.2, 0.4])
cluster_centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
X, y = make_blobs(random_state=0, n_samples=50, n_features=2,
centers=cluster_centers, cluster_std=cluster_stds)
assert X.shape == (50, 2), "X shape mismatch"
assert y.shape == (50,), "y shape mismatch"
assert np.unique(y).shape == (3,), "Unexpected number of blobs"
for i, (ctr, std) in enumerate(zip(cluster_centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
def test_make_blobs_n_samples_list():
n_samples = [50, 30, 20]
X, y = make_blobs(n_samples=n_samples, n_features=2, random_state=0)
assert X.shape == (sum(n_samples), 2), "X shape mismatch"
assert all(np.bincount(y, minlength=len(n_samples)) == n_samples), \
"Incorrect number of samples per blob"
def test_make_blobs_n_samples_list_with_centers():
n_samples = [20, 20, 20]
centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
cluster_stds = np.array([0.05, 0.2, 0.4])
X, y = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=cluster_stds, random_state=0)
assert X.shape == (sum(n_samples), 2), "X shape mismatch"
assert all(np.bincount(y, minlength=len(n_samples)) == n_samples), \
"Incorrect number of samples per blob"
for i, (ctr, std) in enumerate(zip(centers, cluster_stds)):
assert_almost_equal((X[y == i] - ctr).std(), std, 1, "Unexpected std")
@pytest.mark.parametrize(
"n_samples",
[[5, 3, 0],
np.array([5, 3, 0]),
tuple([5, 3, 0])]
)
def test_make_blobs_n_samples_centers_none(n_samples):
centers = None
X, y = make_blobs(n_samples=n_samples, centers=centers, random_state=0)
assert X.shape == (sum(n_samples), 2), "X shape mismatch"
assert all(np.bincount(y, minlength=len(n_samples)) == n_samples), \
"Incorrect number of samples per blob"
def test_make_blobs_return_centers():
n_samples = [10, 20]
n_features = 3
X, y, centers = make_blobs(n_samples=n_samples, n_features=n_features,
return_centers=True, random_state=0)
assert centers.shape == (len(n_samples), n_features)
def test_make_blobs_error():
n_samples = [20, 20, 20]
centers = np.array([[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
cluster_stds = np.array([0.05, 0.2, 0.4])
wrong_centers_msg = re.escape(
"Length of `n_samples` not consistent with number of centers. "
f"Got n_samples = {n_samples} and centers = {centers[:-1]}"
)
with pytest.raises(ValueError, match=wrong_centers_msg):
make_blobs(n_samples, centers=centers[:-1])
wrong_std_msg = re.escape(
"Length of `clusters_std` not consistent with number of centers. "
f"Got centers = {centers} and cluster_std = {cluster_stds[:-1]}"
)
with pytest.raises(ValueError, match=wrong_std_msg):
make_blobs(n_samples, centers=centers, cluster_std=cluster_stds[:-1])
wrong_type_msg = ("Parameter `centers` must be array-like. "
"Got {!r} instead".format(3))
with pytest.raises(ValueError, match=wrong_type_msg):
make_blobs(n_samples, centers=3)
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert X.shape == (5, 10), "X shape mismatch"
assert y.shape == (5,), "y shape mismatch"
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert X.shape == (5, 4), "X shape mismatch"
assert y.shape == (5,), "y shape mismatch"
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert X.shape == (5, 4), "X shape mismatch"
assert y.shape == (5,), "y shape mismatch"
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert X.shape == (50, 25), "X shape mismatch"
from numpy.linalg import svd
u, s, v = svd(X)
assert sum(s) - 5 < 0.1, "X rank is not approximately 5"
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert Y.shape == (10, 5), "Y shape mismatch"
assert D.shape == (10, 8), "D shape mismatch"
assert X.shape == (8, 5), "X shape mismatch"
for col in X.T:
assert len(np.flatnonzero(col)) == 3, 'Non-zero coefs mismatch'
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert X.shape == (5, 10), "X shape mismatch"
assert y.shape == (5,), "y shape mismatch"
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert X.shape == (5, 5), "X shape mismatch"
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert X.shape == (5, 3), "X shape mismatch"
assert t.shape == (5,), "t shape mismatch"
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert X.shape == (5, 3), "X shape mismatch"
assert t.shape == (5,), "t shape mismatch"
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert X.shape == (100, 100), "X shape mismatch"
assert rows.shape == (4, 100), "rows shape mismatch"
assert cols.shape == (4, 100,), "columns shape mismatch"
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert X.shape == (100, 100), "X shape mismatch"
assert rows.shape == (100, 100), "rows shape mismatch"
assert cols.shape == (100, 100,), "columns shape mismatch"
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_almost_equal(X1, X2)
def test_make_moons():
X, y = make_moons(3, shuffle=False)
for x, label in zip(X, y):
center = [0.0, 0.0] if label == 0 else [1.0, 0.5]
dist_sqr = ((x - center) ** 2).sum()
assert_almost_equal(dist_sqr, 1.0,
err_msg="Point is not on expected unit circle")
def test_make_moons_unbalanced():
X, y = make_moons(n_samples=(7, 5))
assert np.sum(y == 0) == 7 and np.sum(y == 1) == 5, \
'Number of samples in a moon is wrong'
assert X.shape == (12, 2), "X shape mismatch"
assert y.shape == (12,), "y shape mismatch"
with pytest.raises(ValueError, match=r'`n_samples` can be either an int '
r'or a two-element tuple.'):
make_moons(n_samples=[1, 2, 3])
with pytest.raises(ValueError, match=r'`n_samples` can be either an int '
r'or a two-element tuple.'):
make_moons(n_samples=(10,))
def test_make_circles():
factor = 0.3
for (n_samples, n_outer, n_inner) in [(7, 3, 4), (8, 4, 4)]:
# Testing odd and even case, because in the past make_circles always
# created an even number of samples.
X, y = make_circles(n_samples, shuffle=False, noise=None,
factor=factor)
assert X.shape == (n_samples, 2), "X shape mismatch"
assert y.shape == (n_samples,), "y shape mismatch"
center = [0.0, 0.0]
for x, label in zip(X, y):
dist_sqr = ((x - center) ** 2).sum()
            dist_exp = 1.0 if label == 0 else factor ** 2
assert_almost_equal(dist_sqr, dist_exp,
err_msg="Point is not on expected circle")
assert X[y == 0].shape == (n_outer, 2), (
"Samples not correctly distributed across circles.")
assert X[y == 1].shape == (n_inner, 2), (
"Samples not correctly distributed across circles.")
with pytest.raises(ValueError):
make_circles(factor=-0.01)
with pytest.raises(ValueError):
make_circles(factor=1.)
def test_make_circles_unbalanced():
X, y = make_circles(n_samples=(2, 8))
assert np.sum(y == 0) == 2, 'Number of samples in inner circle is wrong'
assert np.sum(y == 1) == 8, 'Number of samples in outer circle is wrong'
assert X.shape == (10, 2), "X shape mismatch"
assert y.shape == (10,), "y shape mismatch"
with pytest.raises(ValueError, match=r'`n_samples` can be either an int '
r'or a two-element tuple.'):
make_circles(n_samples=[1, 2, 3])
with pytest.raises(ValueError, match=r'`n_samples` can be either an int '
r'or a two-element tuple.'):
make_circles(n_samples=(10,))
|
bsd-3-clause
|
DStauffman/dstauffman2
|
dstauffman2/archery/scoring/test_scoring.py
|
1
|
4960
|
r"""
Test file for the `scoring` submodule of the dstauffman2 archery code. It is intended to contain
test cases to demonstrate functionality and correct outcomes for all the functions within the module.
Notes
-----
#. Written by David C. Stauffer in October 2015.
"""
#%% Imports
import unittest
import matplotlib.pyplot as plt
import numpy as np
import dstauffman2.archery.scoring as arch
#%% get_root_dir
class Test_get_root_dir(unittest.TestCase):
r"""
Tests the get_root_dir function with these cases:
call the function
"""
def test_function(self):
folder = arch.get_root_dir()
self.assertTrue(folder) # TODO: don't know an independent way to test this
#%% score_text_to_number
class Test_score_text_to_number(unittest.TestCase):
r"""
Tests the score_text_to_number function with the following cases:
Text to num NFAA
Text to num USAA
Int to int NFAA
Int to int USAA
Large number
Bad float
Bad string (raises ValueError)
"""
def setUp(self):
self.text_scores = ['X', '10', '9', '8', '7', '6', '5', '4', '3', '2', '1', '0', 'M', 'x', 'm']
self.num_scores = [ 10, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0, 10, 0]
self.usaa_scores = [ 10, 9, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0, 10, 0]
def test_conversion(self):
for (this_text, this_num) in zip(self.text_scores, self.num_scores):
num = arch.score_text_to_number(this_text)
self.assertEqual(num, this_num)
def test_usaa_conversion(self):
for (this_text, this_num) in zip(self.text_scores, self.usaa_scores):
num = arch.score_text_to_number(this_text, flag='usaa')
self.assertEqual(num, this_num)
def test_int_to_int(self):
for this_num in self.num_scores:
num = arch.score_text_to_number(this_num)
self.assertEqual(num, this_num)
def test_int_to_int_usaa(self):
for this_num in range(0, 11):
num = arch.score_text_to_number(this_num, flag='usaa')
if this_num == 10:
self.assertEqual(num, 9)
else:
self.assertEqual(num, this_num)
def test_large_values(self):
num = arch.score_text_to_number('1001')
self.assertEqual(num, 1001)
def test_bad_float(self):
with self.assertRaises(ValueError):
arch.score_text_to_number('10.8')
def test_bad_value(self):
with self.assertRaises(ValueError):
arch.score_text_to_number('z')
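# A minimal sketch (not part of the original module) of the conversion behaviour the
# tests above imply for arch.score_text_to_number; the real implementation may differ.
def _score_text_to_number_sketch(value, flag='nfaa'):
    text = str(value).strip()
    if text.lower() == 'x':
        return 10  # an X counts as 10 in both NFAA and USAA scoring
    if text.lower() == 'm':
        return 0  # a miss counts as 0
    number = int(text)  # raises ValueError for inputs such as '10.8' or 'z'
    if flag == 'usaa' and number == 10:
        return 9  # USAA scoring demotes a plain 10 to 9
    return number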
#%% convert_data_to_scores
class Test_convert_data_to_scores(unittest.TestCase):
r"""
Tests the convert_data_to_scores function with these cases:
Nominal
"""
def setUp(self):
self.scores = [10*['X', 10, 9], 10*[9, 9, 9]]
self.nfaa_scores = [290, 270]
self.usaa_scores = [280, 270]
def test_nominal(self):
(nfaa_score, usaa_score) = arch.convert_data_to_scores(self.scores)
np.testing.assert_array_equal(nfaa_score, self.nfaa_scores)
np.testing.assert_array_equal(usaa_score, self.usaa_scores)
#%% plot_mean_and_std
class Test_plot_mean_and_std(unittest.TestCase):
r"""
Tests the plot_mean_and_std function with these cases:
TBD
"""
def setUp(self):
self.scores = [10*['X', 10, 9], 10*[9, 9, 9]]
self.fig = None
def test_nominal(self):
self.fig = arch.plot_mean_and_std(self.scores)
# TODO: write more of these
def tearDown(self):
if self.fig is not None:
plt.close(self.fig)
#%% normal_curve
class Test_normal_curve(unittest.TestCase):
r"""
Tests the normal_curve function with these cases:
TBD
"""
def setUp(self):
self.x = np.arange(-5, 5.01, 0.01)
self.mu = 0
self.sigma = 1
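        # expected output: the standard normal pdf exp(-(x-mu)**2/(2*sigma**2))/(sigma*sqrt(2*pi))
        # evaluated with mu=0 and sigma=1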
self.y = np.exp(-self.x**2/2)/np.sqrt(2*np.pi)
def test_nominal(self):
y = arch.normal_curve(self.x, self.mu, self.sigma)
np.testing.assert_array_almost_equal(y, self.y)
def test_nonzero_mean(self):
offset = 2.5
y = arch.normal_curve(self.x + offset, self.mu + offset, self.sigma)
np.testing.assert_array_almost_equal(y, self.y)
def test_no_std(self):
y = arch.normal_curve(self.x, 3.3, 0)
out = np.zeros(self.x.shape)
ix = np.flatnonzero(y == 3.3)
out[ix] = 1
np.testing.assert_array_almost_equal(y, out)
#%% read_from_excel_datafile
class Test_read_from_excel_datafile(unittest.TestCase):
r"""
Tests the read_from_excel_datafile function with these cases:
TBD
"""
pass
#%% create_scoresheet
class Test_create_scoresheet(unittest.TestCase):
r"""
Tests the create_scoresheet function with these cases:
TBD
"""
pass
#%% Unit test execution
if __name__ == '__main__':
unittest.main(exit=False)
|
lgpl-3.0
|
kevin-intel/scikit-learn
|
sklearn/tests/test_min_dependencies_readme.py
|
2
|
1562
|
"""Tests for the minimum dependencies in the README.rst file."""
import os
import re
from pathlib import Path
import pytest
import sklearn
from sklearn._min_dependencies import dependent_packages
from sklearn.utils.fixes import parse_version
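# Added illustration (not part of the original test): the regular expression used in
# the test below is meant to pick up README substitution lines of roughly this shape;
# the concrete package name and version here are hypothetical.
def _demo_min_version_pattern():
    pattern = re.compile(r"(\.\. \|)"
                         r"(([A-Za-z]+\-?)+)"
                         r"(MinVersion\| replace::)"
                         r"( [0-9]+\.[0-9]+(\.[0-9]+)?)")
    sample = ".. |NumPyMinVersion| replace:: 1.14.0"
    matched = pattern.match(sample)
    # group(2) is the package name, group(5) the pinned version
    return matched.group(2), matched.group(5).strip()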
def test_min_dependencies_readme():
# Test that the minimum dependencies in the README.rst file are
    # consistent with the minimum dependencies defined in the file:
# sklearn/_min_dependencies.py
pattern = re.compile(r"(\.\. \|)" +
r"(([A-Za-z]+\-?)+)" +
r"(MinVersion\| replace::)" +
r"( [0-9]+\.[0-9]+(\.[0-9]+)?)")
readme_path = Path(sklearn.__path__[0]).parents[0]
readme_file = readme_path / "README.rst"
if not os.path.exists(readme_file):
# Skip the test if the README.rst file is not available.
# For instance, when installing scikit-learn from wheels
pytest.skip("The README.rst file is not available.")
with readme_file.open("r") as f:
for line in f:
matched = pattern.match(line)
if not matched:
continue
package, version = matched.group(2), matched.group(5)
package = package.lower()
if package in dependent_packages:
version = parse_version(version)
min_version = parse_version(dependent_packages[package][0])
assert version == min_version, (f"{package} has a mismatched "
"version")
|
bsd-3-clause
|
StefReck/Km3-Autoencoder
|
scripts/channel_autoencoder_predict.py
|
1
|
8784
|
# -*- coding: utf-8 -*-
"""
Load a channel id autoencoder model and predict on some train files, then optionally plot the results.
"""
from keras.models import load_model
from keras import metrics
import h5py
import numpy as np
import argparse
import matplotlib.pyplot as plt
from get_dataset_info import get_dataset_info
from util.run_cnn import load_zero_center_data, generate_batches_from_hdf5_file, h5_get_number_of_rows
def parse_input():
parser = argparse.ArgumentParser(description='Predict on channel data')
parser.add_argument('model_name', type=str)
args = parser.parse_args()
params = vars(args)
return params
params = parse_input()
model_name = params["model_name"]
mode="statistics"
zero_center = False
# For plot mode: how many batches (of batchsize 32) of channel_id arrays are read through for the plot
how_many_dom_batches = 1000
bins=100
model=load_model(model_name)
dataset_info_dict=get_dataset_info("xyzc_flat")
if mode == "simple":
#Print some 31-arrays and the prediction from the autoencoder
how_many_doms=10 #to read from file
minimum_counts = 5
test_file = dataset_info_dict["test_file"]
if zero_center==True:
xs_mean=load_zero_center_data(((dataset_info_dict["train_file"],),), batchsize=32, n_bins=dataset_info_dict["n_bins"], n_gpu=1)
else:
xs_mean = 0
f = h5py.File(test_file, "r")
#look for some doms that are not mostly 0
batch=[]
i=0
while len(batch)<=how_many_doms:
dom=f["x"][i:i+1]
if dom.sum()>=minimum_counts:
batch.extend(dom)
i+=1
batch=np.array(batch)
batch_centered=np.subtract(batch, xs_mean)
pred=np.add(model.predict_on_batch(batch_centered), xs_mean)
for i in range(len(batch)):
print("Original")
print(batch[i])
print("Prediction")
print(pred[i])
print("loss:", ((batch[i]-pred[i])**2).mean())
print("\n")
elif mode=="plot":
#make plot of predictions
#measured counts are almost always 0 or 1
maximum_counts_to_look_for=1
skip_zero_counts=False
train_file=dataset_info_dict["train_file"]
test_file=dataset_info_dict["test_file"]
n_bins=dataset_info_dict["n_bins"]
broken_simulations_mode=dataset_info_dict["broken_simulations_mode"] #def 0
filesize_factor=dataset_info_dict["filesize_factor"]
filesize_factor_test=dataset_info_dict["filesize_factor_test"]
batchsize=dataset_info_dict["batchsize"] #def 32
print("Total channel ids:", how_many_dom_batches*batchsize*31)
class_type=(2,"up_down")
is_autoencoder=True
train_tuple=[[train_file, int(h5_get_number_of_rows(train_file)*filesize_factor)]]
#test_tuple=[[test_file, int(h5_get_number_of_rows(test_file)*filesize_factor_test)]]
if zero_center==True:
xs_mean = load_zero_center_data(train_files=train_tuple, batchsize=batchsize, n_bins=n_bins, n_gpu=1)
else:
xs_mean=0
generator = generate_batches_from_hdf5_file(test_file, batchsize, n_bins, class_type,
is_autoencoder, dataset_info_dict, broken_simulations_mode=0,
f_size=None, zero_center_image=xs_mean, yield_mc_info=False,
swap_col=None, is_in_test_mode = False)
    # predictions on channel ids that measured 0, 1, 2, 3, ... counts
pred_on = []
for measured_counts in range(maximum_counts_to_look_for+1):
pred_on.append([])
print_something_after_every = int(how_many_dom_batches/10)
for i in range(how_many_dom_batches):
if i%print_something_after_every==0:
print("Predicting ... ", int(10*i/print_something_after_every), "% done")
data=next(generator)[0]
data_real = np.add(data, xs_mean)
pred=np.add(model.predict_on_batch(data), xs_mean)
#data_real is still a batch of len batchsize of single doms (dim. e.g. (32,31)), so look at each one:
for dom_no,data_real_single in enumerate(data_real):
pred_single=pred[dom_no]
for measured_counts in range(skip_zero_counts, maximum_counts_to_look_for+1):
                #sort predictions into lists according to the original counts
pred_on[measured_counts].extend(pred_single[data_real_single==measured_counts])
print("Done, generating plot...")
plt.title("Channel autoencoder predictions (%)")
    plt.ylabel("Fraction of predictions")
plt.xlabel("Predicted counts")
plt.plot([],[], " ", label="Original counts")
make_plots_of_counts=list(range(maximum_counts_to_look_for+1))
ex_list=[]
#fill with maximum and minimum prediction of every original count number
for counts_array in pred_on:
len(counts_array)
if len(counts_array) != 0:
ex_list.extend([np.amax(counts_array), np.amin(counts_array)])
range_of_plot=[np.amin(ex_list),np.amax(ex_list)]
    #relative width of bins as fraction of bin size
#relative_width=1/len(make_plots_of_counts)
#bin_size = (range_of_plot[0]-range_of_plot[1]) / bins
#bin_edges = np.linspace(range_of_plot[0], range_of_plot[1], num=bins+1)
for c in make_plots_of_counts:
        if len(pred_on[c]) != 0:
#offset = bin_size*relative_width*c
plt.hist( x=pred_on[c], bins=bins, label=str(c), density=True, range=range_of_plot )
plt.legend()
plt.show()
elif mode=="statistics":
    #evaluate on test set. Check whether doms with n hits in total were reconstructed correctly.
    #For this, predictions are rounded to the nearest integer
train_file=dataset_info_dict["train_file"]
test_file=dataset_info_dict["test_file"]
n_bins=dataset_info_dict["n_bins"]
broken_simulations_mode=dataset_info_dict["broken_simulations_mode"] #def 0
filesize_factor=dataset_info_dict["filesize_factor"]
filesize_factor_test=dataset_info_dict["filesize_factor_test"]
#higher for testing
batchsize=32
dataset_info_dict["batchsize"]=batchsize #def 32
class_type=(2,"up_down")
is_autoencoder=True
train_tuple=[[train_file, int(h5_get_number_of_rows(train_file)*filesize_factor)]]
test_tuple=[[test_file, int(h5_get_number_of_rows(test_file)*filesize_factor_test)]]
if zero_center==True:
xs_mean = load_zero_center_data(train_files=train_tuple, batchsize=batchsize, n_bins=n_bins, n_gpu=1)
else:
xs_mean=None
generator = generate_batches_from_hdf5_file(test_file, batchsize, n_bins, class_type,
is_autoencoder, dataset_info_dict, broken_simulations_mode=0,
f_size=None, zero_center_image=xs_mean, yield_mc_info=False,
swap_col=None, is_in_test_mode = False)
total_number_of_batches = int(test_tuple[0][1]/batchsize)
total_number_of_batches=10
print("Filesize:",test_tuple[0][1], "Total number of batches:", total_number_of_batches)
#a dict with entries: total_counts_per_dom : [[correct_from_batch_0, ...],[total_from_batch_0,...]]
#e.g. 0 : [[70,75,...],[96,94,...]]
counts_dict={}
for batchno in range(total_number_of_batches):
data = next(generator)[0]
print("data shape", data.shape)
prediction = np.round(model.predict_on_batch(data))
#shape (batchsize,)
total_counts_measured_in_dom = np.sum(data, axis=1)
print("total_counts shape",total_counts_measured_in_dom.shape)
        #Should result in a (batchsize,) array that states whether the whole dom was predicted correctly
dom_correct = np.logical_and.reduce(data==prediction, axis=1)
print("dom_correct shape",dom_correct.shape)
#which count numbers were measured in all the doms
counts=np.unique(total_counts_measured_in_dom).astype(int)
for count in counts:
positions = np.where(total_counts_measured_in_dom==count)
predicted_correct_there = np.sum(dom_correct[positions]).astype(int)
total_doms_with_these_counts = len(positions[0])
if count not in counts_dict:
counts_dict[count]=[[],[]]
counts_dict[count][0].append(predicted_correct_there)
counts_dict[count][1].append(total_doms_with_these_counts)
for keyword in counts_dict:
predicted_correct, total = counts_dict[keyword]
counts_dict[keyword] = [[np.sum(predicted_correct)], [np.sum(total)]]
print(counts_dict)
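# Added helper (not in the original script): turn the counts_dict built in
# "statistics" mode into a per-count accuracy, i.e. the fraction of DOMs with a
# given total number of counts that were reconstructed exactly.
def accuracy_per_count(counts_dict):
    accuracies = {}
    for count, (n_correct, n_total) in counts_dict.items():
        total = float(np.sum(n_total))
        accuracies[count] = np.sum(n_correct) / total if total > 0 else 0.0
    return accuracies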
|
mit
|
ultimanet/nifty
|
nifty_cmaps.py
|
1
|
12084
|
## NIFTY (Numerical Information Field Theory) has been developed at the
## Max-Planck-Institute for Astrophysics.
##
## Copyright (C) 2013 Max-Planck-Society
##
## Author: Marco Selig
## Project homepage: <http://www.mpa-garching.mpg.de/ift/nifty/>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
## See the GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
.. __ ____ __
.. /__/ / _/ / /_
.. __ ___ __ / /_ / _/ __ __
.. / _ | / / / _/ / / / / / /
.. / / / / / / / / / /_ / /_/ /
.. /__/ /__/ /__/ /__/ \___/ \___ / cmaps
.. /______/
This module provides the `ncmap` class whose static methods return color
maps.
The visualization of fields is useful for obvious reasons, and therefore
some nice color maps are here to be found. Those are segmented color maps
that can be used in many settings, including the native plotting method for
fields. (Some of the color maps offered here are results from IFT
publications, cf. references below.)
Examples
--------
>>> from nifty.nifty_cmaps import *
>>> f = field(rg_space([42, 42]), random="uni", vmin=-1)
>>> f[21:] = f.smooth(sigma=1/42)[21:]
>>> [f.plot(cmap=cc, vmin=-0.8, vmax=0.8) for cc in [None, ncmap.pm()]]
## two 2D plots open
"""
from __future__ import division
from matplotlib.colors import LinearSegmentedColormap as cm
##-----------------------------------------------------------------------------
class ncmap(object):
"""
.. __ ___ _______ __ ___ ____ ____ __ ______
.. / _ | / ____/ / _ _ | / _ / / _ |
.. / / / / / /____ / / / / / / / /_/ / / /_/ /
.. /__/ /__/ \______/ /__/ /__/ /__/ \______| / ____/ class
.. /__/
NIFTY support class for color maps.
This class provides several *nifty* color maps that are returned by
its static methods. The `ncmap` class is not meant to be initialised.
See Also
--------
matplotlib.colors.LinearSegmentedColormap
Examples
--------
>>> f = field(rg_space([42, 42]), random="uni", vmin=-1)
>>> f.plot(cmap=ncmap.pm(), vmin=-1, vmax=1)
## 2D plot opens
"""
__init__ = None
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@staticmethod
def he(ncolors=256):
"""
Returns a color map often used in High Energy Astronomy.
Parameters
----------
ncolors : int, *optional*
Number of color segments (default: 256).
Returns
-------
cmap : matplotlib.colors.LinearSegmentedColormap instance
Linear segmented color map.
"""
segmentdata = {"red": [(0.000, 0.0, 0.0), (0.167, 0.0, 0.0),
(0.333, 0.5, 0.5), (0.500, 1.0, 1.0),
(0.667, 1.0, 1.0), (0.833, 1.0, 1.0),
(1.000, 1.0, 1.0)],
"green": [(0.000, 0.0, 0.0), (0.167, 0.0, 0.0),
(0.333, 0.0, 0.0), (0.500, 0.0, 0.0),
(0.667, 0.5, 0.5), (0.833, 1.0, 1.0),
(1.000, 1.0, 1.0)],
"blue": [(0.000, 0.0, 0.0), (0.167, 1.0, 1.0),
(0.333, 0.5, 0.5), (0.500, 0.0, 0.0),
(0.667, 0.0, 0.0), (0.833, 0.0, 0.0),
(1.000, 1.0, 1.0)]}
return cm("High Energy", segmentdata, N=int(ncolors), gamma=1.0)
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@staticmethod
def fm(ncolors=256):
"""
Returns a color map used in reconstruction of the "Faraday Map".
Parameters
----------
ncolors : int, *optional*
Number of color segments (default: 256).
Returns
-------
cmap : matplotlib.colors.LinearSegmentedColormap instance
Linear segmented color map.
References
----------
        .. [#] N. Oppermann et al.,
"An improved map of the Galactic Faraday sky",
Astronomy & Astrophysics, Volume 542, id.A93, 06/2012;
`arXiv:1111.6186 <http://www.arxiv.org/abs/1111.6186>`_
"""
segmentdata = {"red": [(0.000, 0.35, 0.35), (0.100, 0.40, 0.40),
(0.200, 0.25, 0.25), (0.410, 0.47, 0.47),
(0.500, 0.80, 0.80), (0.560, 0.96, 0.96),
(0.590, 1.00, 1.00), (0.740, 0.80, 0.80),
(0.800, 0.80, 0.80), (0.900, 0.50, 0.50),
(1.000, 0.40, 0.40)],
"green": [(0.000, 0.00, 0.00), (0.200, 0.00, 0.00),
(0.362, 0.88, 0.88), (0.500, 1.00, 1.00),
(0.638, 0.88, 0.88), (0.800, 0.25, 0.25),
(0.900, 0.30, 0.30), (1.000, 0.20, 0.20)],
"blue": [(0.000, 0.35, 0.35), (0.100, 0.40, 0.40),
(0.200, 0.80, 0.80), (0.260, 0.80, 0.80),
(0.410, 1.00, 1.00), (0.440, 0.96, 0.96),
(0.500, 0.80, 0.80), (0.590, 0.47, 0.47),
(0.800, 0.00, 0.00), (1.000, 0.00, 0.00)]}
return cm("Faraday Map", segmentdata, N=int(ncolors), gamma=1.0)
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@staticmethod
def fu(ncolors=256):
"""
Returns a color map used for the "Faraday Map Uncertainty".
Parameters
----------
ncolors : int, *optional*
Number of color segments (default: 256).
Returns
-------
cmap : matplotlib.colors.LinearSegmentedColormap instance
Linear segmented color map.
References
----------
        .. [#] N. Oppermann et al.,
"An improved map of the Galactic Faraday sky",
Astronomy & Astrophysics, Volume 542, id.A93, 06/2012;
`arXiv:1111.6186 <http://www.arxiv.org/abs/1111.6186>`_
"""
segmentdata = {"red": [(0.000, 1.00, 1.00), (0.100, 0.80, 0.80),
(0.200, 0.65, 0.65), (0.410, 0.60, 0.60),
(0.500, 0.70, 0.70), (0.560, 0.96, 0.96),
(0.590, 1.00, 1.00), (0.740, 0.80, 0.80),
(0.800, 0.80, 0.80), (0.900, 0.50, 0.50),
(1.000, 0.40, 0.40)],
"green": [(0.000, 0.90, 0.90), (0.200, 0.65, 0.65),
(0.362, 0.95, 0.95), (0.500, 1.00, 1.00),
(0.638, 0.88, 0.88), (0.800, 0.25, 0.25),
(0.900, 0.30, 0.30), (1.000, 0.20, 0.20)],
"blue": [(0.000, 1.00, 1.00), (0.100, 0.80, 0.80),
(0.200, 1.00, 1.00), (0.410, 1.00, 1.00),
(0.440, 0.96, 0.96), (0.500, 0.70, 0.70),
(0.590, 0.42, 0.42), (0.800, 0.00, 0.00),
(1.000, 0.00, 0.00)]}
return cm("Faraday Uncertainty", segmentdata, N=int(ncolors),
gamma=1.0)
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@staticmethod
def pm(ncolors=256):
"""
        Returns a color map useful for a zero-centered range of values.
Parameters
----------
ncolors : int, *optional*
Number of color segments (default: 256).
Returns
-------
cmap : matplotlib.colors.LinearSegmentedColormap instance
Linear segmented color map.
"""
segmentdata = {"red": [(0.0, 1.00, 1.00), (0.1, 0.96, 0.96),
(0.2, 0.84, 0.84), (0.3, 0.64, 0.64),
(0.4, 0.36, 0.36), (0.5, 0.00, 0.00),
(0.6, 0.00, 0.00), (0.7, 0.00, 0.00),
(0.8, 0.00, 0.00), (0.9, 0.00, 0.00),
(1.0, 0.00, 0.00)],
"green": [(0.0, 0.50, 0.50), (0.1, 0.32, 0.32),
(0.2, 0.18, 0.18), (0.3, 0.08, 0.08),
(0.4, 0.02, 0.02), (0.5, 0.00, 0.00),
(0.6, 0.02, 0.02), (0.7, 0.08, 0.08),
(0.8, 0.18, 0.18), (0.9, 0.32, 0.32),
(1.0, 0.50, 0.50)],
"blue": [(0.0, 0.00, 0.00), (0.1, 0.00, 0.00),
(0.2, 0.00, 0.00), (0.3, 0.00, 0.00),
(0.4, 0.00, 0.00), (0.5, 0.00, 0.00),
(0.6, 0.36, 0.36), (0.7, 0.64, 0.64),
(0.8, 0.84, 0.84), (0.9, 0.96, 0.96),
(1.0, 1.00, 1.00)]}
return cm("Plus Minus", segmentdata, N=int(ncolors), gamma=1.0)
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
@staticmethod
def planck(ncolors=256):
"""
Returns a color map similar to the one used for the "Planck CMB Map".
Parameters
----------
ncolors : int, *optional*
Number of color segments (default: 256).
Returns
-------
cmap : matplotlib.colors.LinearSegmentedColormap instance
Linear segmented color map.
"""
segmentdata = {"red": [(0.0, 0.00, 0.00), (0.1, 0.00, 0.00),
(0.2, 0.00, 0.00), (0.3, 0.00, 0.00),
(0.4, 0.00, 0.00), (0.5, 1.00, 1.00),
(0.6, 1.00, 1.00), (0.7, 1.00, 1.00),
(0.8, 0.83, 0.83), (0.9, 0.67, 0.67),
(1.0, 0.50, 0.50)],
"green": [(0.0, 0.00, 0.00), (0.1, 0.00, 0.00),
(0.2, 0.00, 0.00), (0.3, 0.30, 0.30),
(0.4, 0.70, 0.70), (0.5, 1.00, 1.00),
(0.6, 0.70, 0.70), (0.7, 0.30, 0.30),
(0.8, 0.00, 0.00), (0.9, 0.00, 0.00),
(1.0, 0.00, 0.00)],
"blue": [(0.0, 0.50, 0.50), (0.1, 0.67, 0.67),
(0.2, 0.83, 0.83), (0.3, 1.00, 1.00),
(0.4, 1.00, 1.00), (0.5, 1.00, 1.00),
(0.6, 0.00, 0.00), (0.7, 0.00, 0.00),
(0.8, 0.00, 0.00), (0.9, 0.00, 0.00),
(1.0, 0.00, 0.00)]}
return cm("Planck-like", segmentdata, N=int(ncolors), gamma=1.0)
##-----------------------------------------------------------------------------
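## Added illustration (not part of the original module): every entry of a
## segmentdata channel is a triple (x, y0, y1), where x is a position along the
## color map in [0, 1] and y0/y1 are the channel values just below x and from x
## onwards. A minimal two-anchor grayscale map built with the same mechanism:
def _gray_sketch(ncolors=256):
    segmentdata = {"red": [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)],
                   "green": [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)],
                   "blue": [(0.0, 0.0, 0.0), (1.0, 1.0, 1.0)]}
    return cm("Gray Sketch", segmentdata, N=int(ncolors), gamma=1.0)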
|
gpl-3.0
|
emhuff/regularizedInversion
|
des_color_inference.py
|
1
|
15630
|
#!/usr/bin/env python
import matplotlib as mpl
import desdb
import numpy as np
import esutil
import pyfits
import sys
import argparse
import healpy as hp
import os
import functions2
import slr_zeropoint_shiftmap as slr
import numpy.lib.recfunctions as rf
import matplotlib.pyplot as plt
def NoSimFields(band='i'):
q = """
SELECT
balrog_index,
mag_auto,
flags
FROM
SUCHYTA1.balrog_sva1v2_nosim_%s
""" %(band)
return q
def SimFields(band='i',table='sva1v2'):
q = """
SELECT
t.tilename as tilename,
m.xwin_image as xwin_image,
m.ywin_image as ywin_image,
m.xmin_image as xmin_image,
m.ymin_image as ymin_image,
m.xmax_image as xmax_image,
m.ymax_image as ymax_image,
m.balrog_index as balrog_index,
m.alphawin_j2000 as ra,
m.deltawin_j2000 as dec,
m.mag_auto as mag_auto,
m.spread_model as spread_model,
m.spreaderr_model as spreaderr_model,
m.class_star as class_star,
m.mag_psf as mag_psf,
t.mag as truth_mag_auto,
m.flags as flags
FROM
SUCHYTA1.balrog_%s_sim_%s m
JOIN SUCHYTA1.balrog_%s_truth_%s t ON t.balrog_index = m.balrog_index
""" %(table, band, table, band)
return q
def DESFields(tilestuff, band='i'):
q = """
SELECT
tilename,
coadd_objects_id,
mag_auto_%s as mag_auto,
alphawin_j2000_%s as ra,
deltawin_j2000_%s as dec,
spread_model_%s as spread_model,
spreaderr_model_%s as spreaderr_model,
class_star_%s as class_star,
mag_psf_%s as mag_psf,
flags_%s as flags
FROM
sva1_coadd_objects
WHERE
tilename in %s
""" % (band,band,band,band,band,band,band,band,str(tuple(np.unique(tilestuff['tilename']))))
return q
def TruthFields(band='i', table = 'sva1v2'):
q = """
SELECT
balrog_index,
tilename,
ra,
dec,
objtype,
mag
FROM
SUCHYTA1.balrog_%s_truth_%s
"""%(table,band)
return q
def GetDESCat( depthmap, nside, tilestuff, tileinfo, band='i',depth = 0.0):
cur = desdb.connect()
q = DESFields(tileinfo, band=band)
detcat = cur.quick(q, array=True)
detcat = functions2.ValidDepth(depthmap, nside, detcat, rakey='ra', deckey='dec',depth = depth)
detcat = functions2.RemoveTileOverlap(tilestuff, detcat, col='tilename', rakey='ra', deckey='dec')
return detcat
def getTileInfo(catalog, HealConfig=None):
if HealConfig is None:
HealConfig = getHealConfig()
tiles = np.unique(catalog['tilename'])
cur = desdb.connect()
q = "SELECT tilename, udecll, udecur, urall, uraur FROM coaddtile"
tileinfo = cur.quick(q, array=True)
tilestuff = {}
for i in range(len(tileinfo)):
tilestuff[ tileinfo[i]['tilename'] ] = tileinfo[i]
depthmap, nside = functions2.GetDepthMap(HealConfig['depthfile'])
return depthmap, nside
def cleanCatalog(catalog, tag='mag_auto'):
# We should get rid of obviously wrong things.
keep = np.where( (catalog[tag] > 15. ) & (catalog[tag] < 30.) & (catalog['flags'] < 2) )
return catalog[keep]
def removeBadTilesFromTruthCatalog(truth, tag='mag_auto', goodfrac = 0.8):
tileList = np.unique(truth['tilename'])
number = np.zeros(tileList.size)
for tile, i in zip(tileList,xrange(number.size)):
number[i] = np.sum(truth['tilename'] == tile)
tileList = tileList[number > goodfrac*np.max(number)]
keep = np.in1d( truth['tilename'], tileList )
return truth[keep]
def mergeCatalogsUsingPandas(sim=None, truth=None, key='balrog_index', suffixes = ['_sim','']):
import pandas as pd
simData = pd.DataFrame(sim)
truthData = pd.DataFrame(truth)
matched = pd.merge(simData, truthData, on=key, suffixes = suffixes)
matched_arr = matched.to_records(index=False)
    # This last step is necessary because Pandas converts strings to Object dtype when consuming structured arrays,
    # and np.recfunctions raises errors when it encounters one.
oldDtype = matched_arr.dtype.descr
newDtype = oldDtype
for thisOldType,i in zip(oldDtype, xrange(len(oldDtype) )):
if 'O' in thisOldType[1]:
newDtype[i] = (thisOldType[0], 'S12')
matched_arr = np.array(matched_arr,dtype=newDtype)
return matched_arr
def GetFromDB( band='i', depth = 0.0,tables =['sva1v2','sva1v3_2']): # tables =['sva1v2','sva1v3','sva1v3_2']
depthfile = '../sva1_gold_1.0.2-4_nside4096_nest_i_auto_weights.fits'
cur = desdb.connect()
q = "SELECT tilename, udecll, udecur, urall, uraur FROM coaddtile"
tileinfo = cur.quick(q, array=True)
tilestuff = {}
for i in range(len(tileinfo)):
tilestuff[ tileinfo[i]['tilename'] ] = tileinfo[i]
depthmap, nside = functions2.GetDepthMap(depthfile)
truths = []
sims = []
truthMatcheds = []
for tableName in tables:
q = TruthFields(band=band,table=tableName)
truth = cur.quick(q, array=True)
truth = removeBadTilesFromTruthCatalog(truth)
truth = functions2.ValidDepth(depthmap, nside, truth, depth = depth)
truth = functions2.RemoveTileOverlap(tilestuff, truth)
unique_binds, unique_inds = np.unique(truth['balrog_index'],return_index=True)
truth = truth[unique_inds]
q = SimFields(band=band, table=tableName)
sim = cur.quick(q, array=True)
sim = cleanCatalog(sim,tag='mag_auto')
unique_binds, unique_inds = np.unique(sim['balrog_index'],return_index=True)
sim = sim[unique_inds]
truthMatched = mergeCatalogsUsingPandas(sim=sim,truth=truth)
sim = sim[np.in1d(sim['balrog_index'],truthMatched['balrog_index'])]
sim.sort(order='balrog_index')
truthMatched.sort(order='balrog_index')
truthMatcheds.append(truthMatched)
truths.append(truth)
sims.append(sim)
sim = np.hstack(sims)
truth = np.hstack(truths)
truthMatched = np.hstack(truthMatcheds)
des = GetDESCat(depthmap, nside, tilestuff, sim, band=band,depth = depth)
des = cleanCatalog(des, tag='mag_auto')
return des, sim, truthMatched, truth, tileinfo
def getSingleFilterCatalogs(reload=False,band='i'):
# Check to see whether the catalog files exist. If they do, then
# use the files. If at least one does not, then get what we need
# from the database
fileNames = ['desCatalogFile-'+band+'.fits','BalrogObsFile-'+band+'.fits',
'BalrogTruthFile-'+band+'.fits', 'BalrogTruthMatchedFile-'+band+'.fits',
'BalrogTileInfo.fits']
exists = True
for thisFile in fileNames:
print "Checking for existence of: "+thisFile
if not os.path.isfile(thisFile): exists = False
if exists and not reload:
desCat = esutil.io.read(fileNames[0])
BalrogObs = esutil.io.read(fileNames[1])
BalrogTruth = esutil.io.read(fileNames[2])
BalrogTruthMatched = esutil.io.read(fileNames[3])
BalrogTileInfo = esutil.io.read(fileNames[4])
else:
print "Cannot find files, or have been asked to reload. Getting data from DESDB."
desCat, BalrogObs, BalrogTruthMatched, BalrogTruth, BalrogTileInfo = GetFromDB(band=band)
esutil.io.write( fileNames[0], desCat , clobber=True)
esutil.io.write( fileNames[1], BalrogObs , clobber=True)
esutil.io.write( fileNames[2], BalrogTruth , clobber=True)
esutil.io.write( fileNames[3], BalrogTruthMatched , clobber=True)
esutil.io.write( fileNames[4], BalrogTileInfo, clobber=True)
return desCat, BalrogObs, BalrogTruthMatched, BalrogTruth, BalrogTileInfo
def modestify(data, band='i'):
modest = np.zeros(len(data), dtype=np.int32)
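    # Added note: the cuts below implement a modest-classifier-style star/galaxy
    # separation in the given band (an interpretation of the code, not documented in
    # the original): galcut keeps extended, galaxy-like objects (modtype 1), starcut
    # keeps point-like objects (modtype 3), and everything else gets modtype 5.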
galcut = (data['flags_%s'%(band)] <=3) & -( ((data['class_star_%s'%(band)] > 0.3) & (data['mag_auto_%s'%(band)] < 18.0)) | ((data['spread_model_%s'%(band)] + 3*data['spreaderr_model_%s'%(band)]) < 0.003) | ((data['mag_psf_%s'%(band)] > 30.0) & (data['mag_auto_%s'%(band)] < 21.0)))
modest[galcut] = 1
starcut = (data['flags_%s'%(band)] <=3) & ((data['class_star_%s'%(band)] > 0.3) & (data['mag_auto_%s'%(band)] < 18.0) & (data['mag_psf_%s'%(band)] < 30.0) | (((data['spread_model_%s'%(band)] + 3*data['spreaderr_model_%s'%(band)]) < 0.003) & ((data['spread_model_%s'%(band)] +3*data['spreaderr_model_%s'%(band)]) > -0.003)))
modest[starcut] = 3
neither = -(galcut | starcut)
modest[neither] = 5
data = rf.append_fields(data, 'modtype_%s'%(band), modest)
print len(data), np.sum(galcut), np.sum(starcut), np.sum(neither)
return data
def getMultiBandCatalogs(reload=False, band1 = 'g', band2 = 'i'):
des1, balrogObs1, balrogTruthMatched1, balrogTruth1, balrogTileInfo = getSingleFilterCatalogs(reload=reload, band=band1)
des2, balrogObs2, balrogTruthMatched2, balrogTruth2, _ = getSingleFilterCatalogs(reload=reload, band=band2)
# Now merge these across filters.
des = mergeCatalogsUsingPandas(des1, des2, key='coadd_objects_id', suffixes = ['_'+band1,'_'+band2])
balrogObs = mergeCatalogsUsingPandas(balrogObs1, balrogObs2, key='balrog_index', suffixes = ['_'+band1,'_'+band2])
balrogTruthMatched = mergeCatalogsUsingPandas(balrogTruthMatched1, balrogTruthMatched2, key='balrog_index', suffixes = ['_'+band1,'_'+band2])
balrogTruth = mergeCatalogsUsingPandas(balrogTruth1, balrogTruth2, key='balrog_index', suffixes = ['_'+band1,'_'+band2])
des = modestify(des, band=band1)
des = modestify(des, band=band2)
balrogObs = modestify(balrogObs,band=band1)
balrogObs = modestify(balrogObs,band=band2)
balrogTruthMatched = modestify(balrogTruthMatched,band=band1)
balrogTruthMatched = modestify(balrogTruthMatched,band=band2)
# Finally, add colors.
des = rf.append_fields(des, 'color_%s_%s'%(band1,band2), ( des['mag_auto_'+band1] - des['mag_auto_'+band2] ) )
balrogObs = rf.append_fields(balrogObs, 'color_%s_%s'%(band1,band2), ( balrogObs['mag_auto_'+band1] - balrogObs['mag_auto_'+band2] ) )
balrogTruthMatched = rf.append_fields(balrogTruthMatched, 'color_%s_%s'%(band1,band2), ( balrogTruthMatched['mag_auto_'+band1] - balrogTruthMatched['mag_auto_'+band2] ) )
balrogTruth = rf.append_fields(balrogTruth, 'color_%s_%s'%(band1,band2), ( balrogTruth['mag_'+band1] - balrogTruth['mag_'+band2] ) )
return des, balrogObs, balrogTruthMatched, balrogTruth, balrogTileInfo
def hpHEALPixelToRaDec(pixel, nside=4096, nest=True):
theta, phi = hp.pix2ang(nside, pixel, nest=nest)
ra, dec = convertThetaPhiToRaDec(theta, phi)
return ra, dec
def hpRaDecToHEALPixel(ra, dec, nside= 4096, nest= True):
phi = ra * np.pi / 180.0
theta = (90.0 - dec) * np.pi / 180.0
hpInd = hp.ang2pix(nside, theta, phi, nest= nest)
return hpInd
def getGoodRegionIndices(catalog=None, badHPInds=None, nside=4096,band='i'):
hpInd = hpRaDecToHEALPixel(catalog['ra_'+band], catalog['dec_'+band], nside=nside, nest= True)
keep = ~np.in1d(hpInd, badHPInds)
return keep
def excludeBadRegions(des,balrogObs, balrogTruthMatched, balrogTruth, band='i'):
eliMap = hp.read_map("sva1_gold_1.0.4_goodregions_04_equ_nest_4096.fits", nest=True)
nside = hp.npix2nside(eliMap.size)
maskIndices = np.arange(eliMap.size)
badIndices = maskIndices[eliMap == 1]
obsKeepIndices = getGoodRegionIndices(catalog=balrogObs, badHPInds=badIndices, nside=nside, band=band)
truthKeepIndices = getGoodRegionIndices(catalog=balrogTruth, badHPInds=badIndices, nside=nside,band=band)
desKeepIndices = getGoodRegionIndices(catalog=des, badHPInds=badIndices, nside=nside,band=band)
balrogObs = balrogObs[obsKeepIndices]
balrogTruthMatched = balrogTruthMatched[obsKeepIndices]
balrogTruth = balrogTruth[truthKeepIndices]
des = des[desKeepIndices]
return des,balrogObs, balrogTruthMatched, balrogTruth
def main(argv):
band1 = 'g'
band2 = 'r'
    des, balrogObs, balrogTruthMatched, balrogTruth, balrogTileInfo = getMultiBandCatalogs(reload=False, band1=band1, band2=band2)
des, balrogObs, balrogTruthMatched, balrogTruth = excludeBadRegions(des,balrogObs, balrogTruthMatched, balrogTruth,band=band2)
import MCMC
#truthcolumns = ['objtype_%s'%(band1), 'mag_%s'%(band1), 'mag_%s'%(band2)]
#truthbins = [np.arange(0.5,5,2.0), np.arange(17.5,27,0.5),np.arange(17.5,27,0.5)]
#measuredcolumns = ['modtype_%s'%(band1),'mag_auto_%s'%(band1), 'mag_auto_%s'%(band2)]
#measuredbins=[np.arange(0.5, 7, 2.0), np.arange(17.5,27,0.5), np.arange(17.5,27,0.5)]
truthcolumns = ['objtype_%s'%(band1), 'color_%s_%s'%(band1,band2), 'mag_%s'%(band2)]
truthbins = [np.arange(0.5,5,2.0), np.arange(-4,4,0.5),np.arange(17.5,27,0.5)]
measuredcolumns = ['modtype_%s'%(band1), 'color_%s_%s'%(band1,band2), 'mag_auto_%s'%(band2)]
measuredbins=[np.arange(0.5, 7, 2.0), np.arange(-4,4,0.25), np.arange(17.5,27,0.25)]
BalrogObject = MCMC.BalrogLikelihood(balrogTruth, balrogTruthMatched,
truthcolumns = truthcolumns,
truthbins = truthbins,
measuredcolumns= measuredcolumns,
measuredbins = measuredbins)
nWalkers = 2000
burnin = 1000
steps = 1000
ReconObject = MCMC.MCMCReconstruction(BalrogObject, des, MCMC.ObjectLogL,
truth=balrogTruth, nWalkers=nWalkers, reg=1.0e-10)
ReconObject.BurnIn(burnin)
ReconObject.Sample(steps)
print np.average(ReconObject.Sampler.acceptance_fraction)
fig = plt.figure(1,figsize=(14,7))
ax = fig.add_subplot(1,2, 1)
where = [0, None]
BalrogObject.PlotTruthHistogram1D(where=where, ax=ax, plotkwargs={'label':'BT-G', 'color':'Blue'})
BalrogObject.PlotMeasuredHistogram1D(where=where, ax=ax, plotkwargs={'label':'BO-G', 'color':'Red'})
ReconObject.PlotMeasuredHistogram1D(where=where, ax=ax, plotkwargs={'label':'DO-G', 'color':'Gray'})
ReconObject.PlotReconHistogram1D(where=where, ax=ax, plotkwargs={'label':'DR-G', 'color':'black', 'fmt':'o', 'markersize':3})
ax.legend(loc='best', ncol=2)
ax.set_yscale('log')
ax = fig.add_subplot(1,2, 2)
where = [1, None, 1]
BalrogObject.PlotTruthHistogram1D(where=where, ax=ax, plotkwargs={'label':'BT-G', 'color':'Blue'})
BalrogObject.PlotMeasuredHistogram1D(where=where, ax=ax, plotkwargs={'label':'BO-G', 'color':'Red'})
ReconObject.PlotMeasuredHistogram1D(where=where, ax=ax, plotkwargs={'label':'DO-G', 'color':'Gray'})
ReconObject.PlotReconHistogram1D(where=where, ax=ax, plotkwargs={'label':'DR-G', 'color':'black', 'fmt':'o', 'markersize':3})
ax.legend(loc='best', ncol=2)
ax.set_yscale('log')
fig.savefig("star-galaxy-magnitude-reconstruction")
plt.show(block=True)
fullRecon, fullReconErrs = ReconObject.GetReconstruction()
nBins = np.array([thing.size for thing in truthbins])-1
recon2d = np.reshape(fullRecon, nBins)
err2d = np.reshape(fullReconErrs, nBins)
stop
if __name__ == "__main__":
import pdb, traceback
try:
main(sys.argv)
except:
thingtype, value, tb = sys.exc_info()
traceback.print_exc()
pdb.post_mortem(tb)
|
mit
|
BiaDarkia/scikit-learn
|
examples/cluster/plot_kmeans_silhouette_analysis.py
|
6
|
5963
|
"""
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that ``n_clusters`` values of 3, 5
and 6 are bad picks for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2 is bigger in size owing to the grouping of the 3 sub-clusters into one big
cluster. However, when ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence are of similar sizes as can be also
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
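# A minimal sketch (an addition to this example) of the quantity silhouette_samples
# computes for every point: s = (b - a) / max(a, b), where a is the mean distance to
# the other members of the point's own cluster and b is the mean distance to the
# members of the nearest other cluster. The helper is purely illustrative and is not
# used below.
def _silhouette_of_one_point(point, own_cluster_others, nearest_other_cluster):
    a = np.mean(np.linalg.norm(own_cluster_others - point, axis=1))
    b = np.mean(np.linalg.norm(nearest_other_cluster - point, axis=1))
    return (b - a) / max(a, b)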
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
    # The silhouette coefficient can range from -1 to 1, but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.nipy_spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors, edgecolor='k')
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1,
s=50, edgecolor='k')
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
|
bsd-3-clause
|
kgullikson88/Chiron-Scripts
|
Smooth.py
|
1
|
13289
|
import numpy as np
import FittingUtilities
import HelperFunctions
import matplotlib.pyplot as plt
import sys
import os
from astropy import units
import DataStructures
from scipy.interpolate import InterpolatedUnivariateSpline as interp
from scipy.interpolate import UnivariateSpline as smooth
import MakeModel
from collections import Counter
from sklearn.gaussian_process import GaussianProcess
from sklearn import cross_validation
from scipy.stats import gmean
from astropy.io import fits, ascii
def SmoothData(order, windowsize=91, smoothorder=5, lowreject=3, highreject=3, numiters=10, expand=0, normalize=True):
denoised = HelperFunctions.Denoise(order.copy())
denoised.y = FittingUtilities.Iterative_SV(denoised.y, windowsize, smoothorder, lowreject=lowreject, highreject=highreject, numiters=numiters, expand=expand)
if normalize:
denoised.y /= denoised.y.max()
return denoised
def roundodd(num):
rounded = round(num)
if rounded%2 != 0:
return rounded
else:
if rounded > num:
return rounded - 1
else:
return rounded + 1
def cost(data, prediction, scale = 1, dx=1):
retval = np.sum((prediction - data)**2/scale**2)/float(prediction.size)
#retval = gmean(data/prediction) / np.mean(data/prediction)
#idx = np.argmax(abs(data - prediction))
#std = np.std(data - prediction)
#retval = abs(data[idx] - prediction[idx]) / std
#retval = np.std(data/(prediction/prediction.sum())) / scale
#retval = np.std(data - prediction)/np.mean(scale)
return retval# + 1e-10*np.mean(np.gradient(np.gradient(prediction, dx), dx)**2)
def OptimalSmooth(order, normalize=True):
"""
Determine the best window size with cross-validation
"""
#Flatten the spectrum
order.y /= order.cont/order.cont.mean()
order.err /= order.cont/order.cont.mean()
#Remove outliers (telluric residuals)
smoothed = SmoothData(order, windowsize=41, normalize=False)
temp = smoothed.copy()
temp.y = order.y/smoothed.y
temp.cont = FittingUtilities.Continuum(temp.x, temp.y, lowreject=2, highreject=2, fitorder=3)
outliers = HelperFunctions.FindOutliers(temp, numsiglow=6, numsighigh=6, expand=10)
data = order.copy()
if len(outliers) > 0:
#order.y[outliers] = order.cont[outliers]
order.y[outliers] = smoothed.y[outliers]
order.err[outliers] = 9e9
#Make cross-validation sets
inp = np.transpose((order.x, order.err, order.cont))
X_train, X_test, y_train, y_test = cross_validation.train_test_split(inp, order.y, test_size=0.2)
X_train = X_train.transpose()
X_test = X_test.transpose()
sorter_train = np.argsort(X_train[0])
sorter_test = np.argsort(X_test[0])
training = DataStructures.xypoint(x=X_train[0][sorter_train], y=y_train[sorter_train], err=X_train[1][sorter_train], cont=X_train[2][sorter_train])
validation = DataStructures.xypoint(x=X_test[0][sorter_test], y=y_test[sorter_test], err=X_test[1][sorter_test], cont=X_test[2][sorter_test])
"""
#Try each smoothing parameter
s_array = np.logspace(-3, 1, 100)
chisq = []
for s in s_array:
fcn = smooth(training.x, training.y, w=1.0/training.err, s=s)
prediction = fcn(validation.x)
chisq.append(cost(validation.y, prediction, validation.err))
print s, chisq[-1]
idx = np.argmin(np.array(chisq) - 1.0)
s = s_array[idx]
"""
s = 0.9*order.size()
smoothed = order.copy()
fcn = smooth(smoothed.x, smoothed.y, w=1.0/smoothed.err, s=s)
smoothed.y = fcn(smoothed.x)
plt.plot(order.x, order.y)
plt.plot(smoothed.x, smoothed.y)
plt.show()
return smoothed, s
def CrossValidation(order, smoothorder=5, lowreject=3, highreject=3, numiters=10, normalize=True):
"""
Determine the best window size with cross-validation
"""
#order = HelperFunctions.Denoise(order.copy())
order.y /= order.cont/order.cont.mean()
#plt.plot(order.x, order.y)
# First, find outliers by doing a guess smooth
smoothed = SmoothData(order, windowsize=41, normalize=False)
temp = smoothed.copy()
temp.y = order.y/smoothed.y
temp.cont = FittingUtilities.Continuum(temp.x, temp.y, lowreject=2, highreject=2, fitorder=3)
outliers = HelperFunctions.FindOutliers(temp, numsiglow=6, numsighigh=6, expand=10)
data = order.copy()
if len(outliers) > 0:
#order.y[outliers] = order.cont[outliers]
order.y[outliers] = smoothed.y[outliers]
#plt.plot(order.x, order.y)
#plt.plot(order.x, order.cont)
#plt.show()
#plt.plot(order.x, order.y)
#plt.plot(denoised.x, denoised.y)
#plt.show()
# First, split the data into a training sample and validation sample
    # Use roughly every 6th point for the validation sample
cv_indices = range(6, order.size()-1, 6)
training = DataStructures.xypoint(size=order.size()-len(cv_indices))
validation = DataStructures.xypoint(size=len(cv_indices))
cv_idx = 0
tr_idx = 0
for i in range(order.size()):
if i in cv_indices:
validation.x[cv_idx] = order.x[i]
validation.y[cv_idx] = order.y[i]
validation.cont[cv_idx] = order.cont[i]
validation.err[cv_idx] = order.err[i]
cv_idx += 1
else:
training.x[tr_idx] = order.x[i]
training.y[tr_idx] = order.y[i]
training.cont[tr_idx] = order.cont[i]
training.err[tr_idx] = order.err[i]
tr_idx += 1
#Rebin the training set to constant wavelength spacing
xgrid = np.linspace(training.x[0], training.x[-1], training.size())
training = FittingUtilities.RebinData(training, xgrid)
dx = training.x[1] - training.x[0]
size = 40
left = xgrid.size/2 - size
right = left + size*2
func = np.poly1d(np.polyfit(training.x[left:right]-training.x[left+size], training.y[left:right], 5))
sig = np.std(training.y[left:right] - func(training.x[left:right]-training.x[left+size]))
sig = validation.err*0.8
#print "New std = ", sig
#plt.figure(3)
#plt.plot(training.x[left:right], training.y[left:right])
#plt.plot(training.x[left:right], func(training.x[left:right]))
#plt.show()
#plt.figure(1)
#Find the rough location of the best window size
windowsizes = np.logspace(-1.3, 0.5, num=20)
chisq = []
skip = 0
for i, windowsize in enumerate(windowsizes):
npixels = roundodd(windowsize/dx)
if npixels < 6:
skip += 1
continue
if npixels > training.size:
windowsizes = windowsizes[:i]
break
smoothed = FittingUtilities.Iterative_SV(training.y.copy(), npixels, smoothorder, lowreject, highreject, numiters)
smooth_fcn = interp(training.x, smoothed)
predict = smooth_fcn(validation.x)
#sig = validation.err
#chisq.append(cost(training.y, smoothed, training.err))
chisq.append(cost(validation.y, predict, sig, validation.x[1] - validation.x[0]))
#chisq.append(np.sum((predict - validation.y)**2/sig**2)/float(predict.size))
#sig = np.std(smoothed / training.y)
#chisq.append(np.std(predict/validation.y) / sig)
print "\t", windowsize, chisq[-1]
#plt.loglog(windowsizes, chisq)
#plt.show()
windowsizes = windowsizes[skip:]
chisq = np.array(chisq)
idx = np.argmin(abs(chisq-1.0))
sorter = np.argsort(chisq)
chisq = chisq[sorter]
windowsizes = windowsizes[sorter]
left, right = HelperFunctions.GetSurrounding(chisq, 1, return_index=True)
    if left > right:
        left, right = right, left
print windowsizes[left], windowsizes[right]
#Refine the window size to get more accurate
windowsizes = np.logspace(np.log10(windowsizes[left]), np.log10(windowsizes[right]), num=10)
chisq = []
for i, windowsize in enumerate(windowsizes):
npixels = roundodd(windowsize/dx)
        if npixels > training.size():
windowsizes = windowsizes[:i]
break
smoothed = FittingUtilities.Iterative_SV(training.y.copy(), npixels, smoothorder, lowreject, highreject, numiters)
smooth_fcn = interp(training.x, smoothed)
predict = smooth_fcn(validation.x)
#sig = validation.err
#chisq.append(cost(training.y, smoothed, training.err))
chisq.append(cost(validation.y, predict, sig, validation.x[1] - validation.x[0]))
#chisq.append(np.sum((predict - validation.y)**2/sig**2)/float(predict.size))
#sig = np.std(smoothed / training.y)
#chisq.append(np.std(predict/validation.y) / sig)
print "\t", windowsize, chisq[-1]
chisq = np.array(chisq)
idx = np.argmin(abs(chisq-1.0))
windowsize = windowsizes[idx]
npixels = roundodd(windowsize/dx)
smoothed = order.copy()
smoothed.y = FittingUtilities.Iterative_SV(order.y, npixels, smoothorder, lowreject, highreject, numiters)
#plt.plot(data.x, data.y)
#plt.plot(smoothed.x, smoothed.y)
#plt.show()
if normalize:
smoothed.y /= smoothed.y.max()
return smoothed, windowsize
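# A minimal, self-contained sketch of the train/validation split used in
# CrossValidation above: every n-th sample is held out for validation and the
# rest is kept for training.  Illustrative only; nothing in the pipeline calls it.
def _split_every_nth(x, y, n=6):
    import numpy as np
    idx = np.arange(len(x))
    val = (idx % n == 0) & (idx > 0) & (idx < len(x) - 1)
    return x[~val], y[~val], x[val], y[val]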
def GPSmooth(data, low=0.1, high=10, debug=False):
"""
This will smooth the data using Gaussian processes. It will find the best
smoothing parameter via cross-validation to be between the low and high.
The low and high keywords are reasonable bounds for A and B stars with
vsini > 100 km/s.
"""
smoothed = data.copy()
# First, find outliers by doing a guess smooth
smoothed = SmoothData(data, normalize=False)
temp = smoothed.copy()
temp.y = data.y/smoothed.y
temp.cont = FittingUtilities.Continuum(temp.x, temp.y, lowreject=2, highreject=2, fitorder=3)
outliers = HelperFunctions.FindOutliers(temp, numsiglow=3, expand=5)
if len(outliers) > 0:
data.y[outliers] = smoothed.y[outliers]
gp = GaussianProcess(corr='squared_exponential',
theta0 = np.sqrt(low*high),
thetaL = low,
thetaU = high,
normalize = False,
nugget = (data.err / data.y)**2,
random_start=1)
try:
gp.fit(data.x[:,None], data.y)
except ValueError:
#On some orders with large telluric residuals, this will fail.
# Just fall back to the old smoothing method in that case.
return SmoothData(data), 91
if debug:
print "\tSmoothing parameter theta = ", gp.theta_
smoothed.y, smoothed.err = gp.predict(data.x[:,None], eval_MSE=True)
return smoothed, gp.theta_[0][0]
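# Hedged sketch of the same idea with the current scikit-learn API
# (GaussianProcessRegressor replaced the legacy GaussianProcess estimator used
# above).  This is an illustrative alternative, not the routine this script runs.
def _gp_smooth_sketch(x, y, yerr, low=0.1, high=10.0):
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.gaussian_process.kernels import RBF
    kernel = RBF(length_scale=(low * high) ** 0.5,
                 length_scale_bounds=(low, high))
    gp = GaussianProcessRegressor(kernel=kernel, alpha=(yerr / y) ** 2)
    gp.fit(x[:, None], y)
    return gp.predict(x[:, None], return_std=True)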
if __name__ == "__main__":
fileList = []
plot = False
vsini_file = "%s/School/Research/Useful_Datafiles/Vsini.csv" %(os.environ["HOME"])
vsini_skip = 10
vsini_idx = 1
for arg in sys.argv[1:]:
if "-p" in arg:
plot = True
elif "-vsinifile" in arg:
vsini_file = arg.split("=")[-1]
elif "-vsiniskip" in arg:
vsini_skip = int(arg.split("=")[-1])
elif "-vsiniidx" in arg:
vsini_idx = int(arg.split("=")[-1])
else:
fileList.append(arg)
#Read in the vsini table
vsini_data = ascii.read(vsini_file)[vsini_skip:]
if len(fileList) == 0:
fileList = [f for f in os.listdir("./") if f.endswith("telluric_corrected.fits")]
for fname in fileList:
orders = HelperFunctions.ReadFits(fname, extensions=True, x="wavelength", y="flux", cont="continuum", errors="error")
#Find the vsini of this star
header = fits.getheader(fname)
starname = header["object"]
for data in vsini_data:
if data[0] == starname:
vsini = abs(float(data[vsini_idx]))
break
else:
sys.exit("Cannot find %s in the vsini data: %s" %(starname, vsini_file))
print starname, vsini
#Begin looping over the orders
column_list = []
header_list = []
for i, order in enumerate(orders):
print "Smoothing order %i/%i" %(i+1, len(orders))
#Fix errors
order.err[order.err > 1e8] = np.sqrt(order.y[order.err > 1e8])
#Linearize
xgrid = np.linspace(order.x[0], order.x[-1], order.x.size)
order = FittingUtilities.RebinData(order, xgrid)
dx = order.x[1] - order.x[0]
smooth_factor = 0.8
theta = max(21, roundodd(vsini/3e5 * order.x.mean()/dx * smooth_factor))
denoised = SmoothData(order,
windowsize=theta,
smoothorder=3,
lowreject=3,
highreject=3,
expand=10,
numiters=10)
#denoised, theta = GPSmooth(order.copy())
#denoised, theta = CrossValidation(order.copy(), 5, 2, 2, 10)
#denoised, theta = OptimalSmooth(order.copy())
#denoised.y *= order.cont/order.cont.mean()
print "Window size = %.4f nm" %theta
column = {"wavelength": denoised.x,
"flux": order.y / denoised.y,
"continuum": denoised.cont,
"error": denoised.err}
header_list.append((("Smoother", theta, "Smoothing Parameter"),))
column_list.append(column)
if plot:
plt.figure(1)
plt.plot(order.x, order.y/order.y.mean())
plt.plot(denoised.x, denoised.y/denoised.y.mean())
plt.title(starname)
plt.figure(2)
plt.plot(order.x, order.y/denoised.y)
plt.title(starname)
#plt.plot(order.x, (order.y-denoised.y)/np.median(order.y))
#plt.show()
if plot:
plt.show()
outfilename = "%s_smoothed.fits" %(fname.split(".fits")[0])
print "Outputting to %s" %outfilename
HelperFunctions.OutputFitsFileExtensions(column_list, fname, outfilename, mode='new', headers_info=header_list)
|
gpl-3.0
|
harshk360/yantra_shiksha
|
hw5/nips_em.py
|
1
|
4672
|
import math, random, copy
import numpy as np
import sys
from scipy import misc
from scipy.spatial import distance
from scipy.cluster.vq import vq, kmeans, whiten
from numpy.linalg import inv
import matplotlib.pyplot as plt
def expectation_maximization(x, nbclusters, nbiter=30, epsilon=0.0001):
def closest_cluster(words_for_doc, mus, nbclusters):
distances = np.zeros((nbclusters,))
for j in xrange(nbclusters):
distances[j] = distance.euclidean(words_for_doc, mus[j])
return np.argmin(distances)
def calculate_q(x, nbclusters, mus, pies, w):
sigma = 0
for j in xrange(nbclusters):
inner_prod = x*np.log(mus[j])
sum = inner_prod + math.log(pies[j])
sigma += sum*w[:,j,np.newaxis]
return np.sum(sigma)
#E step, compute w_i,j
#vector of pies - pies - [j]
#vector of mus - mus - [j,k]
#x - [i,k]
#w should be [i,j]
#logA - [i,j]
#logA_max - [i,1]
#logY - log(w_ij)
def e_step(x, nbclusters, mus, pies):
logA = np.zeros((x.shape[0], nbclusters))
for j in xrange(nbclusters):
sigma = x*np.log(mus[j])
logA[:,j] = np.log(pies[j]) + np.sum(sigma, axis=1)
logA_max = np.zeros((x.shape[0],))
logA.max(axis=1, out=logA_max)
sum=0
for j in xrange(nbclusters):
sum += np.exp(logA[:,j] - logA_max)
term3 = np.log(sum)
logY = np.zeros((x.shape[0], nbclusters))
for j in xrange(nbclusters):
logY[:,j] = logA[:,j] - logA_max - term3
y = np.exp(logY)
w = y
return w
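    # Minimal sketch of the log-sum-exp trick used by e_step above, in isolation:
    # subtracting the row-wise maximum before exponentiating keeps exp() from
    # underflowing when the log-likelihoods are very negative.  Illustrative only.
    def _logsumexp_rows(logA):
        m = logA.max(axis=1, keepdims=True)
        return m[:, 0] + np.log(np.exp(logA - m).sum(axis=1))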
def m_step(x, w, nbclusters):
new_mus = np.zeros((nbclusters, x.shape[1]))
new_pies = np.zeros((nbclusters))
for j in xrange(nbclusters):
den = np.sum(np.sum(x, axis=1)*w[:,j])
num = np.sum(x*w[:,j,np.newaxis], axis=0)
new_mus[j] = num/den
            new_pies[j] = np.sum(w[:, j]) / x.shape[0]
new_new_mus = np.zeros((nbclusters, x.shape[1]))
for j in xrange(nbclusters):
            new_new_mus[j] = (new_mus[j] + .0001) / (np.sum(new_mus[j]) + new_mus.shape[1] * .0001)
return new_new_mus, new_pies
#USE K-MEANS TO GET INITIAL CENTERS
centroids, distortion = kmeans(x,k_or_guess=nbclusters, iter=5)
#normalizes ps ie mus
mus = np.zeros((nbclusters, centroids.shape[1]))
for j in xrange(nbclusters):
        mus[j] = (centroids[j] + .0001) / (np.sum(centroids[j]) + centroids.shape[1] * .0001)
pies = np.full((nbclusters), 1.0/nbclusters)
iter = 0
difference = 10000
old_q = 0
q = 0
while iter < 30 and difference > epsilon:
iter += 1
print "running iteration " + str(iter)
w = e_step(x, nbclusters, mus, pies)
mus, pies = m_step(x, w, nbclusters)
old_q = q
q = calculate_q(x, nbclusters, mus, pies, w)
difference = abs(q-old_q)/abs(q)
print "Difference in quality is " + str(difference)
result = {}
result['clusters'] = {}
result['params'] = {}
for i in xrange(nbclusters):
result['params'][i] = {}
result['params'][i]['pi'] = pies[i]
result['params'][i]['mu'] = mus[i]
for index, words_for_doc in enumerate(x):
cluster = closest_cluster(words_for_doc, mus, nbclusters)
if cluster not in result['clusters']:
result['clusters'][cluster]=[]
result['clusters'][cluster].append(index)
#find top 10 words for each cluster
print ""
print "top 10 words for each cluster"
data = [line.strip() for line in open("vocab.nips.txt", 'r')]
for i in xrange(nbclusters):
top10 = result['params'][i]['mu'].argsort()[-10:][::-1]
top10_words = [data[index] for index in top10]
print top10_words
return result
#-----------------------------------------------------------------------
num_clusters = 30
data = np.loadtxt('docword.nips.txt', skiprows=3)
#k is total number of words
k = int(np.max(data[:, 1]))
i = 1500
#x - [i,k]
x = np.zeros((i,k))
for observation in data:
    x[int(observation[0]) - 1][int(observation[1]) - 1] = observation[2]
# for vector in x:
# for word in vector:
# word += 1
print x
result = expectation_maximization(x, num_clusters)
new_pies = np.zeros(num_clusters)
for i in xrange(num_clusters):
new_pies[i]=result['params'][i]['pi']
x_s = np.zeros(30)
for i in xrange(30):
x_s[i] =i
fig, ax= plt.subplots()
ax.set_xlim([0,30])
ax.set_ylim([0,np.max(new_pies)+.01])
plt.plot(new_pies)
plt.scatter(x_s,new_pies,s=100)
plt.ylabel('Probablities')
#plt.yticks(.01)
plt.xlabel('Topics')
plt.show()
|
gpl-3.0
|
cpatrickalves/simprev
|
util/graficos.py
|
1
|
8682
|
# -*- coding: utf-8 -*-
"""
@author: Patrick Alves
"""
from matplotlib import pyplot as plt
import numpy as np
import os
##### Module that builds the result charts
# Directory where the figures will be saved
fig_dir = os.path.join('resultados', 'figuras')
if not os.path.isdir(fig_dir):
os.makedirs(fig_dir)
def plot_erros_LDO2018(resultados, savefig=False, showfig=True):
x = resultados['erro_despesas'].index #2014-2060
plt.plot(resultados['erro_despesas'], label='Despesa',lw=4, ls='--' )
plt.plot(resultados['erro_receitas'], label='Receita', lw=4)
plt.plot(resultados['erro_PIB'], label='PIB', lw=4, ls=':')
plt.grid(True)
plt.xlabel('ANO')
plt.ylabel('VARIAÇÃO (%)')
plt.title('Variação da Projeção do SimPrev com relação a LDO', y=1.05)
plt.xticks(np.arange(min(x)+6, max(x)+1, 5.0))
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.15), ncol=3)
if savefig: plt.savefig(os.path.join(fig_dir, 'variacao_simprev_LDO.png'), dpi=300, format='png', bbox_inches='tight')
if showfig: plt.show()
plt.close()
plt.plot(resultados['despesas_PIB'], label='SimPrev',lw=4, ls='--' )
plt.plot(resultados['despesas_PIB_LDO'], label='LDO', lw=4)
plt.grid(True)
plt.xlabel('ANO')
plt.ylabel('DESPESA/PIB (%)')
plt.title('Variação da Projeção da Despesa/PIB do SimPrev com relação a LDO', y=1.05)
plt.xticks(np.arange(min(x)+1, max(x)+1, 5.0))
plt.legend(loc=0)
if savefig: plt.savefig(os.path.join(fig_dir, 'despesas_pib_simprev_LDO.png'), dpi=300, format='png')
if showfig: plt.show()
plt.close()
plt.plot(resultados['receitas_PIB'], label='SimPrev',lw=4, ls='--' )
plt.plot(resultados['receitas_PIB_LDO'], label='LDO', lw=4)
plt.grid(True)
plt.xlabel('ANO')
plt.ylabel('RECEITA/PIB(%)')
plt.title('Variação da Projeção da Receita/PIB do SimPrev com relação a LDO', y=1.05)
plt.xticks(np.arange(min(x)+1, max(x)+1, 5.0))
plt.legend(loc=0)
if savefig: plt.savefig(os.path.join(fig_dir, 'receitas_pib_simprev_LDO.png'), dpi=300, format='png')
if showfig: plt.show()
plt.close()
plt.plot(resultados['despesas_LDO'], label='LDO',lw=4, ls='--' )
plt.plot(resultados['despesas'], label='SimPrev', lw=4)
plt.grid(True)
plt.xlabel('ANO')
plt.ylabel('VALOR (R$)')
plt.title('Despesas do RGPS', y=1.05)
plt.xticks(np.arange(min(x)+1, max(x)+1, 5.0))
plt.legend(loc=0)
if savefig: plt.savefig(os.path.join(fig_dir, 'despesas_simprev_LDO.png'), dpi=300, format='png')
if showfig: plt.show()
plt.close()
plt.plot(resultados['receitas_LDO'], label='LDO',lw=4, ls='--' )
plt.plot(resultados['receitas'], label='SimPrev', lw=4)
plt.grid(True)
plt.xlabel('ANO')
plt.ylabel('VALOR (R$)')
plt.title('Receitas do RGPS', y=1.05)
plt.xticks(np.arange(min(x)+1, max(x)+1, 5.0))
plt.legend(loc=0)
if savefig: plt.savefig(os.path.join(fig_dir, 'receitas_simprev_LDO.png'), dpi=300, format='png')
if showfig: plt.show()
plt.close()
    # Bar chart of the errors compared with the AEPS
index = np.arange(4)
bar_width = 0.35
plt.bar(index, resultados['erros_AEPS'].loc[2014, :], bar_width, label='2014', color='b')
plt.bar(index + bar_width, resultados['erros_AEPS'].loc[2015, :], bar_width, label='2015', color='r')
plt.grid(True)
plt.ylabel('ERRO (%)')
plt.title('Erros com relação ao AEPS', y=1.05)
plt.xticks(index + bar_width, ('Receitas', 'Despesas', 'Aposentadorias', 'Pensões'))# plt.xticks([2014, 2015])
plt.legend(loc=0)
plt.tight_layout()
if savefig: plt.savefig(os.path.join(fig_dir, 'erros_aeps.png'), dpi=300, format='png')
if showfig: plt.show()
plt.close()
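# Hedged usage sketch with synthetic data: the real `resultados` dict comes from
# the SimPrev simulation; the keys below simply mirror the ones read above and
# the values are placeholders, so this only demonstrates the expected shapes.
def _exemplo_plot_erros():
    import pandas as pd
    anos = range(2014, 2061)
    serie = lambda v: pd.Series([v] * len(anos), index=anos)
    resultados = {k: serie(1.0) for k in
                  ['erro_despesas', 'erro_receitas', 'erro_PIB',
                   'despesas_PIB', 'despesas_PIB_LDO',
                   'receitas_PIB', 'receitas_PIB_LDO',
                   'despesas', 'despesas_LDO',
                   'receitas', 'receitas_LDO']}
    resultados['erros_AEPS'] = pd.DataFrame(
        [[1.0, 2.0, 3.0, 4.0]] * 2, index=[2014, 2015],
        columns=['Receitas', 'Despesas', 'Aposentadorias', 'Pensoes'])
    plot_erros_LDO2018(resultados, savefig=False, showfig=False)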
def plot_resultados(resultados, savefig=False, showfig=True):
x = resultados['despesas'].index #2014-2060
    # Revenues and expenditures
plt.plot(resultados['receitas'], label='Receitas',lw=4)
plt.plot(resultados['despesas'], label='Despesas', lw=4)
plt.grid(True)
plt.xlabel('ANO')
plt.ylabel('VALOR (R$)')
plt.title('Receitas e Despesas do RGPS', y=1.05)
plt.set_cmap('jet')
plt.xticks(np.arange(min(x)+1, max(x)+1, 5.0))
plt.legend(loc=0)
if savefig: plt.savefig(os.path.join(fig_dir, 'receitas_despesa_simprev.png'), dpi=300, format='png')
if showfig: plt.show()
plt.close()
    # Revenues and expenditures over GDP
plt.plot(resultados['receitas_PIB'], label='Receitas/PIB',lw=4, ls='--', color='b' )
plt.plot(resultados['despesas_PIB'], label='Despesas/PIB', lw=4, color='r')
plt.grid(True)
plt.xlabel('ANO')
plt.ylabel('RECEITA E DESPESA / PIB (%)')
plt.title('Receitas e Despesas do RGPS sobre o PIB', y=1.05)
plt.xticks(np.arange(min(x)+1, max(x)+1, 5.0))
plt.legend(loc=0)
if savefig: plt.savefig(os.path.join(fig_dir, 'receita_despesa_pib_simprev.png'), dpi=300, format='png')
if showfig: plt.show()
plt.close()
    # Financial result
plt.plot(resultados['resultado_financeiro'], lw=4 )
plt.grid(True)
plt.xlabel('ANO')
plt.ylabel('RESULTADO FINANCEIRO (R$)')
plt.title('Resultado Financeiro do RGPS', y=1.05)
plt.xticks(np.arange(min(x)+1, max(x)+1, 5.0))
if savefig: plt.savefig(os.path.join(fig_dir, 'resultado_financeiro_simprev.png'), dpi=300, format='png')
if showfig: plt.show()
plt.close()
    # Financial result over GDP
plt.plot(resultados['resultado_financeiro_PIB'],lw=4 )
plt.grid(True)
plt.xlabel('ANO')
plt.ylabel('RESULTADO FINANCEIRO/PIB (%)')
plt.title('Necessidade de Financiamento do RGPS pelo PIB', y=1.05)
plt.xticks(np.arange(min(x)+1, max(x)+1, 5.0))
if savefig: plt.savefig(os.path.join(fig_dir, 'resultado_financeiro_pib_simprev.png'), dpi=300, format='png')
if showfig: plt.show()
plt.close()
    # Numbers of contributors and beneficiaries
plt.plot(resultados['contribuintes'],lw=4, label='Contribuintes', color='b', ls='--' )
plt.plot(resultados['beneficiarios'],lw=4, label='Beneficiários', color='r')
plt.grid(True)
plt.xlabel('ANO')
plt.ylabel('QUANTIDADE')
plt.title('Quantidade de Contribuintes e Beneficiários', y=1.05)
plt.xticks(np.arange(min(x)+1, max(x)+1, 5.0))
plt.legend(loc=0)
if savefig: plt.savefig(os.path.join(fig_dir, 'contribuintes_beneficiarios_simprev.png'), dpi=300, format='png')
if showfig: plt.show()
plt.close()
    # Average wage and average benefit value
plt.plot(resultados['salario_medio'],lw=4, label='Salário médio', color='b', ls='--' )
plt.plot(resultados['valor_medio_beneficios'],lw=4, label='Valor médio benefícios', color='r')
plt.grid(True)
plt.xlabel('ANO')
plt.ylabel('VALOR (R$)')
    plt.title('Salário Médio e Valor Médio dos Benefícios', y=1.05)
plt.xticks(np.arange(min(x)+1, max(x)+1, 5.0))
plt.legend(loc=0)
if savefig: plt.savefig(os.path.join(fig_dir, 'salarioMedio_valorBeneficio_simprev.png'), dpi=300, format='png')
if showfig: plt.show()
plt.close()
    # Social-security dependency ratio
plt.plot(resultados['RDP'],lw=4, color='b')
plt.grid(True)
plt.xlabel('ANO')
plt.ylabel('BENEFICIÁRIOS/CONTRIBUINTES')
plt.title('Razão de dependência previdenciária', y=1.05)
plt.xticks(np.arange(min(x)+1, max(x)+1, 5.0))
if savefig: plt.savefig(os.path.join(fig_dir, 'rdp_simprev.png'), dpi=300, format='png')
if showfig: plt.show()
plt.close()
    # Replacement rate
plt.plot(resultados['taxa_reposicao'], lw=4, color='r')
plt.grid(True)
plt.xlabel('ANO')
plt.ylabel('TAXA DE REPOSIÇÃO')
plt.title('Taxa de Reposição Média', y=1.05)
plt.xticks(np.arange(min(x)+1, max(x)+1, 5.0))
if savefig: plt.savefig(os.path.join(fig_dir, 'taxa_reposicao_simprev.png'), dpi=300, format='png')
if showfig: plt.show()
plt.close()
    # Synthetic sustainability indicator
plt.plot(resultados['ISS'], lw=4, color='r')
plt.grid(True)
plt.xlabel('ANO')
plt.ylabel('INDICADOR DE SUSTENTABILIDADE')
plt.title('Indicador sintético da sustentabilidade', y=1.05)
plt.xticks(np.arange(min(x)+1, max(x)+1, 5.0))
if savefig: plt.savefig(os.path.join(fig_dir, 'iss_simprev.png'), dpi=300, format='png')
if showfig: plt.show()
plt.close()
|
gpl-3.0
|
isomerase/mozziesniff
|
temperature_interpolator_testing.py
|
2
|
3181
|
import numpy as np
from scipy.interpolate import Rbf
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# generate observations
left = -2.
right = 3.
# floor = 0.
# ceiling = 1.
bottom = -1.
top = 2.
resolution = 0.7
unique_xs = np.arange(bottom, top, resolution)
unique_ys = np.arange(left, right, resolution)
coords = (unique_xs, unique_ys)
coords_mesh = np.meshgrid(*coords, indexing="ij")
fn_value = np.power(coords_mesh[0], 2) + coords_mesh[1]  # F(x, y)
coords_array = np.vstack([c.flatten() for c in coords_mesh]).T  # Columns are x, y
x, y = [c.flatten() for c in coords_mesh]
d = fn_value.flatten()
xx, yy = np.meshgrid(unique_xs, unique_ys, indexing='ij')
df_dict = dict()
df_dict['x'] = xx.ravel()
df_dict['y'] = yy.ravel()
observations = pd.DataFrame(data=df_dict)
# make some places hot
temp = 19.
observations['avg_temp'] = np.array([temp] * len(observations)) # make it the same temperature everywhere
# observations.loc[((observations['x'] > -.5) & (observations['x'] < 0.5) & (observations['y'] > -.2) \
# & (observations['y'] < 0.6)), 'avg_temp'] = 50.
# slant
# observations.loc[((observations['x'] > 0) &
# (observations['x'] < top) &
# (observations['y'] > 0) &
# (observations['y'] < right)),
# 'avg_temp'] = observations.loc[((observations['x'] > 0) & (observations['x'] < top) & (observations['y'] > 0) & (observations['y'] < right)), 'x'] * 50 +\
# observations.loc[((observations['x'] > 0) & (observations['x'] < top) & (observations['y'] > 0) & (observations['y'] < right)), 'y'] / 50
# tt = np.reshape(observations.avg_temp, np.shape(xx))
# ### gauss
# tt = plt.mlab.bivariate_normal(xx, yy)
# observations['avg_temp'] = np.ravel(tt)
# tt = np.reshape(observations.avg_temp, np.shape(xx))
# make interpolator
rbfi = Rbf(observations.x.values, observations.y.values, observations.avg_temp.values)
# function='cubic')
# # define positions to interpolate at
# xi = np.linspace(bottom/2, top/2, 10) # xmin * .8
# yi = np.linspace(left/2, right/2, 10)
# xxi, yyi = np.meshgrid(xi, yi, indexing='ij')
# xxi_flat = xxi.ravel()
# yyi_flat = yyi.ravel()
# interpolate
# interp_temps = rbfi(xxi_flat, yyi_flat)
# tti = interp_temps.reshape((len(xi), len(yi)))
interp_temps = rbfi(observations.x.values, observations.y.values)
tti = interp_temps.reshape(np.shape(xx))
print """
Interpolated temp stats
min {}
max {}
avg {}
""".format(interp_temps.min(), interp_temps.max(), interp_temps.mean())
fig = plt.figure()
ax = fig.gca(projection='3d')
# plt.scatter(xxi_flat, yyi_flat, c=interp_temps, cmap='inferno', lw=0)
# ax.plot_wireframe(xx, yy, tt)
ax.plot_wireframe(xx, yy, tti, color='green')
# plt.scatter(observations.x.values, observations.y.values, c=observations.avg_temp.values, marker='x')
plt.show()
# # save to df
# df_dict = dict()
# df_dict['x'] = xxi_flat
# df_dict['y'] = yyi_flat
# df_dict['avg_temp'] = interp_temps
# df = pd.DataFrame(df_dict)
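# Minimal self-contained Rbf sketch, independent of the exploration above:
# fit scattered samples of a known function and query the interpolant.
def _rbf_demo():
    xs = np.linspace(-1.0, 1.0, 10)
    ys = np.linspace(-1.0, 1.0, 10)
    gx, gy = np.meshgrid(xs, ys, indexing="ij")
    values = gx ** 2 + gy
    rbf = Rbf(gx.ravel(), gy.ravel(), values.ravel())
    return rbf(0.25, -0.5)  # roughly 0.25**2 - 0.5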
|
mit
|
rohanp/scikit-learn
|
benchmarks/bench_20newsgroups.py
|
377
|
3555
|
from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
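# Another estimator could be registered the same way; the sketch below is kept
# commented out so the benchmark's CLI choices stay exactly as published.
# from sklearn.naive_bayes import BernoulliNB
# ESTIMATORS["bernoulli_nb"] = BernoulliNB()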
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
|
bsd-3-clause
|
Emptyset110/dHydra
|
dHydra/core/util.py
|
1
|
21041
|
# -*- coding: utf-8 -*-
"""
Utility functions
Created on 03/01/2016
@author: Wen Gu
@contact: emptyset110@gmail.com
"""
import requests
import asyncio
import math
import time
from datetime import datetime
import pandas
import os
import ntplib
from pandas import DataFrame
from pymongo import MongoClient
import re
import random
import json
import logging
def get_worker_names(logger=None):
"""
    Return every possible worker_name based on the folder names
:return:
"""
worker_names = []
path = os.path.split(os.path.realpath(__file__))[0][:-5]+"/Worker"
worker_names.extend(os.listdir(path))
try:
worker_names.extend(os.listdir(os.getcwd()+"/Worker"))
except FileNotFoundError as e:
if logger is None:
print("dHydra运行目录下没有Worker文件夹")
else:
logger.warning("dHydra运行目录下没有Worker文件夹")
return worker_names
def camel_to_underscore(name):
if len(name) > 0:
name = name[0].lower()+name[1:len(name)]
name = re.sub(
r'(?P<value>[A-Z])', lambda x: '_'+x.group('value').lower(), name
)
return name
def get_logger(
logger_name="main",
log_path="log", #
console_log=True, # 屏幕打印日志开关,默认True
console_log_level=logging.INFO, # 屏幕打印日志的级别,默认为INFO
critical_log=False, # critical单独写文件日志,默认关闭
error_log=True, # error级别单独写文件日志,默认开启
warning_log=False, # warning级别单独写日志,默认关闭
info_log=True, # info级别单独写日志,默认开启
debug_log=False, # debug级别日志,默认关闭
):
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(message)s'
)
logger = logging.getLogger(logger_name)
logger.setLevel(logging.DEBUG)
if log_path:
# 补全文件夹
if log_path[-1] != '/':
log_path += '/'
if not logger.handlers:
# 屏幕日志打印设置
if console_log:
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
console_handler.setLevel(logging.INFO)
logger.addHandler(console_handler)
if not os.path.exists(log_path + logger_name):
os.makedirs(log_path + logger_name)
# 打开下面的输出到文件
if critical_log:
log_handler = logging.FileHandler(
log_path + logger_name + '/critical.log'
)
log_handler.setLevel(logging.CRITICAL)
log_handler.setFormatter(formatter)
logger.addHandler(log_handler)
if error_log:
log_handler = logging.FileHandler(
log_path + logger_name + '/error.log'
)
log_handler.setLevel(logging.ERROR)
log_handler.setFormatter(formatter)
logger.addHandler(log_handler)
if warning_log:
log_handler = logging.FileHandler(
log_path + logger_name + '/warning.log'
)
log_handler.setLevel(logging.WARNING)
log_handler.setFormatter(formatter)
logger.addHandler(log_handler)
if info_log:
log_handler = logging.FileHandler(
log_path + logger_name + '/info.log'
)
log_handler.setLevel(logging.INFO)
log_handler.setFormatter(formatter)
logger.addHandler(log_handler)
if debug_log:
log_handler = logging.FileHandler(
log_path + logger_name + '/debug.log'
)
log_handler.setLevel(logging.DEBUG)
log_handler.setFormatter(formatter)
logger.addHandler(log_handler)
return logger
def generate_token():
import hashlib
token = hashlib.sha1()
token.update(str(time.time()).encode())
token = token.hexdigest()
return token
def _code_to_symbol(code, index=False):
"""
    Generate the exchange-prefixed symbol for a stock code
@author: Jimmy Liu
@group : waditu
@contact: jimmysoa@sina.cn
@modified: Wen Gu
"""
    # conversion table between index names and their symbol codes (no need to modify)
INDEX_LABELS = ['sh', 'sz', 'hs300', 'sz50', 'cyb', 'zxb', 'zx300', 'zh500']
INDEX_LIST = {'sh': 'sh000001', 'sz': 'sz399001',
'hs300': 'sz399300',
'sz50': 'sh000016',
'zxb': 'sz399005',
'cyb': 'sz399006',
'zx300': 'sz399008',
'zh500': 'sh000905',
'HSCEI': 'sz110010',
'HSI': 'sz110000'
}
if code in INDEX_LIST.keys():
return INDEX_LIST[code]
else:
if len(code) != 6:
return ''
else:
if index is True:
return 'sh%s' % code if code[:1] in ['5', '6', '9', '0']\
else 'sz%s' % code
else:
return 'sh%s' % code if code[:1] in ['5', '6', '9']\
else 'sz%s' % code
def symbol_list_to_code(symbolList):
codeList = []
for symbol in symbolList:
codeList.append(symbol[2:8])
return codeList
def code_list_to_symbol(codeList, index=False):
symbolList = []
for code in codeList:
symbolList.append(_code_to_symbol(code, index=index))
return symbolList
def _get_public_ip():
return requests.get('http://ipinfo.io/ip').text.strip()
def get_client_ip():
while True:
try:
response = requests.get(
'https://ff.sinajs.cn/?_=%s&list=sys_clientip'
% int(time.time() * 1000)).text
ip = re.findall(r'\"(.*)\"', response)
break
except Exception as e:
try:
ip = _get_public_ip()
return ip
except:
pass
return ip[0]
# hand an event loop to a thread to run a set of tasks
def thread_loop(loop, tasks):
# loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
# slice a list by a given step (or into num chunks) and return the resulting list of slices
def slice_list(step=None, num=None, data_list=None):
if not ((step is None) & (num is None)):
if num is not None:
step = math.ceil(len(data_list) / num)
return [data_list[i: i + step] for i in range(0, len(data_list), step)]
else:
print("step和num不能同时为空")
return False
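# Hedged usage sketch for slice_list (illustrative, not called anywhere):
def _slice_list_example():
    assert slice_list(step=2, data_list=[1, 2, 3, 4, 5]) == [[1, 2], [3, 4], [5]]
    assert slice_list(num=2, data_list=[1, 2, 3, 4, 5]) == [[1, 2, 3], [4, 5]]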
# hand n tasks to m threads
def threads_for_tasks(taskList):
import threading
threads = []
for task in taskList:
t = threading.Thread(target=task.target, args=task.args)
threads.append(t)
for t in threads:
t.start()
print("开启线程:", t.name)
for t in threads:
t.join()
def symbols_to_string(symbols):
if (
isinstance(symbols, list) or
isinstance(symbols, set) or
isinstance(symbols, tuple) or
isinstance(symbols, pandas.Series)
):
return ','.join(symbols)
else:
return symbols
"""
Time-related conversion helpers
"""
def datetime_to_timestamp(dt, timeFormat='ms'):
if timeFormat == 'ms':
return int(time.mktime(dt.timetuple()) * 1000)
elif timeFormat == 's':
return int(time.mktime(dt.timetuple()))
def date_to_timestamp(date, dateFormat='%Y-%m-%d', timeFormat='ms'):
return datetime_to_timestamp(
dt=datetime.strptime(date, dateFormat),
timeFormat=timeFormat,
)
def string_to_date(date):
return datetime.strptime(date, "%Y-%m-%d").date()
def timestamp_to_datetime(timestamp, timeFormat='ms'):
if timeFormat == 'ms':
timestamp = timestamp / 1000
    return datetime.fromtimestamp(timestamp)
def time_now():
return int(time.time() * 1000)
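# Hedged round-trip sketch for the conversion helpers above (the numeric value
# of the timestamp depends on the local timezone):
def _time_conversion_example():
    dt = datetime(2016, 1, 3, 12, 0, 0)
    ms = datetime_to_timestamp(dt)      # milliseconds since the epoch
    return timestamp_to_datetime(ms)    # back to a datetime object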
# fetch a timestamp from the national time service (via NTP)
def get_network_time():
start = time.time()
c = ntplib.NTPClient()
response = c.request('pool.ntp.org')
ts = response.tx_time
return ts
def check_time(precision=0.1):
duration = 2.0
while duration > precision:
try:
print("{}, 开始获取网络时间戳".format(time.time()))
start = time.time()
networkTime = get_network_time()
end = time.time()
duration = end - start
except Exception as e:
print("获取网络时间出了点小状况,正重试", duration)
# print("网络耗时:{}".format( duration ) )
# print("{}, 网络时间戳".format( networkTime ) )
# print("{}, 现在时间戳".format( time.time()) )
difference = networkTime - (start + duration)
print("difference = {}, (本地时间戳+difference)=网络时间戳".format(difference))
return difference
"""
Helpers related to symbols
"""
def split_symbols(symbols):
df = DataFrame(symbols, columns=['s'])
sz = list(df[df.s > 'sz']["s"])
sh = list(df[df.s < 'sz']["s"])
return [sz, sh]
def upper(data_list):
for i in range(0, len(data_list)):
data_list[i] = data_list[i].upper()
return data_list
"""
Functions for parsing Sina L2 (Level-2) data
"""
def get_trading_date():
from dHydra.core.Functions import get_vendor
print("实例化Sina")
sina = get_vendor("Sina")
print("尝试获取交易日")
sh000300 = sina.get_quote(symbols=["sh000300"],timeout=5)
sh000300_date = sh000300.iloc[0].date
return sh000300_date
def ws_parse(message, trading_date, to_dict=False):
"""
    trading_date is best passed in by the caller
:param message:
:param trading_date: e.g."2016-12-30"
:param to_dict:
:return:
"""
data_list = re.findall(
r'(?:((?:2cn_)?((?:sh|sz)[\d]{6})'
r'(?:_0|_1|_orders|_i)?)(?:=)(.*)(?:\n))',
message,
)
result = list()
for data in data_list:
if len(data[0]) == 12: # quotation
wstype = 'quotation'
elif (data[0][-2:] == '_0') | (data[0][-2:] == '_1'):
wstype = 'transaction'
elif data[0][-6:] == 'orders':
wstype = 'orders'
elif data[0][-2:] == '_i':
wstype = 'info'
else:
wstype = 'other'
result = ws_parse_to_list(
wstype=wstype,
symbol=data[1],
data=data[2],
trading_date=trading_date,
result=result,
to_dict=to_dict
)
return result
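# Hedged, synthetic example of the expected wire format (the field values are
# made up purely for illustration; only the shape mirrors a real Sina L2 payload):
def _ws_parse_example():
    msg = "2cn_sh600000_0=123|09:30:01.500|10.01|200|2002.0|1|2|0|0\n"
    return ws_parse(msg, trading_date="2016-12-30", to_dict=True)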
def ws_parse_to_list(wstype, symbol, data, trading_date, result, to_dict):
data = data.split(',')
    if wstype == 'transaction':
for d in data:
x = list()
x.append(wstype)
x.append(symbol)
x.extend(d.split('|'))
if to_dict is True:
t = transaction_to_dict(x, trading_date)
if t is not None:
result.append(t)
else:
result.append(x)
else:
x = list()
x.append(wstype)
x.append(symbol)
x.extend(data)
if to_dict is True:
if wstype is 'quotation':
result.append(quotation_to_dict(x))
# elif wstype is 'info':
# result.append(info_to_dict(x))
elif wstype is 'orders':
result.append(orders_to_dict(x, trading_date))
else:
result.append(x)
return result
def orders_to_dict(data, trading_date):
"""
return
------
"""
try:
orders = {
"data_type": "orders",
"symbol": data[1],
"time": datetime(
int(trading_date[0:4]),
int(trading_date[5:7]),
int(trading_date[8:10]),
int(data[3][0:2]),
int(data[3][3:5]),
int(data[3][6:8])
), # 时间 datetime格式
"bid_price": float(data[4]),
"bid_volume": int(data[5]),
"bid_num": int(data[6]),
"ask_price": float(data[7]),
"ask_volume": int(data[8]),
"ask_num": int(data[9]),
"bid_orders": data[10].split("|"),
"ask_orders": data[12].split("|")
}
except ValueError:
return {}
return orders
def quotation_to_dict(data):
"""
    The whole conversion takes about 1e-4 s, most of it in datetime.strptime().
    Per Issue #5, "time" is no longer converted with strptime.
return
------
"""
# print("{}, length = {}".format( data, len(data) ))
quotation = {}
if len(data) == 68:
quotation = {
"data_type": 'quotation',
"symbol": data[1], # "股票代码"
"name": data[2], # "中文名"
# "datetime格式的日期时间"
"time": datetime(
int(data[4][0:4]),
int(data[4][5:7]),
int(data[4][8:10]),
int(data[3][0:2]),
int(data[3][3:5]),
int(data[3][6:8])
),
# "昨收"
"last_close": float(data[5]),
# "今开"
"open": float(data[6]),
# "最高价"
"high": float(data[7]),
# "最低价"
"low": float(data[8]),
# "现价"
"now": float(data[9]),
# 状态:
# PH=盘后,PZ=盘中,TP=停牌,
# WX=午休, LT=临时停牌,KJ=开盘集合竞价,PZ=连续竞价
"status": data[10],
"transaction_count": float(data[11]), # "成交笔数"
"total_volume": int(data[12]), # "成交总量"
"total_amount": float(data[13]), # "总成交金额"
# "当前委买总金额"
"current_bid_amount": int(data[14]) if data[14] else 0,
# "加权平均委买价格"
"average_bid_price": float(data[15]) if data[15] else 0.0,
# "当前委卖总金额"
"current_ask_amount": int(data[16]) if data[16] else 0,
# "加权平均委卖价格"
"average_ask_price": float(data[17]) if data[17] else 0.0,
"cancel_bid_num": int(data[18]), # "买入撤单笔数"
"cancel_bid_amount": int(data[19]), # "买入撤单金额"
"unknown_bid": float(data[20]), # 不知道是什么
"cancel_ask": int(data[21]), # "卖出撤单笔数"
"cancel_ask_amount": int(data[22]), # "卖出撤金额"
"unknown_ask": float(data[23]), # 不知道是什么
"total_bid": int(data[24]) if data[24] else 0, # "委买总笔数"
"total_ask": int(data[25]) if data[25] else 0, # "委卖总笔数"
# "bid": data[26], # "买档位"肯定是10,不记录
# "ask": data[27], # "卖档位"肯定是10,不记录
"b1_price": float(data[28]) if data[28] else 0.0, # 买1价
"b2_price": float(data[29]) if data[29] else 0.0, # 买2价
"b3_price": float(data[30]) if data[30] else 0.0, # 买3价
"b4_price": float(data[31]) if data[31] else 0.0, # 买4价
"b5_price": float(data[32]) if data[32] else 0.0, # 买5价
"b6_price": float(data[33]) if data[33] else 0.0, # 买6价
"b7_price": float(data[34]) if data[34] else 0.0, # 买7价
"b8_price": float(data[35]) if data[35] else 0.0, # 买8价
"b9_price": float(data[36]) if data[36] else 0.0, # 买9价
"b10_price": float(data[37]) if data[37] else 0.0, # 买10价
"b1_volume": int(data[38]) if data[38] else 0, # 买1量
"b2_volume": int(data[39]) if data[39] else 0, # 买2量
"b3_volume": int(data[40]) if data[40] else 0, # 买3量
"b4_volume": int(data[41]) if data[41] else 0, # 买4量
"b5_volume": int(data[42]) if data[42] else 0, # 买5量
"b6_volume": int(data[43]) if data[43] else 0, # 买6量
"b7_volume": int(data[44]) if data[44] else 0, # 买7量
"b8_volume": int(data[45]) if data[45] else 0, # 买8量
"b9_volume": int(data[46]) if data[46] else 0, # 买9量
"b10_volume": int(data[47]) if data[47] else 0, # 买10量
"a1_price": float(data[48]) if data[48] else 0.0, # 卖1价
"a2_price": float(data[49]) if data[49] else 0.0, # 卖2价
"a3_price": float(data[50]) if data[50] else 0.0, # 卖3价
"a4_price": float(data[51]) if data[51] else 0.0, # 卖4价
"a5_price": float(data[52]) if data[52] else 0.0, # 卖5价
"a6_price": float(data[53]) if data[53] else 0.0, # 卖6价
"a7_price": float(data[54]) if data[54] else 0.0, # 卖7价
"a8_price": float(data[55]) if data[55] else 0.0, # 卖8价
"a9_price": float(data[56]) if data[56] else 0.0, # 卖9价
"a10_price": float(data[57]) if data[57] else 0.0, # 卖10价
"a1_volume": int(data[58]) if data[58] else 0, # 卖1量
"a2_volume": int(data[59]) if data[59] else 0, # 卖2量
"a3_volume": int(data[60]) if data[60] else 0, # 卖3量
"a4_volume": int(data[61]) if data[61] else 0, # 卖4量
"a5_volume": int(data[62]) if data[62] else 0, # 卖5量
"a6_volume": int(data[63]) if data[63] else 0, # 卖6量
"a7_volume": int(data[64]) if data[64] else 0, # 卖7量
"a8_volume": int(data[65]) if data[65] else 0, # 卖8量
"a9_volume": int(data[66]) if data[66] else 0, # 卖9量
"a10_volume": int(data[67]) if data[67] else 0, # 卖10量
}
elif len(data) == 67:
quotation = {
"data_type": 'quotation',
"symbol": data[1], # "股票代码"
"name": data[2], # "中文名"
# "datetime格式的日期时间"
"time": datetime(
int(data[4][0:4]),
int(data[4][5:7]),
int(data[4][8:10]),
int(data[3][0:2]),
int(data[3][3:5]),
int(data[3][6:8])
),
"last_close": float(data[5]), # "昨收"
"open": float(data[6]), # "今开"
"high": float(data[7]), # "最高价"
"low": float(data[8]), # "最低价"
"now": float(data[9]), # "现价"
# "状态,
# PH=盘后,PZ=盘中,TP=停牌, WX=午休,
# LT=临时停牌,KJ=开盘集合竞价,PZ=连续竞价"
"status": data[10],
"transaction_count": float(data[11]), # "成交笔数"
"total": int(data[12]), # "成交总量"
"amount": float(data[13]), # "总成交金额"
}
return quotation
def transaction_to_dict(data, trading_date):
if len(data) == 11:
transaction = {
"data_type": 'transaction',
"symbol": data[1], # 股票代码
"index": data[2], # 成交序号
"time": datetime(
int(trading_date[0:4]),
int(trading_date[5:7]),
int(trading_date[8:10]),
int(data[3][0:2]),
int(data[3][3:5]),
int(data[3][6:8]),
int(data[3][9:])*1000
), # 时间 datetime格式
# "time": data[3], # 时间,字符串格式,不带日期
"price": float(data[4]), # 成交价格
"volume": int(data[5]), # 成交量
"amount": float(data[6]), # 成交金额
"buynum": int(data[7]), # 买单委托序号
"sellnum": int(data[8]), # 卖单委托序号
"iotype": int(data[9]), # 主动性买卖标识
"channel": int(data[10]), # 成交通道(这是交易所的一个标记,没有作用)
}
return transaction
else:
return None
def symbol_type(symbol):
"""
    description: determine the type of a stock code: parent fund, structured fund, index, A/B shares
return
------
"""
return symbol_type
def read_config(file_path):
    # load the configuration
try:
f_config = open(file_path)
cfg = json.load(f_config)
except Exception as e:
print("{}".format(e))
cfg = dict()
print(
"未能正确加载{},请检查路径,json文档格式,或者忽略此警告"
.format(file_path)
)
return cfg
def write_config(cfg, file_path):
    # write the configuration to disk
print("写入配置:\n{}".format(json.dumps(cfg, indent=2)))
f = open(file_path, 'w', encoding='UTF-8')
f.write(json.dumps(cfg, indent=2))
f.close()
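# Minimal round-trip sketch for the two config helpers above; the file name is
# hypothetical and only used for illustration.
def _config_example(path="demo_config.json"):
    write_config({"token": generate_token()}, path)
    return read_config(path)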
|
apache-2.0
|
thewtex/TubeTK
|
src/Python/pyfsa/dcl.py
|
8
|
4750
|
"""dcl.py
Demonstrate evaluation of a discriminant SVM graph classifier
using N-Fold cross-validation.
"""
__license__ = "Apache License, Version 2.0 (see TubeTK)"
__author__ = "Roland Kwitt, Kitware Inc., 2013"
__email__ = "E-Mail: roland.kwitt@kitware.com"
__status__ = "Development"
# Graph handling
import networkx as nx
# Machine learning
from sklearn.metrics import accuracy_score
from sklearn.cross_validation import KFold
from sklearn.cross_validation import ShuffleSplit
from sklearn.linear_model import LogisticRegression
from sklearn.grid_search import GridSearchCV
from sklearn import preprocessing
from sklearn import svm
# Misc.
from optparse import OptionParser
import logging
import numpy as np
import scipy.sparse
import time
import sys
import os
# pyfsa imports
import core.fsa as fsa
import core.utils as utils
def main(argv=None):
if argv is None:
argv = sys.argv
# Setup vanilla CLI parsing and add custom arg(s).
parser = utils.setup_cli_parsing()
parser.add_option("",
"--codewords",
help="number of codewords.",
default=50,
type="int")
(options, args) = parser.parse_args()
# Setup logging
utils.setup_logging(options)
logger = logging.getLogger()
# Read graph file list and label file list
graph_file_list = utils.read_graph_file_list(options)
if not options.globalLabelFile is None:
label_file_list = [options.globalLabelFile] * len(graph_file_list)
else:
label_file_list = utils.read_label_file_list(options,
graph_file_list)
# Read class info and grouping info
class_info = utils.read_class_info(options)
group_info = utils.read_group_info(options)
assert (group_info.shape[0] ==
len(class_info) ==
len(graph_file_list) ==
len(label_file_list))
# Zip lists together
data = zip(graph_file_list,
label_file_list,
class_info)
# Run fine-structure analysis
fsa_res = fsa.run_fsa(data,
options.radii,
options.recompute,
options.writeAs,
options.skip,
options.omitDegenerate)
data_mat = fsa_res['data_mat']
data_idx = fsa_res['data_idx']
# Create cross-validation folds
n_graphs = len(class_info)
cv = ShuffleSplit(n_graphs,
n_iter=options.cvRuns,
test_size=0.2,
random_state=0)
# Try inplace feature normalization
if options.normalize:
logger.info("Running feature normalization ...")
scaler = preprocessing.StandardScaler(copy=False)
scaler.fit_transform(fsa_res['data_mat'])
scores = []
for cv_id, (trn, tst) in enumerate(cv):
# Compose training data
pos = []
for i in trn:
tmp = np.where(data_idx==i)[0]
pos.extend(list(tmp))
np_pos = np.array(pos)
# Learn a codebook from training data
codebook = fsa.learn_codebook(data_mat[np_pos,:],
options.codewords,
options.seed)
# Compute BoW histograms for training data
bow_trn_mat = np.zeros((len(trn), options.codewords))
for cnt, i in enumerate(trn):
np_pos = np.where(data_idx==i)[0]
bow_trn_mat[cnt,:] = np.asarray(fsa.bow(data_mat[np_pos,:],
codebook))
# Cross-validate (5-fold) SVM classifier and parameters
param_selection = [{'kernel': ['rbf'],
'gamma': np.logspace(-6,2,10),
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'],
'C': [1, 10, 100, 1000]}]
        clf = GridSearchCV(svm.SVC(C=1), param_selection, cv=5)
        clf.fit(bow_trn_mat, np.asarray(class_info)[trn])
# Compute BoW histograms for testing data
bow_tst_mat = np.zeros((len(tst), options.codewords))
for cnt,i in enumerate(tst):
pos = np.where(data_idx==i)[0]
bow_tst_mat[cnt,:] = fsa.bow(data_mat[pos,:], codebook)
print "yhat : ", clf.predict(bow_tst_mat)
print "gold : ", np.asarray(class_info)[tst]
# Score the classifier
score = clf.score(bow_tst_mat, np.asarray(class_info)[tst])
scores.append(score)
logger.info("Score (%.2d): %.2f" % (cv_id,100*score))
utils.show_summary(scores)
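# Hedged sketch of the codebook/bag-of-words step performed in main(), written
# with plain scikit-learn KMeans instead of the pyfsa helpers; purely
# illustrative and assumes more feature rows than codewords.
def _bow_sketch(features, n_codewords=50):
    import numpy as np
    from sklearn.cluster import KMeans
    km = KMeans(n_clusters=n_codewords, random_state=0).fit(features)
    words = km.predict(features)
    hist = np.bincount(words, minlength=n_codewords).astype(float)
    return hist / hist.sum()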
if __name__ == "__main__":
main()
|
apache-2.0
|
xavierfav/freesound-python
|
script_clustering.py
|
1
|
12025
|
import manager
from scipy.spatial.distance import pdist
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import cosine_similarity
import webbrowser
import community.community_louvain as com
import networkx as nx
import numpy as np
import operator
#c = manager.Client()
#b = c.load_basket_pickle('UrbanSound8K') # Can load a basket from a search result instead
#b = c.load_basket_pickle('freesound_db_071216.pkl')
#
#k_nn = 200 # param for k-nn graph creation
#
#
## __________________ FEATURE __________________ #
## Extract features and create similarity matrix from:
## Acoustic descriptors
#b.analysis_stats = [None] * len(b) # this is because the basket is old and now analysis_stats contains None values initialy
#b.add_analysis_stats()
#b.remove_sounds_with_no_analysis()
#d = b.extract_descriptor_stats(scale=True)
#sound_similarity_matrix_d = euclidean_distances(d)
#sound_similarity_matrix_d = sound_similarity_matrix_d/sound_similarity_matrix_d.max()
#sound_similarity_matrix_d = 1 - sound_similarity_matrix_d
#
## Tags
#t = b.preprocessing_tag()
#for idx, tt in enumerate(t):
# b.sounds[idx].tags = tt
#nlp = manager.Nlp(b)
#nlp.create_sound_tag_matrix()
#sound_similarity_matrix_t = nlp.return_similarity_matrix_tags(nlp.sound_tag_matrix)
#
#
## __________________ GRAPH __________________ #
## Create k-nn graphs
#g_t = nlp.create_knn_graph(sound_similarity_matrix_t, k_nn)
#g_d = nlp.create_knn_graph(sound_similarity_matrix_d, k_nn)
#g_t.name = 'Tag knn graph'
#g_d.name = 'Audio knn graph'
#
## community detection
#cc_t = com.best_partition(g_t)
#cc_d = com.best_partition(g_d)
#nb_c_t = max(cc_t.values()) + 1
#nb_c_d = max(cc_d.values()) + 1
#
## generate dendrogram
#dendro_t = com.generate_dendrogram(g_t)
#dendro_d = com.generate_dendrogram(g_d)
#
## extract clusters (list of ids for each cluster)
#clas_t = [[e for e in cc_t.keys() if cc_t[e]==cl] for cl in range(nb_c_t)]
#clas_d = [[e for e in cc_d.keys() if cc_d[e]==cl] for cl in range(nb_c_d)]
class Cluster:
"""
Compute the clusters with the knn-graph based clustering using Louvain aglorithm.
Parameters
----------
name : string, optional
a name for the cluster (use it to store the experiment configurations)
basket : manager.Basket
a basket holding the sound collection to cluster
k_nn : int
the parameter of the k nearest neighbour for graph generation. Default to 50
feature_type : 'text' or 'acoustic'
the type of features used for computing similarity between sounds.
Examples
--------
from script_clustering import *
c = manager.Client()
b = c.load_basket_pickle('UrbanSound8K')
cluster = Cluster(basket=b)
cluster.compute_similarity_matrix()
cluster.generate_graph()
cluster.cluster_graph()
cluster.create_cluster_baskets()
cluster.display_clusters()
"""
def __init__(self, name='Cluster Object', basket=None, k_nn=50):
self.name = name
self.basket = basket
self.k_nn = k_nn
self.feature_type = None
self.acoustic_features = None
self.acoustic_similarity_matrix = None
self.text_features = None
self.text_similarity_matrix = None
self.graph = None
self.nb_clusters = None
self.ids_in_clusters = None
def compute_similarity_matrix(self, basket=None, feature_type='text'):
self.feature_type = feature_type
basket = basket or self.basket
if basket == None:
print 'You must provide a basket as argument'
else:
if feature_type == 'text':
self.extract_text_features(basket)
self.create_similarity_matrix_text(self.text_features)
elif feature_type == 'acoustic':
self.extract_acoustic_features(basket)
self.create_similarity_matrix_acoustic(self.acoustic_features)
def extract_text_features(self, basket=None):
basket = basket or self.basket
t = basket.preprocessing_tag() #some stemming
for idx, tt in enumerate(t):
basket.sounds[idx].tags = tt
nlp = manager.Nlp(basket) # counting terms...
nlp.create_sound_tag_matrix() # create the feature vectors
self.text_features = nlp.sound_tag_matrix
def create_similarity_matrix_text(self, features=None):
        if features is None:
            features = self.text_features
        if features is None:
print 'You must provide the text features as argument or run extract_text_features() first'
else:
self.text_similarity_matrix = cosine_similarity(features)
def extract_acoustic_features(self, basket=None):
"""Extract acoustic features"""
basket = basket or self.basket
        basket.analysis_stats = [None] * len(basket)  # in case the basket is old and analysis_stats initially contains None values
basket.add_analysis_stats()
basket.remove_sounds_with_no_analysis()
self.acoustic_features = basket.extract_descriptor_stats(scale=True) # list of all descriptors stats for each sound in the basket
def create_similarity_matrix_acoustic(self, features=None):
        features = self.acoustic_features if features is None else features
        if features is None:
print 'You must provide the acoustic features as argument or run extract_acoustic_features() first'
else:
matrix = euclidean_distances(features)
matrix = matrix/matrix.max()
self.acoustic_similarity_matrix = 1 - matrix
def generate_graph(self, similarity_matrix=None, k_nn=None):
k_nn = k_nn or self.k_nn
        if similarity_matrix is None:
if self.feature_type == 'text':
similarity_matrix = self.text_similarity_matrix
            elif self.feature_type == 'acoustic':
similarity_matrix = self.acoustic_similarity_matrix
self.graph = self.create_knn_graph(similarity_matrix, k_nn)
def cluster_graph(self, graph=None):
graph = graph or self.graph
classes = com.best_partition(graph)
self.nb_clusters = max(classes.values()) + 1
#dendrogram = com.generate_dendrogram(graph)
self.ids_in_clusters = [[e for e in classes.keys() if classes[e]==cl] for cl in range(self.nb_clusters)]
@staticmethod
def nearest_neighbors(similarity_matrix, idx, k):
distances = []
for x in range(len(similarity_matrix)):
distances.append((x,similarity_matrix[idx][x]))
distances.sort(key=operator.itemgetter(1), reverse=True)
return [d[0] for d in distances[0:k]]
def create_knn_graph(self, similarity_matrix, k):
""" Returns a knn graph from a similarity matrix - NetworkX module """
np.fill_diagonal(similarity_matrix, 0) # for removing the 1 from diagonal
g = nx.Graph()
g.add_nodes_from(range(len(similarity_matrix)))
        for idx in range(len(similarity_matrix)):
            neighbors = self.nearest_neighbors(similarity_matrix, idx, k)
            g.add_edges_from([(idx, i) for i in neighbors])
            print idx, neighbors
return g
def create_cluster_baskets(self):
list_baskets = [self.basket.parent_client.new_basket() for i in range(self.nb_clusters)]
for cl in range(len(self.ids_in_clusters)):
for s in self.ids_in_clusters[cl]:
list_baskets[cl].push(self.basket.sounds[s])
self.cluster_baskets = list_baskets
def display_clusters(self):
tags_occurrences = [basket.tags_occurrences() for basket in self.cluster_baskets]
normalized_tags_occurrences = []
for idx, tag_occurrence in enumerate(tags_occurrences):
normalized_tags_occurrences.append([(t_o[0], float(t_o[1])/len(self.cluster_baskets[idx].sounds)) for t_o in tag_occurrence])
def print_basket(list_baskets, normalized_tags_occurrences, num_basket, max_tag = 20):
"""Print tag occurrences"""
print '\n Cluster %s, containing %s sounds' % (num_basket, len(list_baskets[num_basket]))
for idx, tag in enumerate(normalized_tags_occurrences[num_basket]):
if idx < max_tag:
print tag[0].ljust(30) + str(tag[1])[0:5]
else:
break
print '\n ____________________________________________________'
print '\n Cluster tags occurrences for Tag based method:'
for i in range(len(self.ids_in_clusters)):
print_basket(self.cluster_baskets, normalized_tags_occurrences, i, 10)
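# Hedged mini-example of the Louvain step on a toy graph, independent of any
# Freesound data; best_partition maps node id -> cluster id.
def _louvain_toy_example():
    g = nx.karate_club_graph()
    partition = com.best_partition(g)
    return max(partition.values()) + 1  # number of clusters found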
## ________________ EVALUATION ________________ #
#list_baskets_t = [c.new_basket() for i in range(nb_c_t)]
#list_baskets_d = [c.new_basket() for i in range(nb_c_d)]
#
#for cl in range(len(clas_t)):
# for s in clas_t[cl]:
# list_baskets_t[cl].push(b.sounds[s])
#for cl in range(len(clas_d)):
# for s in clas_d[cl]:
# list_baskets_d[cl].push(b.sounds[s])
#
#tags_occurrences_t = [basket.tags_occurrences() for basket in list_baskets_t]
#tags_occurrences_d = [basket.tags_occurrences() for basket in list_baskets_d]
#
#normalized_tags_occurrences_t = []
#normalized_tags_occurrences_d = []
#
#for idx, tag_occurrence in enumerate(tags_occurrences_t):
# normalized_tags_occurrences_t.append([(t_o[0], float(t_o[1])/len(list_baskets_t[idx].sounds)) for t_o in tag_occurrence])
#for idx, tag_occurrence in enumerate(tags_occurrences_d):
# normalized_tags_occurrences_d.append([(t_o[0], float(t_o[1])/len(list_baskets_d[idx].sounds)) for t_o in tag_occurrence])
#
#def print_basket(list_baskets, normalized_tags_occurrences, num_basket, max_tag = 20):
# """Print tag occurrences"""
# print '\n Cluster %s, containing %s sounds' % (num_basket, len(list_baskets[num_basket]))
# for idx, tag in enumerate(normalized_tags_occurrences[num_basket]):
# if idx < max_tag:
# print tag[0].ljust(30) + str(tag[1])[0:5]
# else:
# break
#print '\n ____________________________________________________'
#print '\n Cluster tags occurrences for Tag based method:'
#for i in range(len(clas_t)):
# print_basket(list_baskets_t, normalized_tags_occurrences_t, i, 10)
#print '\n ____________________________________________________'
#print '\n Cluster tags occurrences for Acoustic based method:'
#for i in range(len(clas_d)):
# print_basket(list_baskets_d, normalized_tags_occurrences_d, i, 10)
#
## Create html pages with sound clustered
#def create_html_for_cluster(list_baskets, num_cluster):
# """Create a html with the Freesound embed"""
# # This list contains the begining and the end of the embed
# # Need to insert the id of the sound
# embed_blocks = ['<iframe frameborder="0" scrolling="no" src="https://www.freesound.org/embed/sound/iframe/', '/simple/medium/" width="481" height="86"></iframe>']
#
# # Create the html string
# message = """
# <html>
# <head></head>
# <body>
# """
# for idx, ids in enumerate(list_baskets[num_cluster].ids):
# message += embed_blocks[0] + str(ids) + embed_blocks[1]
# if idx > 50:
# break
# message += """
# </body>
# </html>
# """
#
# # Create the file
# f = open('result_cluster'+ str(num_cluster) +'.html', 'w')
# f.write(message)
# f.close()
#
# # Open it im the browser
# webbrowser.open_new_tab('result_cluster'+ str(num_cluster) +'.html')
#
#def pop_html(method):
# if method == 't':
# clas = clas_t
# list_baskets = list_baskets_t
# elif method == 'd':
# clas = clas_d
# list_baskets = list_baskets_d
# for i in range(len(clas)):
# create_html_for_cluster(list_baskets, i)
#
|
mit
|
mayblue9/scikit-learn
|
examples/linear_model/plot_sparse_recovery.py
|
243
|
7461
|
"""
============================================================
Sparse recovery: feature selection for sparse linear models
============================================================
Given a small number of observations, we want to recover which features
of X are relevant to explain y. For this :ref:`sparse linear models
<l1_feature_selection>` can outperform standard statistical tests if the
true model is sparse, i.e. if a small fraction of the features are
relevant.
As detailed in :ref:`the compressive sensing notes
<compressive_sensing>`, the ability of L1-based approach to identify the
relevant variables depends on the sparsity of the ground truth, the
number of samples, the number of features, the conditioning of the
design matrix on the signal subspace, the amount of noise, and the
absolute value of the smallest non-zero coefficient [Wainwright2006]
(http://statistics.berkeley.edu/tech-reports/709.pdf).
Here we keep all parameters constant and vary the conditioning of the
design matrix. For a well-conditioned design matrix (small mutual
incoherence) we are exactly in compressive sensing conditions (i.i.d
Gaussian sensing matrix), and L1-recovery with the Lasso performs very
well. For an ill-conditioned matrix (high mutual incoherence),
regressors are very correlated, and the Lasso randomly selects one.
However, randomized-Lasso can recover the ground truth well.
In each situation, we first vary the alpha parameter setting the sparsity
of the estimated model and look at the stability scores of the randomized
Lasso. This analysis, knowing the ground truth, shows an optimal regime
in which relevant features stand out from the irrelevant ones. If alpha
is chosen too small, non-relevant variables enter the model. On the
opposite, if alpha is selected too large, the Lasso is equivalent to
stepwise regression, and thus brings no advantage over a univariate
F-test.
In a second time, we set alpha and compare the performance of different
feature selection methods, using the area under curve (AUC) of the
precision-recall.
"""
print(__doc__)
# Author: Alexandre Gramfort and Gael Varoquaux
# License: BSD 3 clause
import warnings
import matplotlib.pyplot as plt
import numpy as np
from scipy import linalg
from sklearn.linear_model import (RandomizedLasso, lasso_stability_path,
LassoLarsCV)
from sklearn.feature_selection import f_regression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import auc, precision_recall_curve
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.utils.extmath import pinvh
from sklearn.utils import ConvergenceWarning
def mutual_incoherence(X_relevant, X_irelevant):
"""Mutual incoherence, as defined by formula (26a) of [Wainwright2006].
"""
projector = np.dot(np.dot(X_irelevant.T, X_relevant),
pinvh(np.dot(X_relevant.T, X_relevant)))
return np.max(np.abs(projector).sum(axis=1))
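# Quick illustrative check: with orthogonal relevant/irrelevant blocks (columns
# of the identity matrix) the mutual incoherence is 0.
def _mutual_incoherence_example():
    X = np.eye(6)
    return mutual_incoherence(X[:, :3], X[:, 3:])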
for conditioning in (1, 1e-4):
###########################################################################
# Simulate regression data with a correlated design
n_features = 501
n_relevant_features = 3
noise_level = .2
coef_min = .2
# The Donoho-Tanner phase transition is around n_samples=25: below we
# will completely fail to recover in the well-conditioned case
n_samples = 25
block_size = n_relevant_features
rng = np.random.RandomState(42)
# The coefficients of our model
coef = np.zeros(n_features)
coef[:n_relevant_features] = coef_min + rng.rand(n_relevant_features)
# The correlation of our design: variables correlated by blocs of 3
corr = np.zeros((n_features, n_features))
for i in range(0, n_features, block_size):
corr[i:i + block_size, i:i + block_size] = 1 - conditioning
corr.flat[::n_features + 1] = 1
corr = linalg.cholesky(corr)
# Our design
X = rng.normal(size=(n_samples, n_features))
X = np.dot(X, corr)
# Keep [Wainwright2006] (26c) constant
X[:n_relevant_features] /= np.abs(
linalg.svdvals(X[:n_relevant_features])).max()
X = StandardScaler().fit_transform(X.copy())
# The output variable
y = np.dot(X, coef)
y /= np.std(y)
# We scale the added noise as a function of the average correlation
# between the design and the output variable
y += noise_level * rng.normal(size=n_samples)
mi = mutual_incoherence(X[:, :n_relevant_features],
X[:, n_relevant_features:])
###########################################################################
# Plot stability selection path, using a high eps for early stopping
# of the path, to save computation time
alpha_grid, scores_path = lasso_stability_path(X, y, random_state=42,
eps=0.05)
plt.figure()
# We plot the path as a function of alpha/alpha_max to the power 1/3: the
# power 1/3 scales the path less brutally than the log, and enables to
# see the progression along the path
hg = plt.plot(alpha_grid[1:] ** .333, scores_path[coef != 0].T[1:], 'r')
hb = plt.plot(alpha_grid[1:] ** .333, scores_path[coef == 0].T[1:], 'k')
ymin, ymax = plt.ylim()
plt.xlabel(r'$(\alpha / \alpha_{max})^{1/3}$')
plt.ylabel('Stability score: proportion of times selected')
plt.title('Stability Scores Path - Mutual incoherence: %.1f' % mi)
plt.axis('tight')
plt.legend((hg[0], hb[0]), ('relevant features', 'irrelevant features'),
loc='best')
###########################################################################
# Plot the estimated stability scores for a given alpha
# Use 6-fold cross-validation rather than the default 3-fold: it leads to
    # a better choice of alpha.
    # Suppress the user warnings: they are not necessary for the example
    # as it is specifically set up to be challenging.
with warnings.catch_warnings():
warnings.simplefilter('ignore', UserWarning)
warnings.simplefilter('ignore', ConvergenceWarning)
lars_cv = LassoLarsCV(cv=6).fit(X, y)
    # Run the RandomizedLasso: we use a path going down to .1*alpha_max
# to avoid exploring the regime in which very noisy variables enter
# the model
alphas = np.linspace(lars_cv.alphas_[0], .1 * lars_cv.alphas_[0], 6)
clf = RandomizedLasso(alpha=alphas, random_state=42).fit(X, y)
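    # clf.scores_ gives, for each feature, the fraction of the resampled,
    # reweighted Lasso fits in which that feature was selected (its
    # stability score)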
trees = ExtraTreesRegressor(100).fit(X, y)
# Compare with F-score
F, _ = f_regression(X, y)
plt.figure()
for name, score in [('F-test', F),
('Stability selection', clf.scores_),
('Lasso coefs', np.abs(lars_cv.coef_)),
('Trees', trees.feature_importances_),
]:
precision, recall, thresholds = precision_recall_curve(coef != 0,
score)
plt.semilogy(np.maximum(score / np.max(score), 1e-4),
label="%s. AUC: %.3f" % (name, auc(recall, precision)))
plt.plot(np.where(coef != 0)[0], [2e-4] * n_relevant_features, 'mo',
label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Score")
# Plot only the 100 first coefficients
plt.xlim(0, 100)
plt.legend(loc='best')
plt.title('Feature selection scores - Mutual incoherence: %.1f'
% mi)
plt.show()
|
bsd-3-clause
|
ky822/scikit-learn
|
examples/model_selection/grid_search_digits.py
|
227
|
2665
|
"""
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This examples shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.grid_search.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
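# For each metric we optimize its '_weighted' variant, i.e. the per-class
# score averaged with weights proportional to class support.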
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_weighted' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
|
bsd-3-clause
|
plcode7/rad2py
|
psp2py/controllers/estimate.py
|
8
|
6937
|
# coding: utf8
# try something like
from statistics import calc_correlation, calc_significance, calc_linear_regression, calc_student_t_probability, calc_prediction_interval
from draws import draw_linear_regression
def get_projects_metrics():
"Query size and time metrics series summarized by project"
    q = db.psp_project.completed!=None # only account for finished ones!
rows = db(q & (db.psp_project.actual_loc!=None)).select(db.psp_project.actual_loc, orderby=db.psp_project.project_id)
actual_loc = [row.actual_loc for row in rows]
rows = db(q & (db.psp_project.project_id==db.psp_time_summary.project_id)).select(db.psp_time_summary.actual.sum().with_alias("total"), groupby=db.psp_project.project_id, orderby=db.psp_project.project_id)
hours = [row.total/60.0/60.0 for row in rows]
return actual_loc, hours
def correlation():
"Check correlation between actual object LOC and hours"
    # according to [HUMPHREY95] p.513 & p.151:
# - when 0.9 <= r2 : the relationship is considered predictive
# - when 0.7 <= r2 < 0.9 : there is a strong correlation
# - when 0.5 <= r2 < 0.7 : there is an adequate correlation (use with caution)
# - when r2 < 0.5 : not reliable for planning purposes
actual_loc, hours = get_projects_metrics()
r = calc_correlation(actual_loc, hours)
r2 = r**2
if 0.9 <= r2:
corr = 'high (predictive)'
elif 0.7 <= r2 < 0.9:
corr = 'strong (planning)'
elif 0.5 <= r2 < 0.7:
corr = 'adequate (use with care)'
elif r2 < 0.5:
corr = 'weak (not reliable)'
return {'loc': actual_loc, 'hours': hours, 'r2': r**2, 'correlation': corr}
def significance():
"Check the significance of a correlation"
#TODO: test probability with student t
# p = student_t(n-1, t)
# if 1-p<=0.05 data is considered good [HUMPHREY95] p.70
actual_loc, hours = get_projects_metrics()
t, r2, n = calc_significance(actual_loc, hours)
p = calc_student_t_probability(t, n-1)
s = 1 - p
if s<0.005:
significance = "very high"
elif s<0.01:
significance = "high"
elif s<0.05:
significance = "good"
elif s<0.2:
significance = "adequate"
else:
significance = "poor"
return {'loc': actual_loc, 'hours': hours, 'n': n, 'r2': r2, 't': t, 'p': p, 's': s, "significance": significance}
def get_time_todate():
"Calculate accumulated time per phase to date"
q = db.psp_project.project_id==db.psp_time_summary.project_id
    q &= db.psp_project.completed!=None # only account for finished ones!
rows = db(q).select(
db.psp_time_summary.actual.sum().with_alias("subtotal"),
db.psp_time_summary.phase,
groupby=db.psp_time_summary.phase)
total = float(sum([row.subtotal or 0 for row in rows], 0))
todate = sorted([(row.psp_time_summary.phase, row.subtotal or 0, (row.subtotal or 0)/total*100.0) for row in rows],
key=lambda x: PSP_PHASES.index(x[0]))
return todate
def time_in_phase():
times = get_time_todate()
return {'times': times}
def index():
"Estimate Time and Prediction Interval (UPI, LPI)"
# use historical data of actual object size (LOC) and time to calculate
# development time based on planned LOC [HUMPHREY95] pp.153-155
#TODO: calculate Upper and Lower Prediction Interval
form = SQLFORM.factory(
Field("size", "integer",
default=request.vars.planned_loc,
comment="Planned size (estimated LOC)"),
Field("prediction_interval", "integer",
default="70", requires=IS_INT_IN_RANGE(0, 100),
comment="Percentage (for LPI, UPI)"),
Field("project_id", db.psp_project,
requires=IS_IN_DB(db, db.psp_project.project_id, "%(name)s"),
comment="Project to update plan"),
)
if form.accepts(request.vars, session):
        # calculate regression parameters for historical LOC and time data:
actual_loc, hours = get_projects_metrics()
b0, b1 = calc_linear_regression(actual_loc, hours)
# get LOC planned size and calculate development time
size_k = form.vars.size
time_t = b0 + b1*size_k
alpha = form.vars.prediction_interval/100.0
redirect(URL("update_plan",
args=form.vars.project_id,
vars={'size_k': size_k, 'time_t': time_t,
'alpha': alpha })
)
return {'form': form}
def update_plan():
"Get resource estimates (size and time) and update project plan summary"
project_id = request.args[0]
# get resources previously calculated
estimated_loc = int(request.vars.size_k)
estimated_time = float(request.vars.time_t)
alpha = float(request.vars.alpha)
    # summarize actual times in each phase [(phase, to_date, to_date_%)]
time_summary = get_time_todate()
# subdivide time for each phase [HUMPHREY95] p.52
    # (according to the actual distribution of development time)
times = {}
for phase, to_date, percentage in time_summary:
times[phase] = estimated_time * percentage / 100.0
for phase, plan in times.items():
# convert plan time from hours to seconds
plan = int(plan * 60 * 60)
q = db.psp_time_summary.project_id==project_id
q &= db.psp_time_summary.phase==phase
# update current record
cnt = db(q).update(plan=plan)
if not cnt:
# insert record if not exists
db.psp_time_summary.insert(project_id=project_id,
phase=phase,
plan=plan,
actual=0,
interruption=0,
)
    # calculate regression parameters and prediction interval for historical LOC and time data:
actual_loc, hours = get_projects_metrics()
b0, b1, p_range, upi, lpi, t = calc_prediction_interval(actual_loc, hours, estimated_loc, estimated_time, alpha)
# update planned loc and time prediction interval:
db(db.psp_project.project_id==project_id).update(
planned_loc=estimated_loc,
planned_time=estimated_time,
time_lpi=lpi,
time_upi=upi,
)
# show project summary
redirect(URL(c='projects', f='show', args=("psp_project", project_id)))
def linear_regression():
"Draw the linear regression chart for actual loc and dev times"
    # this needs matplotlib!
import pylab
actual_loc, hours = get_projects_metrics()
x = pylab.array(actual_loc)
y = pylab.array(hours)
return draw_linear_regression(x, y, "Size (LOC)", "Time (hs)", "Linear Regression", response.body)
|
gpl-3.0
|
chugunovyar/factoryForBuild
|
env/lib/python2.7/site-packages/matplotlib/backends/backend_wxagg.py
|
10
|
5840
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import matplotlib
from matplotlib.figure import Figure
from .backend_agg import FigureCanvasAgg
from . import wx_compat as wxc
from . import backend_wx
from .backend_wx import (FigureManagerWx, FigureCanvasWx,
FigureFrameWx, DEBUG_MSG, NavigationToolbar2Wx, Toolbar)
import wx
show = backend_wx.Show()
class FigureFrameWxAgg(FigureFrameWx):
def get_canvas(self, fig):
return FigureCanvasWxAgg(self, -1, fig)
def _get_toolbar(self, statbar):
if matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2WxAgg(self.canvas)
toolbar.set_status_bar(statbar)
else:
toolbar = None
return toolbar
class FigureCanvasWxAgg(FigureCanvasAgg, FigureCanvasWx):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually)
lives inside a frame instantiated by a FigureManagerWx. The parent
window probably implements a wxSizer to control the displayed
control size - but we give a hint as to our preferred minimum
size.
"""
def draw(self, drawDC=None):
"""
Render the figure using agg.
"""
DEBUG_MSG("draw()", 1, self)
FigureCanvasAgg.draw(self)
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC, origin='WXAgg')
def blit(self, bbox=None):
"""
Transfer the region of the agg buffer defined by bbox to the display.
If bbox is None, the entire buffer is transferred.
"""
if bbox is None:
self.bitmap = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
self.gui_repaint()
return
l, b, w, h = bbox.bounds
r = l + w
t = b + h
x = int(l)
y = int(self.bitmap.GetHeight() - t)
srcBmp = _convert_agg_to_wx_bitmap(self.get_renderer(), None)
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destDC = wx.MemoryDC()
destDC.SelectObject(self.bitmap)
destDC.Blit(x, y, int(w), int(h), srcDC, x, y)
destDC.SelectObject(wx.NullBitmap)
srcDC.SelectObject(wx.NullBitmap)
self.gui_repaint()
filetypes = FigureCanvasAgg.filetypes
def print_figure(self, filename, *args, **kwargs):
# Use pure Agg renderer to draw
FigureCanvasAgg.print_figure(self, filename, *args, **kwargs)
# Restore the current view; this is needed because the
        # artist contains methods that rely on particular attributes
# of the rendered figure for determining things like
# bounding boxes.
if self._isDrawn:
self.draw()
class NavigationToolbar2WxAgg(NavigationToolbar2Wx):
def get_canvas(self, frame, fig):
return FigureCanvasWxAgg(frame, -1, fig)
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# in order to expose the Figure constructor to the pylab
# interface we need to create the figure here
DEBUG_MSG("new_figure_manager()", 3, None)
backend_wx._create_wx_app()
FigureClass = kwargs.pop('FigureClass', Figure)
fig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, fig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
frame = FigureFrameWxAgg(num, figure)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show()
figure.canvas.draw_idle()
return figmgr
#
# agg/wxPython image conversion functions (wxPython >= 2.8)
#
def _convert_agg_to_wx_image(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Image. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgb -> image
image = wxc.EmptyImage(int(agg.width), int(agg.height))
image.SetData(agg.tostring_rgb())
return image
else:
# agg => rgba buffer -> bitmap => clipped bitmap => image
return wx.ImageFromBitmap(_WX28_clipped_agg_as_bitmap(agg, bbox))
def _convert_agg_to_wx_bitmap(agg, bbox):
"""
Convert the region of the agg buffer bounded by bbox to a wx.Bitmap. If
bbox is None, the entire buffer is converted.
Note: agg must be a backend_agg.RendererAgg instance.
"""
if bbox is None:
# agg => rgba buffer -> bitmap
return wxc.BitmapFromBuffer(int(agg.width), int(agg.height),
agg.buffer_rgba())
else:
# agg => rgba buffer -> bitmap => clipped bitmap
return _WX28_clipped_agg_as_bitmap(agg, bbox)
def _WX28_clipped_agg_as_bitmap(agg, bbox):
"""
    Convert the region of the agg buffer bounded by bbox to a wx.Bitmap.
Note: agg must be a backend_agg.RendererAgg instance.
"""
l, b, width, height = bbox.bounds
r = l + width
t = b + height
srcBmp = wxc.BitmapFromBuffer(int(agg.width), int(agg.height),
agg.buffer_rgba())
srcDC = wx.MemoryDC()
srcDC.SelectObject(srcBmp)
destBmp = wxc.EmptyBitmap(int(width), int(height))
destDC = wx.MemoryDC()
destDC.SelectObject(destBmp)
x = int(l)
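    # The agg bbox uses a bottom-left origin while wx device contexts use a
    # top-left origin, hence the vertical flip when computing y below.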
y = int(int(agg.height) - t)
destDC.Blit(0, 0, int(width), int(height), srcDC, x, y)
srcDC.SelectObject(wx.NullBitmap)
destDC.SelectObject(wx.NullBitmap)
return destBmp
FigureCanvas = FigureCanvasWxAgg
FigureManager = FigureManagerWx
|
gpl-3.0
|
0todd0000/spm1d
|
spm1d/rft1d/examples/smoothness_estimation_broken.py
|
1
|
1077
|
import numpy as np
from matplotlib import pyplot
from spm1d import rft1d
'''
WARNING!
Calls to rft1d.random.randn1d must set pad=True
when FWHM is greater than 50
'''
#(0) Set parameters:
np.random.seed(0)
nResponses = 1000
nNodes = 101
### generate a field mask:
nodes = np.array([True]*nNodes) #nothing masked out
nodes[10:30] = False #this region will be masked out
nodes[60:85] = False
#(1) Cycle through smoothing kernels:
FWHM = np.linspace(1, 50, 21) #actual FWHM
FWHMe = [] #estimated FWHM
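# Per the warning above, pad=True is only required when FWHM is greater
# than 50; the grid here stops at 50, so randn1d is called with pad=False.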
for w in FWHM:
y = rft1d.random.randn1d(nResponses, nodes, w, pad=False) #broken fields
FWHMe.append( rft1d.geom.estimate_fwhm(y) )
print( 'Actual FWHM: %06.3f, estimated FWHM: %06.3f' %(w, FWHMe[-1]) )
#(2) Plot results:
pyplot.close('all')
pyplot.plot(FWHM, FWHM, 'k:', label='Actual')
pyplot.plot(FWHM, FWHMe, 'go', label='Estimated')
pyplot.legend(loc='upper left')
pyplot.xlabel('Actual FWHM', size=16)
pyplot.ylabel('Estimated FWHM', size=16)
pyplot.title('FWHM estimation validation (broken fields)', size=20)
pyplot.show()
|
gpl-3.0
|
pombredanne/bokeh
|
bokeh/core/compat/mplexporter/tools.py
|
75
|
1732
|
"""
Tools for matplotlib plot exporting
"""
def ipynb_vega_init():
"""Initialize the IPython notebook display elements
This function borrows heavily from the excellent vincent package:
http://github.com/wrobstory/vincent
"""
try:
from IPython.core.display import display, HTML
    except ImportError:
        print('IPython Notebook could not be loaded.')
        return
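    # Double braces below are literal braces once the string goes through
    # str.format; only the {0} placeholder is substituted (with the
    # dependency-loading snippet built further down).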
require_js = '''
if (window['d3'] === undefined) {{
require.config({{ paths: {{d3: "http://d3js.org/d3.v3.min"}} }});
require(["d3"], function(d3) {{
window.d3 = d3;
{0}
}});
}};
if (window['topojson'] === undefined) {{
require.config(
{{ paths: {{topojson: "http://d3js.org/topojson.v1.min"}} }}
);
require(["topojson"], function(topojson) {{
window.topojson = topojson;
}});
}};
'''
d3_geo_projection_js_url = "http://d3js.org/d3.geo.projection.v0.min.js"
d3_layout_cloud_js_url = ("http://wrobstory.github.io/d3-cloud/"
"d3.layout.cloud.js")
topojson_js_url = "http://d3js.org/topojson.v1.min.js"
vega_js_url = 'http://trifacta.github.com/vega/vega.js'
dep_libs = '''$.getScript("%s", function() {
$.getScript("%s", function() {
$.getScript("%s", function() {
$.getScript("%s", function() {
$([IPython.events]).trigger("vega_loaded.vincent");
})
})
})
});''' % (d3_geo_projection_js_url, d3_layout_cloud_js_url,
topojson_js_url, vega_js_url)
load_js = require_js.format(dep_libs)
html = '<script>'+load_js+'</script>'
display(HTML(html))
|
bsd-3-clause
|
zfrenchee/pandas
|
pandas/tests/frame/test_missing.py
|
1
|
29948
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from distutils.version import LooseVersion
from numpy import nan, random
import numpy as np
from pandas.compat import lrange
from pandas import (DataFrame, Series, Timestamp,
date_range, Categorical)
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.tests.frame.common import TestData, _check_mixed_float
try:
import scipy
_is_scipy_ge_0190 = (LooseVersion(scipy.__version__) >=
LooseVersion('0.19.0'))
except ImportError:
_is_scipy_ge_0190 = False
def _skip_if_no_pchip():
try:
from scipy.interpolate import pchip_interpolate # noqa
except ImportError:
import pytest
pytest.skip('scipy.interpolate.pchip missing')
class TestDataFrameMissingData(TestData):
def test_dropEmptyRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
original = Series(mat, index=self.frame.index, name='foo')
expected = original.dropna()
inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna(how='all')
# check that original was preserved
assert_series_equal(frame['foo'], original)
inplace_frame1.dropna(how='all', inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame1['foo'], expected)
smaller_frame = frame.dropna(how='all', subset=['foo'])
inplace_frame2.dropna(how='all', subset=['foo'], inplace=True)
assert_series_equal(smaller_frame['foo'], expected)
assert_series_equal(inplace_frame2['foo'], expected)
def test_dropIncompleteRows(self):
N = len(self.frame.index)
mat = random.randn(N)
mat[:5] = nan
frame = DataFrame({'foo': mat}, index=self.frame.index)
frame['bar'] = 5
original = Series(mat, index=self.frame.index, name='foo')
inp_frame1, inp_frame2 = frame.copy(), frame.copy()
smaller_frame = frame.dropna()
assert_series_equal(frame['foo'], original)
inp_frame1.dropna(inplace=True)
exp = Series(mat[5:], index=self.frame.index[5:], name='foo')
tm.assert_series_equal(smaller_frame['foo'], exp)
tm.assert_series_equal(inp_frame1['foo'], exp)
samesize_frame = frame.dropna(subset=['bar'])
assert_series_equal(frame['foo'], original)
assert (frame['bar'] == 5).all()
inp_frame2.dropna(subset=['bar'], inplace=True)
tm.assert_index_equal(samesize_frame.index, self.frame.index)
tm.assert_index_equal(inp_frame2.index, self.frame.index)
def test_dropna(self):
df = DataFrame(np.random.randn(6, 4))
df[2][:2] = nan
dropped = df.dropna(axis=1)
expected = df.loc[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0)
expected = df.loc[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
# threshold
dropped = df.dropna(axis=1, thresh=5)
expected = df.loc[:, [0, 1, 3]]
inp = df.copy()
inp.dropna(axis=1, thresh=5, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=0, thresh=4)
expected = df.loc[lrange(2, 6)]
inp = df.copy()
inp.dropna(axis=0, thresh=4, inplace=True)
assert_frame_equal(dropped, expected)
assert_frame_equal(inp, expected)
dropped = df.dropna(axis=1, thresh=4)
assert_frame_equal(dropped, df)
dropped = df.dropna(axis=1, thresh=3)
assert_frame_equal(dropped, df)
# subset
dropped = df.dropna(axis=0, subset=[0, 1, 3])
inp = df.copy()
inp.dropna(axis=0, subset=[0, 1, 3], inplace=True)
assert_frame_equal(dropped, df)
assert_frame_equal(inp, df)
# all
dropped = df.dropna(axis=1, how='all')
assert_frame_equal(dropped, df)
df[2] = nan
dropped = df.dropna(axis=1, how='all')
expected = df.loc[:, [0, 1, 3]]
assert_frame_equal(dropped, expected)
# bad input
pytest.raises(ValueError, df.dropna, axis=3)
def test_drop_and_dropna_caching(self):
        # test that the cacher updates
original = Series([1, 2, np.nan], name='A')
expected = Series([1, 2], dtype=original.dtype, name='A')
df = pd.DataFrame({'A': original.values.copy()})
df2 = df.copy()
df['A'].dropna()
assert_series_equal(df['A'], original)
df['A'].dropna(inplace=True)
assert_series_equal(df['A'], expected)
df2['A'].drop([1])
assert_series_equal(df2['A'], original)
df2['A'].drop([1], inplace=True)
assert_series_equal(df2['A'], original.drop([1]))
def test_dropna_corner(self):
# bad input
pytest.raises(ValueError, self.frame.dropna, how='foo')
pytest.raises(TypeError, self.frame.dropna, how=None)
# non-existent column - 8303
pytest.raises(KeyError, self.frame.dropna, subset=['A', 'X'])
def test_dropna_multiple_axes(self):
df = DataFrame([[1, np.nan, 2, 3],
[4, np.nan, 5, 6],
[np.nan, np.nan, np.nan, np.nan],
[7, np.nan, 8, 9]])
cp = df.copy()
result = df.dropna(how='all', axis=[0, 1])
result2 = df.dropna(how='all', axis=(0, 1))
expected = df.dropna(how='all').dropna(how='all', axis=1)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
assert_frame_equal(df, cp)
inp = df.copy()
inp.dropna(how='all', axis=(0, 1), inplace=True)
assert_frame_equal(inp, expected)
def test_fillna(self):
tf = self.tsframe
tf.loc[tf.index[:5], 'A'] = nan
tf.loc[tf.index[-5:], 'A'] = nan
zero_filled = self.tsframe.fillna(0)
assert (zero_filled.loc[zero_filled.index[:5], 'A'] == 0).all()
padded = self.tsframe.fillna(method='pad')
assert np.isnan(padded.loc[padded.index[:5], 'A']).all()
assert (padded.loc[padded.index[-5:], 'A'] ==
padded.loc[padded.index[-5], 'A']).all()
# mixed type
mf = self.mixed_frame
mf.loc[mf.index[5:20], 'foo'] = nan
mf.loc[mf.index[-10:], 'A'] = nan
result = self.mixed_frame.fillna(value=0)
result = self.mixed_frame.fillna(method='pad')
pytest.raises(ValueError, self.tsframe.fillna)
pytest.raises(ValueError, self.tsframe.fillna, 5, method='ffill')
# mixed numeric (but no float16)
mf = self.mixed_float.reindex(columns=['A', 'B', 'D'])
mf.loc[mf.index[-10:], 'A'] = nan
result = mf.fillna(value=0)
_check_mixed_float(result, dtype=dict(C=None))
result = mf.fillna(method='pad')
_check_mixed_float(result, dtype=dict(C=None))
# empty frame (GH #2778)
df = DataFrame(columns=['x'])
for m in ['pad', 'backfill']:
df.x.fillna(method=m, inplace=True)
df.x.fillna(method=m)
# with different dtype (GH3386)
df = DataFrame([['a', 'a', np.nan, 'a'], [
'b', 'b', np.nan, 'b'], ['c', 'c', np.nan, 'c']])
result = df.fillna({2: 'foo'})
expected = DataFrame([['a', 'a', 'foo', 'a'],
['b', 'b', 'foo', 'b'],
['c', 'c', 'foo', 'c']])
assert_frame_equal(result, expected)
df.fillna({2: 'foo'}, inplace=True)
assert_frame_equal(df, expected)
# limit and value
df = DataFrame(np.random.randn(10, 3))
df.iloc[2:7, 0] = np.nan
df.iloc[3:5, 2] = np.nan
expected = df.copy()
expected.iloc[2, 0] = 999
expected.iloc[3, 2] = 999
result = df.fillna(999, limit=1)
assert_frame_equal(result, expected)
# with datelike
# GH 6344
df = DataFrame({
'Date': [pd.NaT, Timestamp("2014-1-1")],
'Date2': [Timestamp("2013-1-1"), pd.NaT]
})
expected = df.copy()
expected['Date'] = expected['Date'].fillna(
df.loc[df.index[0], 'Date2'])
result = df.fillna(value={'Date': df['Date2']})
assert_frame_equal(result, expected)
# with timezone
# GH 15855
df = pd.DataFrame({'A': [pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.NaT]})
exp = pd.DataFrame({'A': [pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.Timestamp('2012-11-11 00:00:00+01:00')]})
assert_frame_equal(df.fillna(method='pad'), exp)
df = pd.DataFrame({'A': [pd.NaT,
pd.Timestamp('2012-11-11 00:00:00+01:00')]})
exp = pd.DataFrame({'A': [pd.Timestamp('2012-11-11 00:00:00+01:00'),
pd.Timestamp('2012-11-11 00:00:00+01:00')]})
assert_frame_equal(df.fillna(method='bfill'), exp)
def test_na_actions_categorical(self):
cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
vals = ["a", "b", np.nan, "d"]
df = DataFrame({"cats": cat, "vals": vals})
cat2 = Categorical([1, 2, 3, 3], categories=[1, 2, 3])
vals2 = ["a", "b", "b", "d"]
df_exp_fill = DataFrame({"cats": cat2, "vals": vals2})
cat3 = Categorical([1, 2, 3], categories=[1, 2, 3])
vals3 = ["a", "b", np.nan]
df_exp_drop_cats = DataFrame({"cats": cat3, "vals": vals3})
cat4 = Categorical([1, 2], categories=[1, 2, 3])
vals4 = ["a", "b"]
df_exp_drop_all = DataFrame({"cats": cat4, "vals": vals4})
# fillna
res = df.fillna(value={"cats": 3, "vals": "b"})
tm.assert_frame_equal(res, df_exp_fill)
with tm.assert_raises_regex(ValueError, "fill value must be "
"in categories"):
df.fillna(value={"cats": 4, "vals": "c"})
res = df.fillna(method='pad')
tm.assert_frame_equal(res, df_exp_fill)
# dropna
res = df.dropna(subset=["cats"])
tm.assert_frame_equal(res, df_exp_drop_cats)
res = df.dropna()
tm.assert_frame_equal(res, df_exp_drop_all)
# make sure that fillna takes missing values into account
c = Categorical([np.nan, "b", np.nan], categories=["a", "b"])
df = pd.DataFrame({"cats": c, "vals": [1, 2, 3]})
cat_exp = Categorical(["a", "b", "a"], categories=["a", "b"])
df_exp = DataFrame({"cats": cat_exp, "vals": [1, 2, 3]})
res = df.fillna("a")
tm.assert_frame_equal(res, df_exp)
def test_fillna_categorical_nan(self):
# GH 14021
# np.nan should always be a valid filler
cat = Categorical([np.nan, 2, np.nan])
val = Categorical([np.nan, np.nan, np.nan])
df = DataFrame({"cats": cat, "vals": val})
res = df.fillna(df.median())
v_exp = [np.nan, np.nan, np.nan]
df_exp = DataFrame({"cats": [2, 2, 2], "vals": v_exp},
dtype='category')
tm.assert_frame_equal(res, df_exp)
result = df.cats.fillna(np.nan)
tm.assert_series_equal(result, df.cats)
result = df.vals.fillna(np.nan)
tm.assert_series_equal(result, df.vals)
idx = pd.DatetimeIndex(['2011-01-01 09:00', '2016-01-01 23:45',
'2011-01-01 09:00', pd.NaT, pd.NaT])
df = DataFrame({'a': Categorical(idx)})
tm.assert_frame_equal(df.fillna(value=pd.NaT), df)
idx = pd.PeriodIndex(['2011-01', '2011-01', '2011-01',
pd.NaT, pd.NaT], freq='M')
df = DataFrame({'a': Categorical(idx)})
tm.assert_frame_equal(df.fillna(value=pd.NaT), df)
idx = pd.TimedeltaIndex(['1 days', '2 days',
'1 days', pd.NaT, pd.NaT])
df = DataFrame({'a': Categorical(idx)})
tm.assert_frame_equal(df.fillna(value=pd.NaT), df)
def test_fillna_downcast(self):
# GH 15277
# infer int64 from float64
df = pd.DataFrame({'a': [1., np.nan]})
result = df.fillna(0, downcast='infer')
expected = pd.DataFrame({'a': [1, 0]})
assert_frame_equal(result, expected)
# infer int64 from float64 when fillna value is a dict
df = pd.DataFrame({'a': [1., np.nan]})
result = df.fillna({'a': 0}, downcast='infer')
expected = pd.DataFrame({'a': [1, 0]})
assert_frame_equal(result, expected)
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = DataFrame(index=["A", "B", "C"], columns=[1, 2, 3, 4, 5])
result = df.get_dtype_counts().sort_values()
expected = Series({'object': 5})
assert_series_equal(result, expected)
result = df.fillna(1)
expected = DataFrame(1, index=["A", "B", "C"], columns=[1, 2, 3, 4, 5])
result = result.get_dtype_counts().sort_values()
expected = Series({'int64': 5})
assert_series_equal(result, expected)
# empty block
df = DataFrame(index=lrange(3), columns=['A', 'B'], dtype='float64')
result = df.fillna('nan')
expected = DataFrame('nan', index=lrange(3), columns=['A', 'B'])
assert_frame_equal(result, expected)
# equiv of replace
df = DataFrame(dict(A=[1, np.nan], B=[1., 2.]))
for v in ['', 1, np.nan, 1.0]:
expected = df.replace(np.nan, v)
result = df.fillna(v)
assert_frame_equal(result, expected)
def test_fillna_datetime_columns(self):
# GH 7095
df = pd.DataFrame({'A': [-1, -2, np.nan],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]},
index=date_range('20130110', periods=3))
result = df.fillna('?')
expected = pd.DataFrame({'A': [-1, -2, '?'],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', '?'],
'D': ['foo2', 'bar2', '?']},
index=date_range('20130110', periods=3))
tm.assert_frame_equal(result, expected)
df = pd.DataFrame({'A': [-1, -2, np.nan],
'B': [pd.Timestamp('2013-01-01'),
pd.Timestamp('2013-01-02'), pd.NaT],
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]},
index=date_range('20130110', periods=3))
result = df.fillna('?')
expected = pd.DataFrame({'A': [-1, -2, '?'],
'B': [pd.Timestamp('2013-01-01'),
pd.Timestamp('2013-01-02'), '?'],
'C': ['foo', 'bar', '?'],
'D': ['foo2', 'bar2', '?']},
index=pd.date_range('20130110', periods=3))
tm.assert_frame_equal(result, expected)
def test_ffill(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
assert_frame_equal(self.tsframe.ffill(),
self.tsframe.fillna(method='ffill'))
def test_bfill(self):
self.tsframe['A'][:5] = nan
self.tsframe['A'][-5:] = nan
assert_frame_equal(self.tsframe.bfill(),
self.tsframe.fillna(method='bfill'))
def test_frame_pad_backfill_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index, method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index, method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_frame_fillna_limit(self):
index = np.arange(10)
df = DataFrame(np.random.randn(10, 4), index=index)
result = df[:2].reindex(index)
result = result.fillna(method='pad', limit=5)
expected = df[:2].reindex(index).fillna(method='pad')
expected.values[-3:] = np.nan
tm.assert_frame_equal(result, expected)
result = df[-2:].reindex(index)
result = result.fillna(method='backfill', limit=5)
expected = df[-2:].reindex(index).fillna(method='backfill')
expected.values[:3] = np.nan
tm.assert_frame_equal(result, expected)
def test_fillna_skip_certain_blocks(self):
# don't try to fill boolean, int blocks
df = DataFrame(np.random.randn(10, 4).astype(int))
# it works!
df.fillna(np.nan)
def test_fillna_inplace(self):
df = DataFrame(np.random.randn(10, 4))
df[1][:4] = np.nan
df[3][-4:] = np.nan
expected = df.fillna(value=0)
assert expected is not df
df.fillna(value=0, inplace=True)
tm.assert_frame_equal(df, expected)
expected = df.fillna(value={0: 0}, inplace=True)
assert expected is None
df[1][:4] = np.nan
df[3][-4:] = np.nan
expected = df.fillna(method='ffill')
assert expected is not df
df.fillna(method='ffill', inplace=True)
tm.assert_frame_equal(df, expected)
def test_fillna_dict_series(self):
df = DataFrame({'a': [nan, 1, 2, nan, nan],
'b': [1, 2, 3, nan, nan],
'c': [nan, 1, 2, 3, 4]})
result = df.fillna({'a': 0, 'b': 5})
expected = df.copy()
expected['a'] = expected['a'].fillna(0)
expected['b'] = expected['b'].fillna(5)
assert_frame_equal(result, expected)
# it works
result = df.fillna({'a': 0, 'b': 5, 'd': 7})
# Series treated same as dict
result = df.fillna(df.max())
expected = df.fillna(df.max().to_dict())
assert_frame_equal(result, expected)
# disable this for now
with tm.assert_raises_regex(NotImplementedError,
'column by column'):
df.fillna(df.max(1), axis=1)
def test_fillna_dataframe(self):
# GH 8377
df = DataFrame({'a': [nan, 1, 2, nan, nan],
'b': [1, 2, 3, nan, nan],
'c': [nan, 1, 2, 3, 4]},
index=list('VWXYZ'))
# df2 may have different index and columns
df2 = DataFrame({'a': [nan, 10, 20, 30, 40],
'b': [50, 60, 70, 80, 90],
'foo': ['bar'] * 5},
index=list('VWXuZ'))
result = df.fillna(df2)
# only those columns and indices which are shared get filled
expected = DataFrame({'a': [nan, 1, 2, nan, 40],
'b': [1, 2, 3, nan, 90],
'c': [nan, 1, 2, 3, 4]},
index=list('VWXYZ'))
assert_frame_equal(result, expected)
def test_fillna_columns(self):
df = DataFrame(np.random.randn(10, 10))
df.values[:, ::2] = np.nan
result = df.fillna(method='ffill', axis=1)
expected = df.T.fillna(method='pad').T
assert_frame_equal(result, expected)
df.insert(6, 'foo', 5)
result = df.fillna(method='ffill', axis=1)
expected = df.astype(float).fillna(method='ffill', axis=1)
assert_frame_equal(result, expected)
def test_fillna_invalid_method(self):
with tm.assert_raises_regex(ValueError, 'ffil'):
self.frame.fillna(method='ffil')
def test_fillna_invalid_value(self):
# list
pytest.raises(TypeError, self.frame.fillna, [1, 2])
# tuple
pytest.raises(TypeError, self.frame.fillna, (1, 2))
# frame with series
pytest.raises(TypeError, self.frame.iloc[:, 0].fillna, self.frame)
def test_fillna_col_reordering(self):
cols = ["COL." + str(i) for i in range(5, 0, -1)]
data = np.random.rand(20, 5)
df = DataFrame(index=lrange(20), columns=cols, data=data)
filled = df.fillna(method='ffill')
assert df.columns.tolist() == filled.columns.tolist()
def test_fill_corner(self):
mf = self.mixed_frame
mf.loc[mf.index[5:20], 'foo'] = nan
mf.loc[mf.index[-10:], 'A'] = nan
filled = self.mixed_frame.fillna(value=0)
assert (filled.loc[filled.index[5:20], 'foo'] == 0).all()
del self.mixed_frame['foo']
empty_float = self.frame.reindex(columns=[])
# TODO(wesm): unused?
result = empty_float.fillna(value=0) # noqa
def test_fill_value_when_combine_const(self):
# GH12723
dat = np.array([0, 1, np.nan, 3, 4, 5], dtype='float')
df = DataFrame({'foo': dat}, index=range(6))
exp = df.fillna(0).add(2)
res = df.add(2, fill_value=0)
assert_frame_equal(res, exp)
class TestDataFrameInterpolate(TestData):
def test_interp_basic(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
expected = DataFrame({'A': [1., 2., 3., 4.],
'B': [1., 4., 9., 9.],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df.interpolate()
assert_frame_equal(result, expected)
result = df.set_index('C').interpolate()
expected = df.set_index('C')
expected.loc[3, 'A'] = 3
expected.loc[5, 'B'] = 9
assert_frame_equal(result, expected)
def test_interp_bad_method(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
with pytest.raises(ValueError):
df.interpolate(method='not_a_method')
def test_interp_combo(self):
df = DataFrame({'A': [1., 2., np.nan, 4.],
'B': [1, 4, 9, np.nan],
'C': [1, 2, 3, 5],
'D': list('abcd')})
result = df['A'].interpolate()
expected = Series([1., 2., 3., 4.], name='A')
assert_series_equal(result, expected)
result = df['A'].interpolate(downcast='infer')
expected = Series([1, 2, 3, 4], name='A')
assert_series_equal(result, expected)
def test_interp_nan_idx(self):
df = DataFrame({'A': [1, 2, np.nan, 4], 'B': [np.nan, 2, 3, 4]})
df = df.set_index('A')
with pytest.raises(NotImplementedError):
df.interpolate(method='values')
@td.skip_if_no_scipy
def test_interp_various(self):
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
df = df.set_index('C')
expected = df.copy()
result = df.interpolate(method='polynomial', order=1)
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923076
assert_frame_equal(result, expected)
result = df.interpolate(method='cubic')
# GH #15662.
# new cubic and quadratic interpolation algorithms from scipy 0.19.0.
# previously `splmake` was used. See scipy/scipy#6710
if _is_scipy_ge_0190:
expected.A.loc[3] = 2.81547781
expected.A.loc[13] = 5.52964175
else:
expected.A.loc[3] = 2.81621174
expected.A.loc[13] = 5.64146581
assert_frame_equal(result, expected)
result = df.interpolate(method='nearest')
expected.A.loc[3] = 2
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
result = df.interpolate(method='quadratic')
if _is_scipy_ge_0190:
expected.A.loc[3] = 2.82150771
expected.A.loc[13] = 6.12648668
else:
expected.A.loc[3] = 2.82533638
expected.A.loc[13] = 6.02817974
assert_frame_equal(result, expected)
result = df.interpolate(method='slinear')
expected.A.loc[3] = 2.66666667
expected.A.loc[13] = 5.76923077
assert_frame_equal(result, expected)
result = df.interpolate(method='zero')
expected.A.loc[3] = 2.
expected.A.loc[13] = 5
assert_frame_equal(result, expected, check_dtype=False)
@td.skip_if_no_scipy
def test_interp_alt_scipy(self):
df = DataFrame({'A': [1, 2, np.nan, 4, 5, np.nan, 7],
'C': [1, 2, 3, 5, 8, 13, 21]})
result = df.interpolate(method='barycentric')
expected = df.copy()
expected.loc[2, 'A'] = 3
expected.loc[5, 'A'] = 6
assert_frame_equal(result, expected)
result = df.interpolate(method='barycentric', downcast='infer')
assert_frame_equal(result, expected.astype(np.int64))
result = df.interpolate(method='krogh')
expectedk = df.copy()
expectedk['A'] = expected['A']
assert_frame_equal(result, expectedk)
_skip_if_no_pchip()
import scipy
result = df.interpolate(method='pchip')
expected.loc[2, 'A'] = 3
if LooseVersion(scipy.__version__) >= LooseVersion('0.17.0'):
expected.loc[5, 'A'] = 6.0
else:
expected.loc[5, 'A'] = 6.125
assert_frame_equal(result, expected)
def test_interp_rowwise(self):
df = DataFrame({0: [1, 2, np.nan, 4],
1: [2, 3, 4, np.nan],
2: [np.nan, 4, 5, 6],
3: [4, np.nan, 6, 7],
4: [1, 2, 3, 4]})
result = df.interpolate(axis=1)
expected = df.copy()
expected.loc[3, 1] = 5
expected.loc[0, 2] = 3
expected.loc[1, 3] = 3
expected[4] = expected[4].astype(np.float64)
assert_frame_equal(result, expected)
result = df.interpolate(axis=1, method='values')
assert_frame_equal(result, expected)
result = df.interpolate(axis=0)
expected = df.interpolate()
assert_frame_equal(result, expected)
def test_rowwise_alt(self):
df = DataFrame({0: [0, .5, 1., np.nan, 4, 8, np.nan, np.nan, 64],
1: [1, 2, 3, 4, 3, 2, 1, 0, -1]})
df.interpolate(axis=0)
@pytest.mark.parametrize("check_scipy", [
False, pytest.param(True, marks=td.skip_if_no_scipy)
])
def test_interp_leading_nans(self, check_scipy):
df = DataFrame({"A": [np.nan, np.nan, .5, .25, 0],
"B": [np.nan, -3, -3.5, np.nan, -4]})
result = df.interpolate()
expected = df.copy()
expected['B'].loc[3] = -3.75
assert_frame_equal(result, expected)
if check_scipy:
result = df.interpolate(method='polynomial', order=1)
assert_frame_equal(result, expected)
def test_interp_raise_on_only_mixed(self):
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': ['a', 'b', 'c', 'd'],
'C': [np.nan, 2, 5, 7],
'D': [np.nan, np.nan, 9, 9],
'E': [1, 2, 3, 4]})
with pytest.raises(TypeError):
df.interpolate(axis=1)
def test_interp_inplace(self):
df = DataFrame({'a': [1., 2., np.nan, 4.]})
expected = DataFrame({'a': [1., 2., 3., 4.]})
result = df.copy()
result['a'].interpolate(inplace=True)
assert_frame_equal(result, expected)
result = df.copy()
result['a'].interpolate(inplace=True, downcast='infer')
assert_frame_equal(result, expected.astype('int64'))
def test_interp_inplace_row(self):
# GH 10395
result = DataFrame({'a': [1., 2., 3., 4.],
'b': [np.nan, 2., 3., 4.],
'c': [3, 2, 2, 2]})
expected = result.interpolate(method='linear', axis=1, inplace=False)
result.interpolate(method='linear', axis=1, inplace=True)
assert_frame_equal(result, expected)
def test_interp_ignore_all_good(self):
# GH
df = DataFrame({'A': [1, 2, np.nan, 4],
'B': [1, 2, 3, 4],
'C': [1., 2., np.nan, 4.],
'D': [1., 2., 3., 4.]})
expected = DataFrame({'A': np.array(
[1, 2, 3, 4], dtype='float64'),
'B': np.array(
[1, 2, 3, 4], dtype='int64'),
'C': np.array(
[1., 2., 3, 4.], dtype='float64'),
'D': np.array(
[1., 2., 3., 4.], dtype='float64')})
result = df.interpolate(downcast=None)
assert_frame_equal(result, expected)
# all good
result = df[['B', 'D']].interpolate(downcast=None)
assert_frame_equal(result, df[['B', 'D']])
|
bsd-3-clause
|
hombit/scientific_python
|
misc/share_jupyter/jupyter/jupyter_notebook_config.py
|
1
|
23610
|
# Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp(Application) configuration
#------------------------------------------------------------------------------
## Base class for Jupyter applications
## Answer yes to any prompts.
#c.JupyterApp.answer_yes = False
## Full path of a config file.
#c.JupyterApp.config_file = ''
## Specify a config file to load.
#c.JupyterApp.config_file_name = ''
## Generate default config file.
#c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp(JupyterApp) configuration
#------------------------------------------------------------------------------
## Set the Access-Control-Allow-Credentials: true header
#c.NotebookApp.allow_credentials = False
## Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
#c.NotebookApp.allow_origin = ''
## Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
#c.NotebookApp.allow_origin_pat = ''
## Whether to allow the user to run the notebook as root.
#c.NotebookApp.allow_root = False
## DEPRECATED use base_url
#c.NotebookApp.base_project_url = '/'
## The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
c.NotebookApp.base_url = '/jupyter/'
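# A non-root base URL like this is commonly used when the notebook server sits
# behind a reverse proxy that forwards requests under the /jupyter/ prefix.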
## Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
#c.NotebookApp.browser = ''
## The full path to an SSL/TLS certificate file.
#c.NotebookApp.certfile = ''
## The full path to a certificate authority certificate for SSL/TLS client
# authentication.
#c.NotebookApp.client_ca = ''
## The config manager class to use
#c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
## The notebook manager class to use.
#c.NotebookApp.contents_manager_class = 'notebook.services.contents.largefilemanager.LargeFileManager'
## Extra keyword arguments to pass to `set_secure_cookie`. See tornado's
# set_secure_cookie docs for details.
#c.NotebookApp.cookie_options = {}
## The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
#c.NotebookApp.cookie_secret = b''
## The file where the cookie secret is stored.
#c.NotebookApp.cookie_secret_file = ''
## The default URL to redirect to from `/`
#c.NotebookApp.default_url = '/tree'
## Disable cross-site-request-forgery protection
#
# Jupyter notebook 4.3.1 introduces protection from cross-site request
# forgeries, requiring API requests to either:
#
# - originate from pages served by this server (validated with XSRF cookie and
# token), or - authenticate with a token
#
# Some anonymous compute resources still desire the ability to run code,
# completely without authentication. These services can disable all
# authentication and security checks, with the full knowledge of what that
# implies.
#c.NotebookApp.disable_check_xsrf = False
## Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
#c.NotebookApp.enable_mathjax = True
## extra paths to look for Javascript notebook extensions
#c.NotebookApp.extra_nbextensions_path = []
## Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
#c.NotebookApp.extra_static_paths = []
## Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
#c.NotebookApp.extra_template_paths = []
##
#c.NotebookApp.file_to_run = ''
## Deprecated: Use minified JS file or not, mainly use during dev to avoid JS
# recompilation
#c.NotebookApp.ignore_minified_js = False
## (bytes/sec) Maximum rate at which messages can be sent on iopub before they
# are limited.
#c.NotebookApp.iopub_data_rate_limit = 1000000
## (msgs/sec) Maximum rate at which messages can be sent on iopub before they are
# limited.
#c.NotebookApp.iopub_msg_rate_limit = 1000
## The IP address the notebook server will listen on.
#c.NotebookApp.ip = 'localhost'
## Supply extra arguments that will be passed to Jinja environment.
#c.NotebookApp.jinja_environment_options = {}
## Extra variables to supply to jinja templates when rendering.
#c.NotebookApp.jinja_template_vars = {}
## The kernel manager class to use.
#c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
## The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
#c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
## The full path to a private key file for usage with SSL/TLS.
#c.NotebookApp.keyfile = ''
## The login handler class to use.
#c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
## The logout handler class to use.
#c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
## The MathJax.js configuration file that is to be used.
#c.NotebookApp.mathjax_config = 'TeX-AMS-MML_HTMLorMML-full,Safe'
## A custom url for MathJax.js. Should be in the form of a case-sensitive url to
# MathJax, for example: /static/components/MathJax/MathJax.js
#c.NotebookApp.mathjax_url = ''
## Dict of Python modules to load as notebook server extensions. Entry values can
#  be used to enable and disable the loading of the extensions. The extensions
# will be loaded in alphabetical order.
#c.NotebookApp.nbserver_extensions = {}
## The directory to use for notebooks and kernels.
#c.NotebookApp.notebook_dir = ''
## Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
#c.NotebookApp.open_browser = True
## Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
c.NotebookApp.password = 'sha1:6c1a5cca33dc:30f31ede1973570aa5e471d9d5537852a5f9386b'
## Forces users to use a password for the Notebook server. This is useful in a
# multi user environment, for instance when everybody in the LAN can access each
#  other's machine through ssh.
#
#  In such a case, serving the notebook server on localhost is not secure since
# any user can connect to the notebook server via ssh.
#c.NotebookApp.password_required = False
## The port the notebook server will listen on.
c.NotebookApp.port = 8000
## The number of additional ports to try if the specified port is not available.
#c.NotebookApp.port_retries = 50
## DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
#c.NotebookApp.pylab = 'disabled'
## (sec) Time window used to check the message and data rate limits.
#c.NotebookApp.rate_limit_window = 3
## Reraise exceptions encountered loading server extensions?
#c.NotebookApp.reraise_server_extension_failures = False
## DEPRECATED use the nbserver_extensions dict instead
#c.NotebookApp.server_extensions = []
## The session manager class to use.
#c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
## Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
#c.NotebookApp.ssl_options = {}
## Supply overrides for terminado. Currently only supports "shell_command".
#c.NotebookApp.terminado_settings = {}
## Token used for authenticating first-time connections to the server.
#
# When no password is enabled, the default is to generate a new, random token.
#
# Setting to an empty string disables authentication altogether, which is NOT
# RECOMMENDED.
#c.NotebookApp.token = '<generated>'
## Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
#c.NotebookApp.tornado_settings = {}
## Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
#  For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
#c.NotebookApp.trust_xheaders = False
## DEPRECATED, use tornado_settings
#c.NotebookApp.webapp_settings = {}
## The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
#c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# ConnectionFileMixin(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Mixin for configurable classes that work with connection files
## JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
#c.ConnectionFileMixin.connection_file = ''
## set the control (ROUTER) port [default: random]
#c.ConnectionFileMixin.control_port = 0
## set the heartbeat port [default: random]
#c.ConnectionFileMixin.hb_port = 0
## set the iopub (PUB) port [default: random]
#c.ConnectionFileMixin.iopub_port = 0
## Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
#c.ConnectionFileMixin.ip = ''
## set the shell (ROUTER) port [default: random]
#c.ConnectionFileMixin.shell_port = 0
## set the stdin (ROUTER) port [default: random]
#c.ConnectionFileMixin.stdin_port = 0
##
#c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager(ConnectionFileMixin) configuration
#------------------------------------------------------------------------------
## Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
## Should we autorestart the kernel if it dies.
#c.KernelManager.autorestart = True
## DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it given on the Jupyter command
# line.
#c.KernelManager.kernel_cmd = []
## Time to wait for a kernel to terminate before killing it, in seconds.
#c.KernelManager.shutdown_wait_time = 5.0
#------------------------------------------------------------------------------
# Session(Configurable) configuration
#------------------------------------------------------------------------------
## Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
## Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
#c.Session.buffer_threshold = 1024
## Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
#c.Session.check_pid = True
## Threshold (in bytes) beyond which a buffer should be sent without copying.
#c.Session.copy_threshold = 65536
## Debug output in the Session
#c.Session.debug = False
## The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
#c.Session.digest_history_size = 65536
## The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
#c.Session.item_threshold = 64
## execution key, for signing messages.
#c.Session.key = b''
## path to file containing execution key.
#c.Session.keyfile = ''
## Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
#c.Session.metadata = {}
## The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
#c.Session.packer = 'json'
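## Illustrative sketch (not part of the generated defaults): following the
#  msgpack example in the Session docstring above, one could set, assuming the
#  msgpack package is installed:
#c.Session.packer = 'msgpack.packb'
#c.Session.unpacker = 'msgpack.unpackb'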
## The UUID identifying this session.
#c.Session.session = ''
## The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
#c.Session.signature_scheme = 'hmac-sha256'
## The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
#c.Session.unpacker = 'json'
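## Example (hypothetical, left commented out): switch the wire format from JSON
# to msgpack, as mentioned in the Session description above. This assumes the
# `msgpack` package is importable, and both ends of the connection must agree
# on the packer/unpacker pair.
#
# c.Session.packer = 'msgpack.packb'
# c.Session.unpacker = 'msgpack.unpackb'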
## Username for the Session. Default is your system username.
#c.Session.username = 'username'
#------------------------------------------------------------------------------
# MultiKernelManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for managing multiple kernels.
## The name of the default kernel to start
#c.MultiKernelManager.default_kernel_name = 'python3'
## The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
#c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager(MultiKernelManager) configuration
#------------------------------------------------------------------------------
## A KernelManager that handles notebook mapping and HTTP error handling
##
#c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
##
#c.ContentsManager.checkpoints = None
##
#c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
##
#c.ContentsManager.checkpoints_kwargs = {}
## Glob patterns to hide in file and directory listings.
#c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
## Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
#c.ContentsManager.pre_save_hook = None
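## Example (hypothetical, left commented out): a pre_save_hook that strips code
# cell outputs before the notebook reaches disk, following the keyword-argument
# contract described above.
#
# def scrub_output_pre_save(model, path, contents_manager, **kwargs):
#     """Remove outputs and execution counts from code cells before saving."""
#     if model['type'] != 'notebook':
#         return
#     for cell in model['content'].get('cells', []):
#         if cell.get('cell_type') == 'code':
#             cell['outputs'] = []
#             cell['execution_count'] = None
#
# c.ContentsManager.pre_save_hook = scrub_output_pre_save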
##
#c.ContentsManager.root_dir = '/'
## The base name used when creating untitled directories.
#c.ContentsManager.untitled_directory = 'Untitled Folder'
## The base name used when creating untitled files.
#c.ContentsManager.untitled_file = 'untitled'
## The base name used when creating untitled notebooks.
#c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin(Configurable) configuration
#------------------------------------------------------------------------------
## Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note
# ----
# Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
# A directory against against which API-style paths are to be resolved.
#
# log : logging.Logger
## By default, notebooks are first saved to a temporary file on disk and then,
# if successfully written, the temporary file replaces the old one. This
# procedure, known as 'atomic writing', causes problems on file systems without
# operation-order enforcement (such as some networked file systems). If set to
# False, the new notebook is written directly over the old one, which can fail
# (e.g. on a full filesystem or when a quota is exceeded).
#c.FileManagerMixin.use_atomic_writing = True
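## Example (hypothetical): disable atomic writes for a notebook directory that
# lives on a networked filesystem without rename-ordering guarantees.
# c.FileManagerMixin.use_atomic_writing = False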
#------------------------------------------------------------------------------
# FileContentsManager(FileManagerMixin,ContentsManager) configuration
#------------------------------------------------------------------------------
## Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - os_path: the filesystem path to the file just written
# - model: the model representing the file
# - contents_manager: this ContentsManager instance
#c.FileContentsManager.post_save_hook = None
##
#c.FileContentsManager.root_dir = ''
## DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
#c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for computing and verifying notebook signatures.
## The hashing algorithm used to sign notebooks.
#c.NotebookNotary.algorithm = 'sha256'
## The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter data directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
#c.NotebookNotary.db_file = ''
## The secret key with which notebooks are signed.
#c.NotebookNotary.secret = b''
## The file where the secret key is stored.
#c.NotebookNotary.secret_file = ''
## A callable returning the storage backend for notebook signatures. The default
# uses an SQLite database.
#c.NotebookNotary.store_factory = traitlets.Undefined
#------------------------------------------------------------------------------
# KernelSpecManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## If there is no Python kernelspec registered and the IPython kernel is
# available, ensure it is added to the spec list.
#c.KernelSpecManager.ensure_native_kernel = True
## The kernel spec class. This is configurable to allow subclassing of the
# KernelSpecManager for customized behavior.
#c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
## Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
#c.KernelSpecManager.whitelist = set()
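## Example (hypothetical): only expose the Python 3 and R kernels in the UI.
# c.KernelSpecManager.whitelist = {'python3', 'ir'}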
## https://github.com/jbwhit/til/blob/master/jupyter/autosave_html_py.md
import os
from subprocess import check_call
from queue import Queue
from threading import Thread
class PostSave:
    """Post-save hook that converts saved notebooks to HTML in the background.
    Conversion jobs are pushed onto a queue and handled by a single daemon
    worker thread, so saving in the notebook UI is never blocked by nbconvert.
    """
    __queue = Queue()
    def __init__(self):
        t = Thread(target=self.__worker, daemon=True)
        t.start()
    def __worker(self):
        while True:
            args, kwargs = self.__queue.get()
            try:
                self.__convert(*args, **kwargs)
            except Exception as exc:
                print("post_save_hook: nbconvert failed: %s" % exc)
            finally:
                self.__queue.task_done()
    @staticmethod
    def __convert(model, os_path, contents_manager):
        # Only notebooks are converted; other file types are ignored.
        d, fname = os.path.split(os_path)
        if model['type'] == 'notebook':
            check_call(['jupyter', 'nbconvert', '--to', 'html', fname], cwd=d)
    def __call__(self, *args, **kwargs):
        # Called by the ContentsManager as
        # hook(os_path=..., model=..., contents_manager=...); just enqueue it.
        self.__queue.put((args, kwargs))
# Convert .ipynb files into .html after each save.
c.FileContentsManager.post_save_hook = PostSave()
|
mit
|
livc/Paddle
|
demo/gan/gan_trainer.py
|
13
|
12731
|
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import random
import numpy
import cPickle
import sys, os
from PIL import Image
from paddle.trainer.config_parser import parse_config
from paddle.trainer.config_parser import logger
import py_paddle.swig_paddle as api
import matplotlib.pyplot as plt
def plot2DScatter(data, outputfile):
'''
Plot the data as a 2D scatter plot and save to outputfile
    data needs to be two dimensional
'''
x = data[:, 0]
y = data[:, 1]
logger.info("The mean vector is %s" % numpy.mean(data, 0))
logger.info("The std vector is %s" % numpy.std(data, 0))
heatmap, xedges, yedges = numpy.histogram2d(x, y, bins=50)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
plt.clf()
plt.scatter(x, y)
plt.savefig(outputfile, bbox_inches='tight')
def CHECK_EQ(a, b):
assert a == b, "a=%s, b=%s" % (a, b)
def copy_shared_parameters(src, dst):
'''
copy the parameters from src to dst
:param src: the source of the parameters
:type src: GradientMachine
:param dst: the destination of the parameters
:type dst: GradientMachine
'''
src_params = [src.getParameter(i) for i in xrange(src.getParameterSize())]
src_params = dict([(p.getName(), p) for p in src_params])
for i in xrange(dst.getParameterSize()):
dst_param = dst.getParameter(i)
src_param = src_params.get(dst_param.getName(), None)
if src_param is None:
continue
src_value = src_param.getBuf(api.PARAMETER_VALUE)
dst_value = dst_param.getBuf(api.PARAMETER_VALUE)
CHECK_EQ(len(src_value), len(dst_value))
dst_value.copyFrom(src_value)
dst_param.setValueUpdated()
def print_parameters(src):
src_params = [src.getParameter(i) for i in xrange(src.getParameterSize())]
print "***************"
for p in src_params:
print "Name is %s" % p.getName()
print "value is %s \n" % p.getBuf(api.PARAMETER_VALUE).copyToNumpyArray(
)
def load_mnist_data(imageFile):
f = open(imageFile, "rb")
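    # Skip the 16-byte IDX header (magic number, image count, rows, columns).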
f.read(16)
# Define number of samples for train/test
if "train" in imageFile:
n = 60000
else:
n = 10000
data = numpy.fromfile(f, 'ubyte', count=n * 28 * 28).reshape((n, 28 * 28))
data = data / 255.0 * 2.0 - 1.0
f.close()
return data.astype('float32')
def load_cifar_data(cifar_path):
batch_size = 10000
data = numpy.zeros((5 * batch_size, 32 * 32 * 3), dtype="float32")
for i in range(1, 6):
file = cifar_path + "/data_batch_" + str(i)
fo = open(file, 'rb')
dict = cPickle.load(fo)
fo.close()
data[(i - 1) * batch_size:(i * batch_size), :] = dict["data"]
data = data / 255.0 * 2.0 - 1.0
return data
# synthesize 2-D uniform data
def load_uniform_data():
data = numpy.random.rand(1000000, 2).astype('float32')
return data
def merge(images, size):
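    # Tile a batch of flattened images into a single size[0] x size[1] grid.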
if images.shape[1] == 28 * 28:
h, w, c = 28, 28, 1
else:
h, w, c = 32, 32, 3
img = numpy.zeros((h * size[0], w * size[1], c))
for idx in xrange(size[0] * size[1]):
i = idx % size[1]
j = idx // size[1]
img[j*h:j*h+h, i*w:i*w+w, :] = \
((images[idx, :].reshape((h, w, c), order="F").transpose(1, 0, 2) + 1.0) / 2.0 * 255.0)
return img.astype('uint8')
def save_images(images, path):
merged_img = merge(images, [8, 8])
if merged_img.shape[2] == 1:
im = Image.fromarray(numpy.squeeze(merged_img)).convert('RGB')
else:
im = Image.fromarray(merged_img, mode="RGB")
im.save(path)
def get_real_samples(batch_size, data_np):
return data_np[numpy.random.choice(
data_np.shape[0], batch_size, replace=False), :]
def get_noise(batch_size, noise_dim):
return numpy.random.normal(size=(batch_size, noise_dim)).astype('float32')
def get_fake_samples(generator_machine, batch_size, noise):
gen_inputs = api.Arguments.createArguments(1)
gen_inputs.setSlotValue(0, api.Matrix.createDenseFromNumpy(noise))
gen_outputs = api.Arguments.createArguments(0)
generator_machine.forward(gen_inputs, gen_outputs, api.PASS_TEST)
fake_samples = gen_outputs.getSlotValue(0).copyToNumpyMat()
return fake_samples
def get_training_loss(training_machine, inputs):
outputs = api.Arguments.createArguments(0)
training_machine.forward(inputs, outputs, api.PASS_TEST)
loss = outputs.getSlotValue(0).copyToNumpyMat()
return numpy.mean(loss)
def prepare_discriminator_data_batch_pos(batch_size, data_np):
real_samples = get_real_samples(batch_size, data_np)
labels = numpy.ones(batch_size, dtype='int32')
inputs = api.Arguments.createArguments(2)
inputs.setSlotValue(0, api.Matrix.createDenseFromNumpy(real_samples))
inputs.setSlotIds(1, api.IVector.createVectorFromNumpy(labels))
return inputs
def prepare_discriminator_data_batch_neg(generator_machine, batch_size, noise):
fake_samples = get_fake_samples(generator_machine, batch_size, noise)
labels = numpy.zeros(batch_size, dtype='int32')
inputs = api.Arguments.createArguments(2)
inputs.setSlotValue(0, api.Matrix.createDenseFromNumpy(fake_samples))
inputs.setSlotIds(1, api.IVector.createVectorFromNumpy(labels))
return inputs
def prepare_generator_data_batch(batch_size, noise):
label = numpy.ones(batch_size, dtype='int32')
inputs = api.Arguments.createArguments(2)
inputs.setSlotValue(0, api.Matrix.createDenseFromNumpy(noise))
inputs.setSlotIds(1, api.IVector.createVectorFromNumpy(label))
return inputs
def find(iterable, cond):
for item in iterable:
if cond(item):
return item
return None
def get_layer_size(model_conf, layer_name):
layer_conf = find(model_conf.layers, lambda x: x.name == layer_name)
assert layer_conf is not None, "Cannot find '%s' layer" % layer_name
return layer_conf.size
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--data_source", help="mnist or cifar or uniform")
parser.add_argument(
"--use_gpu", default="1", help="1 means use gpu for training")
parser.add_argument("--gpu_id", default="0", help="the gpu_id parameter")
args = parser.parse_args()
data_source = args.data_source
use_gpu = args.use_gpu
assert data_source in ["mnist", "cifar", "uniform"]
assert use_gpu in ["0", "1"]
if not os.path.exists("./%s_samples/" % data_source):
os.makedirs("./%s_samples/" % data_source)
if not os.path.exists("./%s_params/" % data_source):
os.makedirs("./%s_params/" % data_source)
api.initPaddle('--use_gpu=' + use_gpu, '--dot_period=10',
'--log_period=100', '--gpu_id=' + args.gpu_id,
'--save_dir=' + "./%s_params/" % data_source)
if data_source == "uniform":
conf = "gan_conf.py"
num_iter = 10000
else:
conf = "gan_conf_image.py"
num_iter = 1000
gen_conf = parse_config(conf, "mode=generator_training,data=" + data_source)
dis_conf = parse_config(conf,
"mode=discriminator_training,data=" + data_source)
generator_conf = parse_config(conf, "mode=generator,data=" + data_source)
batch_size = dis_conf.opt_config.batch_size
noise_dim = get_layer_size(gen_conf.model_config, "noise")
if data_source == "mnist":
data_np = load_mnist_data("./data/mnist_data/train-images-idx3-ubyte")
elif data_source == "cifar":
data_np = load_cifar_data("./data/cifar-10-batches-py/")
else:
data_np = load_uniform_data()
# this creates a gradient machine for discriminator
dis_training_machine = api.GradientMachine.createFromConfigProto(
dis_conf.model_config)
    # this creates a gradient machine for the generator
gen_training_machine = api.GradientMachine.createFromConfigProto(
gen_conf.model_config)
    # generator_machine is only used to generate data, which is then used to
    # train the discriminator
logger.info(str(generator_conf.model_config))
generator_machine = api.GradientMachine.createFromConfigProto(
generator_conf.model_config)
dis_trainer = api.Trainer.create(dis_conf, dis_training_machine)
gen_trainer = api.Trainer.create(gen_conf, gen_training_machine)
dis_trainer.startTrain()
gen_trainer.startTrain()
# Sync parameters between networks (GradientMachine) at the beginning
copy_shared_parameters(gen_training_machine, dis_training_machine)
copy_shared_parameters(gen_training_machine, generator_machine)
    # Constrain training so that neither the discriminator nor the generator
    # is trained more than MAX_strike times in a row.
curr_train = "dis"
curr_strike = 0
MAX_strike = 5
for train_pass in xrange(100):
dis_trainer.startTrainPass()
gen_trainer.startTrainPass()
for i in xrange(num_iter):
# Do forward pass in discriminator to get the dis_loss
noise = get_noise(batch_size, noise_dim)
data_batch_dis_pos = prepare_discriminator_data_batch_pos(
batch_size, data_np)
dis_loss_pos = get_training_loss(dis_training_machine,
data_batch_dis_pos)
data_batch_dis_neg = prepare_discriminator_data_batch_neg(
generator_machine, batch_size, noise)
dis_loss_neg = get_training_loss(dis_training_machine,
data_batch_dis_neg)
dis_loss = (dis_loss_pos + dis_loss_neg) / 2.0
# Do forward pass in generator to get the gen_loss
data_batch_gen = prepare_generator_data_batch(batch_size, noise)
gen_loss = get_training_loss(gen_training_machine, data_batch_gen)
if i % 100 == 0:
print "d_pos_loss is %s d_neg_loss is %s" % (dis_loss_pos,
dis_loss_neg)
print "d_loss is %s g_loss is %s" % (dis_loss, gen_loss)
            # Decide which network to train based on the training history
            # and the relative size of the losses.
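            # Train the discriminator when its loss exceeds the generator's
            # (i.e. it is lagging behind) or when the generator has already
            # been trained MAX_strike times in a row, unless the discriminator
            # itself has just been trained MAX_strike times in a row.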
if (not (curr_train == "dis" and curr_strike == MAX_strike)) and \
((curr_train == "gen" and curr_strike == MAX_strike) or dis_loss > gen_loss):
if curr_train == "dis":
curr_strike += 1
else:
curr_train = "dis"
curr_strike = 1
dis_trainer.trainOneDataBatch(batch_size, data_batch_dis_neg)
dis_trainer.trainOneDataBatch(batch_size, data_batch_dis_pos)
copy_shared_parameters(dis_training_machine,
gen_training_machine)
else:
if curr_train == "gen":
curr_strike += 1
else:
curr_train = "gen"
curr_strike = 1
gen_trainer.trainOneDataBatch(batch_size, data_batch_gen)
# TODO: add API for paddle to allow true parameter sharing between different GradientMachines
# so that we do not need to copy shared parameters.
copy_shared_parameters(gen_training_machine,
dis_training_machine)
copy_shared_parameters(gen_training_machine, generator_machine)
dis_trainer.finishTrainPass()
gen_trainer.finishTrainPass()
# At the end of each pass, save the generated samples/images
fake_samples = get_fake_samples(generator_machine, batch_size, noise)
if data_source == "uniform":
plot2DScatter(fake_samples, "./%s_samples/train_pass%s.png" %
(data_source, train_pass))
else:
save_images(fake_samples, "./%s_samples/train_pass%s.png" %
(data_source, train_pass))
dis_trainer.finishTrain()
gen_trainer.finishTrain()
if __name__ == '__main__':
main()
|
apache-2.0
|
chintak/scikit-image
|
doc/examples/applications/plot_morphology.py
|
2
|
8162
|
"""
=======================
Morphological Filtering
=======================
Morphological image processing is a collection of non-linear operations related
to the shape or morphology of features in an image, such as boundaries,
skeletons, etc. In any given technique, we probe an image with a small shape or
template called a structuring element, which defines the region of interest or
neighborhood around a pixel.
In this document we outline the following basic morphological operations:
1. Erosion
2. Dilation
3. Opening
4. Closing
5. White Tophat
6. Black Tophat
7. Skeletonize
8. Convex Hull
To get started, let's load an image using ``io.imread``. Note that morphology
functions only work on gray-scale or binary images, so we set ``as_grey=True``.
"""
import matplotlib.pyplot as plt
from skimage.data import data_dir
from skimage.util import img_as_ubyte
from skimage import io
plt.gray()
phantom = img_as_ubyte(io.imread(data_dir+'/phantom.png', as_grey=True))
plt.imshow(phantom)
"""
.. image:: PLOT2RST.current_figure
Let's also define a convenience function for plotting comparisons:
"""
def plot_comparison(original, filtered, filter_name):
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4))
ax1.imshow(original)
ax1.set_title('original')
ax1.axis('off')
ax2.imshow(filtered)
ax2.set_title(filter_name)
ax2.axis('off')
"""
Erosion
=======
Morphological ``erosion`` sets a pixel at (i, j) to the *minimum over all
pixels in the neighborhood centered at (i, j)*. The structuring element,
``selem``, passed to ``erosion`` is a boolean array that describes this
neighborhood. Below, we use ``disk`` to create a circular structuring element,
which we use for most of the following examples.
"""
from skimage.morphology import erosion, dilation, opening, closing, white_tophat
from skimage.morphology import black_tophat, skeletonize, convex_hull_image
from skimage.morphology import disk
selem = disk(6)
eroded = erosion(phantom, selem)
plot_comparison(phantom, eroded, 'erosion')
"""
.. image:: PLOT2RST.current_figure
Notice how the white boundary of the image disappears or gets eroded as we
increase the size of the disk. Also notice the increase in size of the two
black ellipses in the center and the disappearance of the 3 light grey
patches in the lower part of the image.
Dilation
========
Morphological ``dilation`` sets a pixel at (i, j) to the *maximum over all
pixels in the neighborhood centered at (i, j)*. Dilation enlarges bright
regions and shrinks dark regions.
"""
dilated = dilation(phantom, selem)
plot_comparison(phantom, dilated, 'dilation')
"""
.. image:: PLOT2RST.current_figure
Notice how the white boundary of the image thickens, or gets dilated, as we
increase the size of the disk. Also notice the decrease in size of the two
black ellipses in the centre, and the thickening of the light grey circle in
the center and the 3 patches in the lower part of the image.
Opening
=======
Morphological ``opening`` on an image is defined as an *erosion followed by a
dilation*. Opening can remove small bright spots (i.e. "salt") and connect
small dark cracks.
"""
opened = opening(phantom, selem)
plot_comparison(phantom, opened, 'opening')
"""
.. image:: PLOT2RST.current_figure
Since ``opening`` an image starts with an erosion operation, light regions that
are *smaller* than the structuring element are removed. The dilation operation
that follows ensures that light regions that are *larger* than the structuring
element retain their original size. Notice how the light and dark shapes in the
center retain their original thickness but the 3 lighter patches at the bottom get
completely eroded. The size dependence is highlighted by the outer white ring:
The parts of the ring thinner than the structuring element were completely
erased, while the thicker region at the top retains its original thickness.
Closing
=======
Morphological ``closing`` on an image is defined as a *dilation followed by an
erosion*. Closing can remove small dark spots (i.e. "pepper") and connect
small bright cracks.
To illustrate this more clearly, let's add a small crack to the white border:
"""
phantom = img_as_ubyte(io.imread(data_dir+'/phantom.png', as_grey=True))
phantom[10:30, 200:210] = 0
closed = closing(phantom, selem)
plot_comparison(phantom, closed, 'closing')
"""
.. image:: PLOT2RST.current_figure
Since ``closing`` an image starts with a dilation operation, dark regions
that are *smaller* than the structuring element are removed. The dilation
operation that follows ensures that dark regions that are *larger* than the
structuring element retain their original size. Notice how the white ellipses
at the bottom get connected because of dilation, but the other dark regions retain
their original sizes. Also notice how the crack we added is mostly removed.
White tophat
============
The ``white_tophat`` of an image is defined as the *image minus its
morphological opening*. This operation returns the bright spots of the image
that are smaller than the structuring element.
To make things interesting, we'll add bright and dark spots to the image:
"""
phantom = img_as_ubyte(io.imread(data_dir+'/phantom.png', as_grey=True))
phantom[340:350, 200:210] = 255
phantom[100:110, 200:210] = 0
w_tophat = white_tophat(phantom, selem)
plot_comparison(phantom, w_tophat, 'white tophat')
"""
.. image:: PLOT2RST.current_figure
As you can see, the 10-pixel wide white square is highlighted since it is
smaller than the structuring element. Also, the thin, white edges around most
of the ellipse are retained because they're smaller than the structuring
element, but the thicker region at the top disappears.
Black tophat
============
The ``black_tophat`` of an image is defined as its morphological **closing
minus the original image**. This operation returns the *dark spots of the
image that are smaller than the structuring element*.
"""
b_tophat = black_tophat(phantom, selem)
plot_comparison(phantom, b_tophat, 'black tophat')
"""
.. image:: PLOT2RST.current_figure
As you can see, the 10-pixel wide black square is highlighted since it is
smaller than the structuring element.
Duality
-------
As you should have noticed, many of these operations are simply the reverse
of another operation. This duality can be summarized as follows:
1. Erosion <-> Dilation
2. Opening <-> Closing
3. White tophat <-> Black tophat
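"""
# A quick numerical check of the erosion/dilation duality (a sketch: it assumes
# a symmetric structuring element such as ``disk``, for which the reflection of
# ``selem`` equals ``selem`` itself, so the complement-based identity should
# hold exactly for uint8 images).
duality_holds = (erosion(phantom, selem) ==
                 255 - dilation(255 - phantom, selem)).all()
print("erosion(f) == 255 - dilation(255 - f): %s" % duality_holds)
"""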
Skeletonize
===========
Thinning is used to reduce each connected component in a binary image to a
*single-pixel wide skeleton*. It is important to note that this is performed
on binary images only.
"""
from skimage import img_as_bool
horse = ~img_as_bool(io.imread(data_dir+'/horse.png', as_grey=True))
sk = skeletonize(horse)
plot_comparison(horse, sk, 'skeletonize')
"""
.. image:: PLOT2RST.current_figure
As the name suggests, this technique thins the image down to a 1-pixel wide
skeleton by applying thinning successively.
Convex hull
===========
The ``convex_hull_image`` is the *set of pixels included in the smallest
convex polygon that surrounds all white pixels in the input image*. Again, note
that this is performed on binary images only.
"""
hull1 = convex_hull_image(horse)
plot_comparison(horse, hull1, 'convex hull')
"""
.. image:: PLOT2RST.current_figure
As the figure illustrates, ``convex_hull_image`` gives the smallest convex
polygon that completely covers the white (``True``) pixels in the image.
If we add a small grain to the image, we can see how the convex hull adapts to
enclose that grain:
"""
import numpy as np
horse2 = np.copy(horse)
horse2[45:50, 75:80] = 1
hull2 = convex_hull_image(horse2)
plot_comparison(horse2, hull2, 'convex hull')
"""
.. image:: PLOT2RST.current_figure
Additional Resources
====================
1. `MathWorks tutorial on morphological processing
<http://www.mathworks.com/help/images/morphology-fundamentals-dilation-and-erosion.html>`_
2. `Auckland university's tutorial on Morphological Image Processing
<http://www.cs.auckland.ac.nz/courses/compsci773s1c/lectures/ImageProcessing-html/topic4.htm>`_
3. http://en.wikipedia.org/wiki/Mathematical_morphology
"""
plt.show()
|
bsd-3-clause
|
rahul-c1/scikit-learn
|
sklearn/feature_selection/rfe.py
|
10
|
14074
|
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Vincent Michel <vincent.michel@inria.fr>
# Gilles Louppe <g.louppe@gmail.com>
#
# License: BSD 3 clause
"""Recursive feature elimination for feature ranking"""
import numpy as np
from ..utils import check_X_y, safe_sqr
from ..base import BaseEstimator
from ..base import MetaEstimatorMixin
from ..base import clone
from ..base import is_classifier
from ..cross_validation import _check_cv as check_cv
from ..cross_validation import _safe_split, _score
from .base import SelectorMixin
from ..metrics.scorer import check_scoring
class RFE(BaseEstimator, MetaEstimatorMixin, SelectorMixin):
"""Feature ranking with recursive feature elimination.
Given an external estimator that assigns weights to features (e.g., the
coefficients of a linear model), the goal of recursive feature elimination
(RFE) is to select features by recursively considering smaller and smaller
sets of features. First, the estimator is trained on the initial set of
features and weights are assigned to each one of them. Then, features whose
    absolute weights are the smallest are pruned from the current set of features.
That procedure is recursively repeated on the pruned set until the desired
number of features to select is eventually reached.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
n_features_to_select : int or None (default=None)
The number of features to select. If `None`, half of the features
are selected.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
estimator_params : dict
Parameters for the external estimator.
Useful for doing grid searches when an `RFE` object is passed as an
argument to, e.g., a `sklearn.grid_search.GridSearchCV` object.
Attributes
----------
n_features_ : int
The number of selected features.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]` corresponds to the \
ranking position of the i-th feature. Selected (i.e., estimated \
best) features are assigned rank 1.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 most informative
features in the Friedman #1 dataset.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFE
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFE(estimator, 5, step=1)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, n_features_to_select=None, step=1,
estimator_params={}, verbose=0):
self.estimator = estimator
self.n_features_to_select = n_features_to_select
self.step = step
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and then the underlying estimator on the selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
The training input samples.
y : array-like, shape = [n_samples]
The target values.
"""
X, y = check_X_y(X, y, "csc")
# Initialization
n_features = X.shape[1]
if self.n_features_to_select is None:
n_features_to_select = n_features / 2
else:
n_features_to_select = self.n_features_to_select
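        # A float step in (0, 1) is treated as a fraction of the total number
        # of features to remove per iteration; an integer step removes that
        # many features per iteration.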
if 0.0 < self.step < 1.0:
step = int(self.step * n_features)
else:
step = int(self.step)
if step <= 0:
raise ValueError("Step must be >0")
support_ = np.ones(n_features, dtype=np.bool)
ranking_ = np.ones(n_features, dtype=np.int)
# Elimination
while np.sum(support_) > n_features_to_select:
# Remaining features
features = np.arange(n_features)[support_]
# Rank the remaining features
estimator = clone(self.estimator)
estimator.set_params(**self.estimator_params)
if self.verbose > 0:
print("Fitting estimator with %d features." % np.sum(support_))
estimator.fit(X[:, features], y)
if estimator.coef_.ndim > 1:
ranks = np.argsort(safe_sqr(estimator.coef_).sum(axis=0))
else:
ranks = np.argsort(safe_sqr(estimator.coef_))
            # in the sparse case, ``ranks`` is a matrix; flatten it
ranks = np.ravel(ranks)
            # Eliminate the worst features
threshold = min(step, np.sum(support_) - n_features_to_select)
support_[features[ranks][:threshold]] = False
ranking_[np.logical_not(support_)] += 1
# Set final attributes
self.estimator_ = clone(self.estimator)
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(X[:, support_], y)
self.n_features_ = support_.sum()
self.support_ = support_
self.ranking_ = ranking_
return self
def predict(self, X):
"""Reduce X to the selected features and then predict using the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
Returns
-------
y : array of shape [n_samples]
The predicted target values.
"""
return self.estimator_.predict(self.transform(X))
def score(self, X, y):
"""Reduce X to the selected features and then return the score of the
underlying estimator.
Parameters
----------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The target values.
"""
return self.estimator_.score(self.transform(X), y)
def _get_support_mask(self):
return self.support_
def decision_function(self, X):
return self.estimator_.decision_function(self.transform(X))
def predict_proba(self, X):
return self.estimator_.predict_proba(self.transform(X))
class RFECV(RFE, MetaEstimatorMixin):
"""Feature ranking with recursive feature elimination and cross-validated
selection of the best number of features.
Parameters
----------
estimator : object
A supervised learning estimator with a `fit` method that updates a
`coef_` attribute that holds the fitted parameters. Important features
must correspond to high absolute values in the `coef_` array.
For instance, this is the case for most supervised learning
algorithms such as Support Vector Classifiers and Generalized
Linear Models from the `svm` and `linear_model` modules.
step : int or float, optional (default=1)
If greater than or equal to 1, then `step` corresponds to the (integer)
number of features to remove at each iteration.
If within (0.0, 1.0), then `step` corresponds to the percentage
(rounded down) of features to remove at each iteration.
cv : int or cross-validation generator, optional (default=None)
If int, it is the number of folds.
If None, 3-fold cross-validation is performed by default.
Specific cross-validation objects can also be passed, see
`sklearn.cross_validation module` for details.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
estimator_params : dict
Parameters for the external estimator.
Useful for doing grid searches when an `RFE` object is passed as an
argument to, e.g., a `sklearn.grid_search.GridSearchCV` object.
verbose : int, default=0
Controls verbosity of output.
Attributes
----------
n_features_ : int
The number of selected features with cross-validation.
support_ : array of shape [n_features]
The mask of selected features.
ranking_ : array of shape [n_features]
The feature ranking, such that `ranking_[i]`
corresponds to the ranking
position of the i-th feature.
Selected (i.e., estimated best)
features are assigned rank 1.
grid_scores_ : array of shape [n_subsets_of_features]
The cross-validation scores such that
`grid_scores_[i]` corresponds to
the CV score of the i-th subset of features.
estimator_ : object
The external estimator fit on the reduced dataset.
Examples
--------
    The following example shows how to retrieve the 5 informative features in
    the Friedman #1 dataset, where the number of informative features is not
    known a priori.
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.feature_selection import RFECV
>>> from sklearn.svm import SVR
>>> X, y = make_friedman1(n_samples=50, n_features=10, random_state=0)
>>> estimator = SVR(kernel="linear")
>>> selector = RFECV(estimator, step=1, cv=5)
>>> selector = selector.fit(X, y)
>>> selector.support_ # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, True, True,
False, False, False, False, False], dtype=bool)
>>> selector.ranking_
array([1, 1, 1, 1, 1, 6, 4, 3, 2, 5])
References
----------
.. [1] Guyon, I., Weston, J., Barnhill, S., & Vapnik, V., "Gene selection
for cancer classification using support vector machines",
Mach. Learn., 46(1-3), 389--422, 2002.
"""
def __init__(self, estimator, step=1, cv=None, scoring=None,
estimator_params={}, verbose=0):
self.estimator = estimator
self.step = step
self.cv = cv
self.scoring = scoring
self.estimator_params = estimator_params
self.verbose = verbose
def fit(self, X, y):
"""Fit the RFE model and automatically tune the number of selected
features.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vector, where `n_samples` is the number of samples and
`n_features` is the total number of features.
y : array-like, shape = [n_samples]
Target values (integers for classification, real numbers for
regression).
"""
X, y = check_X_y(X, y, "csr")
# Initialization
rfe = RFE(estimator=self.estimator, n_features_to_select=1,
step=self.step, estimator_params=self.estimator_params,
verbose=self.verbose - 1)
cv = check_cv(self.cv, X, y, is_classifier(self.estimator))
scorer = check_scoring(self.estimator, scoring=self.scoring)
scores = np.zeros(X.shape[1])
# Cross-validation
for n, (train, test) in enumerate(cv):
X_train, y_train = _safe_split(self.estimator, X, y, train)
X_test, y_test = _safe_split(self.estimator, X, y, test, train)
# Compute a full ranking of the features
ranking_ = rfe.fit(X_train, y_train).ranking_
# Score each subset of features
for k in range(0, max(ranking_)):
mask = np.where(ranking_ <= k + 1)[0]
estimator = clone(self.estimator)
estimator.fit(X_train[:, mask], y_train)
score = _score(estimator, X_test[:, mask], y_test, scorer)
if self.verbose > 0:
print("Finished fold with %d / %d feature ranks, score=%f"
% (k + 1, max(ranking_), score))
scores[k] += score
# Pick the best number of features on average
k = np.argmax(scores)
best_score = scores[k]
# Re-execute an elimination with best_k over the whole set
rfe = RFE(estimator=self.estimator,
n_features_to_select=k+1,
step=self.step, estimator_params=self.estimator_params)
rfe.fit(X, y)
# Set final attributes
self.support_ = rfe.support_
self.n_features_ = rfe.n_features_
self.ranking_ = rfe.ranking_
self.estimator_ = clone(self.estimator)
self.estimator_.set_params(**self.estimator_params)
self.estimator_.fit(self.transform(X), y)
        # `scores` accumulates one score per CV fold for each feature-subset
        # size, so normalize by the number of folds to get the mean CV score.
self.grid_scores_ = scores / len(cv)
return self
|
bsd-3-clause
|
huzq/scikit-learn
|
sklearn/model_selection/_search.py
|
1
|
63016
|
"""
The :mod:`sklearn.model_selection._search` includes utilities to fine-tune the
parameters of an estimator.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# Raghav RV <rvraghav93@gmail.com>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from collections.abc import Mapping, Sequence, Iterable
from functools import partial, reduce
from itertools import product
import numbers
import operator
import time
import warnings
import numpy as np
from numpy.ma import MaskedArray
from scipy.stats import rankdata
from ..base import BaseEstimator, is_classifier, clone
from ..base import MetaEstimatorMixin
from ._split import check_cv
from ._validation import _fit_and_score
from ._validation import _aggregate_score_dicts
from ..exceptions import NotFittedError
from joblib import Parallel, delayed
from ..utils import check_random_state
from ..utils.random import sample_without_replacement
from ..utils.validation import indexable, check_is_fitted, _check_fit_params
from ..utils.validation import _deprecate_positional_args
from ..utils.metaestimators import if_delegate_has_method
from ..metrics._scorer import _check_multimetric_scoring
from ..metrics import check_scoring
from ..utils import deprecated
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid:
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
The order of the generated parameter combinations is deterministic.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_grid : dict of str to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.model_selection import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
Uses :class:`ParameterGrid` to perform a full parallelized parameter
search.
"""
def __init__(self, param_grid):
if not isinstance(param_grid, (Mapping, Iterable)):
raise TypeError('Parameter grid is not a dict or '
'a list ({!r})'.format(param_grid))
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
# check if all entries are dictionaries of lists
for grid in param_grid:
if not isinstance(grid, dict):
raise TypeError('Parameter grid is not a '
'dict ({!r})'.format(grid))
for key in grid:
if not isinstance(grid[key], Iterable):
raise TypeError('Parameter grid value is not iterable '
'(key={!r}, value={!r})'
.format(key, grid[key]))
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of str to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of str to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
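            # Decode ``ind`` as a mixed-radix number: each sorted key is a
            # "digit" whose base is the number of values for that key.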
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler:
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
param_distributions : dict
        Dictionary with parameter names (`str`) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
If a list of dicts is given, first a dict is sampled uniformly, and
then a parameter is sampled using that dict as above.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState instance, default=None
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Pass an int for reproducible output across multiple
function calls.
See :term:`Glossary <random_state>`.
Returns
-------
params : dict of str to any
**Yields** dictionaries mapping each estimator parameter to
        a sampled value.
Examples
--------
>>> from sklearn.model_selection import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> rng = np.random.RandomState(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4,
... random_state=rng))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
@_deprecate_positional_args
def __init__(self, param_distributions, n_iter, *, random_state=None):
if not isinstance(param_distributions, (Mapping, Iterable)):
raise TypeError('Parameter distribution is not a dict or '
'a list ({!r})'.format(param_distributions))
if isinstance(param_distributions, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_distributions = [param_distributions]
for dist in param_distributions:
if not isinstance(dist, dict):
raise TypeError('Parameter distribution is not a '
'dict ({!r})'.format(dist))
for key in dist:
if (not isinstance(dist[key], Iterable)
and not hasattr(dist[key], 'rvs')):
raise TypeError('Parameter value is not iterable '
'or distribution (key={!r}, value={!r})'
.format(key, dist[key]))
self.n_iter = n_iter
self.random_state = random_state
self.param_distributions = param_distributions
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = all(
all(not hasattr(v, "rvs") for v in dist.values())
for dist in self.param_distributions)
rng = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
n_iter = self.n_iter
if grid_size < n_iter:
warnings.warn(
'The total space of parameters %d is smaller '
'than n_iter=%d. Running %d iterations. For exhaustive '
'searches, use GridSearchCV.'
% (grid_size, self.n_iter, grid_size), UserWarning)
n_iter = grid_size
for i in sample_without_replacement(grid_size, n_iter,
random_state=rng):
yield param_grid[i]
else:
for _ in range(self.n_iter):
dist = rng.choice(self.param_distributions)
# Always sort the keys of a dictionary, for reproducibility
items = sorted(dist.items())
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
params[k] = v.rvs(random_state=rng)
else:
params[k] = v[rng.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
# FIXME Remove fit_grid_point in 0.25
@deprecated(
"fit_grid_point is deprecated in version 0.23 "
"and will be removed in version 0.25"
)
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score=np.nan, **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
        An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None
The scorer callable object / function must have its signature as
``scorer(estimator, X, y)``.
If ``None`` the estimator's score method is used.
verbose : int
Verbosity level.
**fit_params : kwargs
        Additional parameters passed to the fit function of the estimator.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
    # NOTE: the return value is not used, as the scorer itself should have been
    # validated earlier. check_scoring is called here only to reject
    # multimetric scorers.
check_scoring(estimator, scorer)
results = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params=fit_params,
return_n_test_samples=True,
error_score=error_score)
return results["test_scores"], parameters, results["n_test_samples"]
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for name, v in p.items():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
if (isinstance(v, str) or
not isinstance(v, (np.ndarray, Sequence))):
raise ValueError("Parameter grid for parameter ({0}) needs to"
" be a list or numpy array, but got ({1})."
" Single values need to be wrapped in a list"
" with one element.".format(name, type(v)))
if len(v) == 0:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a non-empty sequence.".format(name))
class BaseSearchCV(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta):
"""Abstract base class for hyper parameter search with cross-validation.
"""
@abstractmethod
@_deprecate_positional_args
def __init__(self, estimator, *, scoring=None, n_jobs=None,
refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score=np.nan,
return_train_score=True):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
self.return_train_score = return_train_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
@property
def _pairwise(self):
# allows cross-validation to see 'precomputed' metrics
return getattr(self.estimator, '_pairwise', False)
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples, n_output) \
or (n_samples,), default=None
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
"""
self._check_is_fitted('score')
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
score = self.scorer_[self.refit] if self.multimetric_ else self.scorer_
return score(self.best_estimator_, X, y)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def score_samples(self, X):
"""Call score_samples on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``score_samples``.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements
of the underlying estimator.
Returns
-------
y_score : ndarray, shape (n_samples,)
"""
self._check_is_fitted('score_samples')
return self.best_estimator_.score_samples(X)
def _check_is_fitted(self, method_name):
if not self.refit:
raise NotFittedError('This %s instance was initialized '
'with refit=False. %s is '
'available only after refitting on the best '
'parameters. You can refit an estimator '
'manually using the ``best_params_`` '
'attribute'
% (type(self).__name__, method_name))
else:
check_is_fitted(self)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict')
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict_proba')
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict_log_proba')
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('decision_function')
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('transform')
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found params.
Only available if the underlying estimator implements
``inverse_transform`` and ``refit=True``.
Parameters
----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('inverse_transform')
return self.best_estimator_.inverse_transform(Xt)
@property
def n_features_in_(self):
        # For consistency with other estimators we raise an AttributeError so
# that hasattr() fails if the search estimator isn't fitted.
try:
check_is_fitted(self)
except NotFittedError as nfe:
raise AttributeError(
"{} object has no n_features_in_ attribute."
.format(self.__class__.__name__)
) from nfe
return self.best_estimator_.n_features_in_
@property
def classes_(self):
self._check_is_fitted("classes_")
return self.best_estimator_.classes_
def _run_search(self, evaluate_candidates):
"""Repeatedly calls `evaluate_candidates` to conduct a search.
This method, implemented in sub-classes, makes it possible to
        customize the scheduling of evaluations: GridSearchCV and
        RandomizedSearchCV schedule evaluations for their whole parameter
        search space at once, but other, more sequential approaches are also
        possible: for instance, it is possible to iteratively schedule evaluations
for new regions of the parameter search space based on previously
collected evaluation results. This makes it possible to implement
Bayesian optimization or more generally sequential model-based
optimization by deriving from the BaseSearchCV abstract base class.
Parameters
----------
evaluate_candidates : callable
This callback accepts a list of candidates, where each candidate is
a dict of parameter settings. It returns a dict of all results so
far, formatted like ``cv_results_``.
Examples
--------
::
def _run_search(self, evaluate_candidates):
'Try C=0.1 only if C=1 is better than C=10'
all_results = evaluate_candidates([{'C': 1}, {'C': 10}])
score = all_results['mean_test_score']
if score[0] < score[1]:
evaluate_candidates([{'C': 0.1}])
"""
raise NotImplementedError("_run_search not implemented.")
@_deprecate_positional_args
def fit(self, X, y=None, *, groups=None, **fit_params):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples, n_output) \
or (n_samples,), default=None
Target relative to X for classification or regression;
None for unsupervised learning.
groups : array-like of shape (n_samples,), default=None
Group labels for the samples used while splitting the dataset into
train/test set. Only used in conjunction with a "Group" :term:`cv`
instance (e.g., :class:`~sklearn.model_selection.GroupKFold`).
**fit_params : dict of str -> object
Parameters passed to the ``fit`` method of the estimator
"""
estimator = self.estimator
cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
scorers, self.multimetric_ = _check_multimetric_scoring(
self.estimator, scoring=self.scoring)
if self.multimetric_:
if self.refit is not False and (
not isinstance(self.refit, str) or
# This will work for both dict / list (tuple)
self.refit not in scorers) and not callable(self.refit):
raise ValueError("For multi-metric scoring, the parameter "
"refit must be set to a scorer key or a "
"callable to refit an estimator with the "
"best parameter setting on the whole "
"data and make the best_* attributes "
"available for that metric. If this is "
"not needed, refit should be set to "
"False explicitly. %r was passed."
% self.refit)
else:
refit_metric = self.refit
else:
refit_metric = 'score'
X, y, groups = indexable(X, y, groups)
fit_params = _check_fit_params(X, fit_params)
n_splits = cv.get_n_splits(X, y, groups)
base_estimator = clone(self.estimator)
parallel = Parallel(n_jobs=self.n_jobs,
pre_dispatch=self.pre_dispatch)
fit_and_score_kwargs = dict(scorer=scorers,
fit_params=fit_params,
return_train_score=self.return_train_score,
return_n_test_samples=True,
return_times=True,
return_parameters=False,
error_score=self.error_score,
verbose=self.verbose)
results = {}
with parallel:
all_candidate_params = []
all_out = []
def evaluate_candidates(candidate_params):
candidate_params = list(candidate_params)
n_candidates = len(candidate_params)
if self.verbose > 0:
print("Fitting {0} folds for each of {1} candidates,"
" totalling {2} fits".format(
n_splits, n_candidates, n_candidates * n_splits))
out = parallel(delayed(_fit_and_score)(clone(base_estimator),
X, y,
train=train, test=test,
parameters=parameters,
split_progress=(
split_idx,
n_splits),
candidate_progress=(
cand_idx,
n_candidates),
**fit_and_score_kwargs)
for (cand_idx, parameters),
(split_idx, (train, test)) in product(
enumerate(candidate_params),
enumerate(cv.split(X, y, groups))))
if len(out) < 1:
raise ValueError('No fits were performed. '
'Was the CV iterator empty? '
'Were there no candidates?')
elif len(out) != n_candidates * n_splits:
raise ValueError('cv.split and cv.get_n_splits returned '
'inconsistent results. Expected {} '
'splits, got {}'
.format(n_splits,
len(out) // n_candidates))
all_candidate_params.extend(candidate_params)
all_out.extend(out)
nonlocal results
results = self._format_results(
all_candidate_params, scorers, n_splits, all_out)
return results
self._run_search(evaluate_candidates)
# For multi-metric evaluation, store the best_index_, best_params_ and
# best_score_ iff refit is one of the scorer names
# In single metric evaluation, refit_metric is "score"
if self.refit or not self.multimetric_:
# If callable, refit is expected to return the index of the best
# parameter set.
if callable(self.refit):
self.best_index_ = self.refit(results)
if not isinstance(self.best_index_, numbers.Integral):
raise TypeError('best_index_ returned is not an integer')
if (self.best_index_ < 0 or
self.best_index_ >= len(results["params"])):
raise IndexError('best_index_ index out of range')
else:
self.best_index_ = results["rank_test_%s"
% refit_metric].argmin()
self.best_score_ = results["mean_test_%s" % refit_metric][
self.best_index_]
self.best_params_ = results["params"][self.best_index_]
if self.refit:
# we clone again after setting params in case some
# of the params are estimators as well.
self.best_estimator_ = clone(clone(base_estimator).set_params(
**self.best_params_))
refit_start_time = time.time()
if y is not None:
self.best_estimator_.fit(X, y, **fit_params)
else:
self.best_estimator_.fit(X, **fit_params)
refit_end_time = time.time()
self.refit_time_ = refit_end_time - refit_start_time
# Store the only scorer not as a dict for single metric evaluation
self.scorer_ = scorers if self.multimetric_ else scorers['score']
self.cv_results_ = results
self.n_splits_ = n_splits
return self
def _format_results(self, candidate_params, scorers, n_splits, out):
n_candidates = len(candidate_params)
out = _aggregate_score_dicts(out)
results = {}
def _store(key_name, array, weights=None, splits=False, rank=False):
"""A small helper to store the scores/times to the cv_results_"""
# When iterated first by splits, then by parameters
# We want `array` to have `n_candidates` rows and `n_splits` cols.
array = np.array(array, dtype=np.float64).reshape(n_candidates,
n_splits)
if splits:
for split_idx in range(n_splits):
# Uses closure to alter the results
results["split%d_%s"
% (split_idx, key_name)] = array[:, split_idx]
array_means = np.average(array, axis=1, weights=weights)
results['mean_%s' % key_name] = array_means
# Weighted std is not directly available in numpy
array_stds = np.sqrt(np.average((array -
array_means[:, np.newaxis]) ** 2,
axis=1, weights=weights))
results['std_%s' % key_name] = array_stds
if rank:
results["rank_%s" % key_name] = np.asarray(
rankdata(-array_means, method='min'), dtype=np.int32)
_store('fit_time', out["fit_time"])
_store('score_time', out["score_time"])
# Use one MaskedArray and mask all the places where the param is not
# applicable for that candidate. Use defaultdict as each candidate may
# not contain all the params
param_results = defaultdict(partial(MaskedArray,
np.empty(n_candidates,),
mask=True,
dtype=object))
for cand_idx, params in enumerate(candidate_params):
for name, value in params.items():
# An all masked empty array gets created for the key
# `"param_%s" % name` at the first occurrence of `name`.
# Setting the value at an index also unmasks that index
param_results["param_%s" % name][cand_idx] = value
results.update(param_results)
# Store a list of param dicts at the key 'params'
results['params'] = candidate_params
test_scores = _aggregate_score_dicts(out["test_scores"])
if self.return_train_score:
train_scores = _aggregate_score_dicts(out["train_scores"])
for scorer_name in test_scores:
            # Compute the (weighted) mean and std for the test scores alone
_store('test_%s' % scorer_name, test_scores[scorer_name],
splits=True, rank=True,
weights=None)
if self.return_train_score:
_store('train_%s' % scorer_name, train_scores[scorer_name],
splits=True)
return results
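# Illustrative sketch (not part of scikit-learn): the weighted standard
# deviation computed inside ``_store`` above, written out on arbitrary sample
# values. NumPy has no direct weighted-std helper, so the squared deviations
# are averaged with the weights explicitly and then square-rooted.
def _weighted_std_sketch():
    scores = np.array([[0.8, 0.9, 0.7]])   # one candidate, three splits
    weights = np.array([10., 20., 10.])    # hypothetical per-split weights
    means = np.average(scores, axis=1, weights=weights)
    return np.sqrt(np.average((scores - means[:, np.newaxis]) ** 2,
                              axis=1, weights=weights))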
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" and a "score" method.
It also implements "score_samples", "predict", "predict_proba",
"decision_function", "transform" and "inverse_transform" if they are
implemented in the estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : estimator object.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (`str`) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : str, callable, list/tuple or dict, default=None
A single str (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
For evaluating multiple metrics, either give a list of (unique) strings
or a dict with names as keys and callables as values.
NOTE that when using custom scorers, each scorer should return a single
value. Metric functions returning a list/array of values can be wrapped
into multiple scorers that return one value each.
See :ref:`multimetric_grid_search` for an example.
If None, the estimator's score method is used.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionchanged:: v0.20
`n_jobs` default changed from 1 to None
    pre_dispatch : int or str, default='2*n_jobs'
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A str, giving an expression as a function of n_jobs,
as in '2*n_jobs'
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
refit : bool, str, or callable, default=True
Refit an estimator using the best found parameters on the whole
dataset.
For multiple metric evaluation, this needs to be a `str` denoting the
scorer that would be used to find the best parameters for refitting
the estimator at the end.
Where there are considerations other than maximum score in
choosing a best estimator, ``refit`` can be set to a function which
returns the selected ``best_index_`` given ``cv_results_``. In that
case, the ``best_estimator_`` and ``best_params_`` will be set
according to the returned ``best_index_`` while the ``best_score_``
attribute will not be available.
The refitted estimator is made available at the ``best_estimator_``
attribute and permits using ``predict`` directly on this
``GridSearchCV`` instance.
Also for multiple metric evaluation, the attributes ``best_index_``,
``best_score_`` and ``best_params_`` will only be available if
``refit`` is set and all of them will be determined w.r.t this specific
scorer.
See ``scoring`` parameter to know more about multiple metric
evaluation.
.. versionchanged:: 0.20
Support for callable added.
    verbose : int
Controls the verbosity: the higher, the more messages.
- >1 : the computation time for each fold and parameter candidate is
displayed;
- >2 : the score is also displayed;
- >3 : the fold and candidate parameter indexes are also displayed
together with the starting time of the computation.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
return_train_score : bool, default=False
If ``False``, the ``cv_results_`` attribute will not include training
scores.
Computing training scores is used to get insights on how different
parameter settings impact the overfitting/underfitting trade-off.
However computing the scores on the training set can be computationally
expensive and is not strictly required to select the parameters that
yield the best generalization performance.
.. versionadded:: 0.19
.. versionchanged:: 0.21
Default value was changed from ``True`` to ``False``
Examples
--------
>>> from sklearn import svm, datasets
>>> from sklearn.model_selection import GridSearchCV
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svc = svm.SVC()
>>> clf = GridSearchCV(svc, parameters)
>>> clf.fit(iris.data, iris.target)
GridSearchCV(estimator=SVC(),
param_grid={'C': [1, 10], 'kernel': ('linear', 'rbf')})
>>> sorted(clf.cv_results_.keys())
['mean_fit_time', 'mean_score_time', 'mean_test_score',...
'param_C', 'param_kernel', 'params',...
'rank_test_score', 'split0_test_score',...
'split2_test_score', ...
'std_fit_time', 'std_score_time', 'std_test_score']
Attributes
----------
cv_results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
        For instance, the table below
+------------+-----------+------------+-----------------+---+---------+
|param_kernel|param_gamma|param_degree|split0_test_score|...|rank_t...|
+============+===========+============+=================+===+=========+
| 'poly' | -- | 2 | 0.80 |...| 2 |
+------------+-----------+------------+-----------------+---+---------+
| 'poly' | -- | 3 | 0.70 |...| 4 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.1 | -- | 0.80 |...| 3 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.2 | -- | 0.93 |...| 1 |
+------------+-----------+------------+-----------------+---+---------+
will be represented by a ``cv_results_`` dict of::
{
'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'],
mask = [False False False False]...)
'param_gamma': masked_array(data = [-- -- 0.1 0.2],
mask = [ True True False False]...),
'param_degree': masked_array(data = [2.0 3.0 -- --],
mask = [False False True True]...),
'split0_test_score' : [0.80, 0.70, 0.80, 0.93],
'split1_test_score' : [0.82, 0.50, 0.70, 0.78],
'mean_test_score' : [0.81, 0.60, 0.75, 0.85],
'std_test_score' : [0.01, 0.10, 0.05, 0.08],
'rank_test_score' : [2, 4, 3, 1],
'split0_train_score' : [0.80, 0.92, 0.70, 0.93],
'split1_train_score' : [0.82, 0.55, 0.70, 0.87],
'mean_train_score' : [0.81, 0.74, 0.70, 0.90],
'std_train_score' : [0.01, 0.19, 0.00, 0.03],
'mean_fit_time' : [0.73, 0.63, 0.43, 0.49],
'std_fit_time' : [0.01, 0.02, 0.01, 0.01],
'mean_score_time' : [0.01, 0.06, 0.04, 0.04],
'std_score_time' : [0.00, 0.00, 0.00, 0.01],
'params' : [{'kernel': 'poly', 'degree': 2}, ...],
}
NOTE
The key ``'params'`` is used to store a list of parameter
settings dicts for all the parameter candidates.
The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
``std_score_time`` are all in seconds.
For multi-metric evaluation, the scores for all the scorers are
available in the ``cv_results_`` dict at the keys ending with that
scorer's name (``'_<scorer_name>'``) instead of ``'_score'`` shown
above. ('split0_test_precision', 'mean_train_precision' etc.)
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if ``refit=False``.
See ``refit`` parameter for more information on allowed values.
best_score_ : float
Mean cross-validated score of the best_estimator
For multi-metric evaluation, this is present only if ``refit`` is
specified.
This attribute is not available if ``refit`` is a function.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
For multi-metric evaluation, this is present only if ``refit`` is
specified.
best_index_ : int
The index (of the ``cv_results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.cv_results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
For multi-metric evaluation, this is present only if ``refit`` is
specified.
scorer_ : function or a dict
Scorer function used on the held out data to choose the best
parameters for the model.
For multi-metric evaluation, this attribute holds the validated
``scoring`` dict which maps the scorer key to the scorer callable.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
refit_time_ : float
Seconds used for refitting the best model on the whole dataset.
This is present only if ``refit`` is not False.
.. versionadded:: 0.20
Notes
-----
The parameters selected are those that maximize the score of the left out
    data, unless an explicit score is passed, in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
    --------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.model_selection.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
_required_parameters = ["estimator", "param_grid"]
@_deprecate_positional_args
def __init__(self, estimator, param_grid, *, scoring=None,
n_jobs=None, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs',
error_score=np.nan, return_train_score=False):
super().__init__(
estimator=estimator, scoring=scoring,
n_jobs=n_jobs, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score,
return_train_score=return_train_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def _run_search(self, evaluate_candidates):
"""Search all candidates in param_grid"""
evaluate_candidates(ParameterGrid(self.param_grid))
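# Illustrative sketch (not part of scikit-learn): the ``evaluate_candidates``
# protocol documented in ``BaseSearchCV._run_search`` also supports custom
# strategies. The class name, the two grids and the 0.8 threshold below are
# hypothetical; GridSearchCV above simply evaluates the full ParameterGrid.
class _CoarseToFineSearchCV(BaseSearchCV):
    def __init__(self, estimator, coarse_grid, fine_grid, **kwargs):
        super().__init__(estimator=estimator, **kwargs)
        self.coarse_grid = coarse_grid
        self.fine_grid = fine_grid

    def _run_search(self, evaluate_candidates):
        # Evaluate the coarse grid first; refine only if it looks promising.
        results = evaluate_candidates(ParameterGrid(self.coarse_grid))
        if results['mean_test_score'].max() > 0.8:
            evaluate_candidates(ParameterGrid(self.fine_grid))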
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "score_samples", "predict", "predict_proba",
"decision_function", "transform" and "inverse_transform" if they are
implemented in the estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
.. versionadded:: 0.14
Parameters
----------
estimator : estimator object.
        An object of that type is instantiated for each sampled parameter setting.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict or list of dicts
Dictionary with parameters names (`str`) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
If a list of dicts is given, first a dict is sampled uniformly, and
then a parameter is sampled using that dict as above.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : str, callable, list/tuple or dict, default=None
A single str (see :ref:`scoring_parameter`) or a callable
(see :ref:`scoring`) to evaluate the predictions on the test set.
For evaluating multiple metrics, either give a list of (unique) strings
or a dict with names as keys and callables as values.
NOTE that when using custom scorers, each scorer should return a single
value. Metric functions returning a list/array of values can be wrapped
into multiple scorers that return one value each.
See :ref:`multimetric_grid_search` for an example.
If None, the estimator's score method is used.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionchanged:: v0.20
`n_jobs` default changed from 1 to None
    pre_dispatch : int or str, default='2*n_jobs'
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A str, giving an expression as a function of n_jobs,
as in '2*n_jobs'
cv : int, cross-validation generator or an iterable, default=None
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
.. versionchanged:: 0.22
``cv`` default value if None changed from 3-fold to 5-fold.
refit : bool, str, or callable, default=True
Refit an estimator using the best found parameters on the whole
dataset.
For multiple metric evaluation, this needs to be a `str` denoting the
scorer that would be used to find the best parameters for refitting
the estimator at the end.
Where there are considerations other than maximum score in
choosing a best estimator, ``refit`` can be set to a function which
        returns the selected ``best_index_`` given ``cv_results_``. In that
case, the ``best_estimator_`` and ``best_params_`` will be set
according to the returned ``best_index_`` while the ``best_score_``
attribute will not be available.
The refitted estimator is made available at the ``best_estimator_``
attribute and permits using ``predict`` directly on this
``RandomizedSearchCV`` instance.
Also for multiple metric evaluation, the attributes ``best_index_``,
``best_score_`` and ``best_params_`` will only be available if
``refit`` is set and all of them will be determined w.r.t this specific
scorer.
See ``scoring`` parameter to know more about multiple metric
evaluation.
.. versionchanged:: 0.20
Support for callable added.
    verbose : int
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState instance, default=None
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Pass an int for reproducible output across multiple
function calls.
See :term:`Glossary <random_state>`.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
return_train_score : bool, default=False
If ``False``, the ``cv_results_`` attribute will not include training
scores.
Computing training scores is used to get insights on how different
parameter settings impact the overfitting/underfitting trade-off.
However computing the scores on the training set can be computationally
expensive and is not strictly required to select the parameters that
yield the best generalization performance.
.. versionadded:: 0.19
.. versionchanged:: 0.21
Default value was changed from ``True`` to ``False``
Attributes
----------
cv_results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
        For instance, the table below
+--------------+-------------+-------------------+---+---------------+
| param_kernel | param_gamma | split0_test_score |...|rank_test_score|
+==============+=============+===================+===+===============+
| 'rbf' | 0.1 | 0.80 |...| 1 |
+--------------+-------------+-------------------+---+---------------+
| 'rbf' | 0.2 | 0.84 |...| 3 |
+--------------+-------------+-------------------+---+---------------+
| 'rbf' | 0.3 | 0.70 |...| 2 |
+--------------+-------------+-------------------+---+---------------+
will be represented by a ``cv_results_`` dict of::
{
'param_kernel' : masked_array(data = ['rbf', 'rbf', 'rbf'],
mask = False),
'param_gamma' : masked_array(data = [0.1 0.2 0.3], mask = False),
'split0_test_score' : [0.80, 0.84, 0.70],
'split1_test_score' : [0.82, 0.50, 0.70],
'mean_test_score' : [0.81, 0.67, 0.70],
'std_test_score' : [0.01, 0.24, 0.00],
'rank_test_score' : [1, 3, 2],
'split0_train_score' : [0.80, 0.92, 0.70],
'split1_train_score' : [0.82, 0.55, 0.70],
'mean_train_score' : [0.81, 0.74, 0.70],
'std_train_score' : [0.01, 0.19, 0.00],
'mean_fit_time' : [0.73, 0.63, 0.43],
'std_fit_time' : [0.01, 0.02, 0.01],
'mean_score_time' : [0.01, 0.06, 0.04],
'std_score_time' : [0.00, 0.00, 0.00],
'params' : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...],
}
NOTE
The key ``'params'`` is used to store a list of parameter
settings dicts for all the parameter candidates.
The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
``std_score_time`` are all in seconds.
For multi-metric evaluation, the scores for all the scorers are
available in the ``cv_results_`` dict at the keys ending with that
scorer's name (``'_<scorer_name>'``) instead of ``'_score'`` shown
above. ('split0_test_precision', 'mean_train_precision' etc.)
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if ``refit=False``.
For multi-metric evaluation, this attribute is present only if
``refit`` is specified.
See ``refit`` parameter for more information on allowed values.
best_score_ : float
Mean cross-validated score of the best_estimator.
For multi-metric evaluation, this is not available if ``refit`` is
``False``. See ``refit`` parameter for more information.
This attribute is not available if ``refit`` is a function.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
For multi-metric evaluation, this is not available if ``refit`` is
``False``. See ``refit`` parameter for more information.
best_index_ : int
The index (of the ``cv_results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.cv_results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
For multi-metric evaluation, this is not available if ``refit`` is
``False``. See ``refit`` parameter for more information.
scorer_ : function or a dict
Scorer function used on the held out data to choose the best
parameters for the model.
For multi-metric evaluation, this attribute holds the validated
``scoring`` dict which maps the scorer key to the scorer callable.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
refit_time_ : float
Seconds used for refitting the best model on the whole dataset.
This is present only if ``refit`` is not False.
.. versionadded:: 0.20
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
A generator over parameter settings, constructed from
param_distributions.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.model_selection import RandomizedSearchCV
>>> from scipy.stats import uniform
>>> iris = load_iris()
>>> logistic = LogisticRegression(solver='saga', tol=1e-2, max_iter=200,
... random_state=0)
>>> distributions = dict(C=uniform(loc=0, scale=4),
... penalty=['l2', 'l1'])
>>> clf = RandomizedSearchCV(logistic, distributions, random_state=0)
>>> search = clf.fit(iris.data, iris.target)
>>> search.best_params_
{'C': 2..., 'penalty': 'l1'}
"""
_required_parameters = ["estimator", "param_distributions"]
@_deprecate_positional_args
def __init__(self, estimator, param_distributions, *, n_iter=10,
scoring=None, n_jobs=None, refit=True,
cv=None, verbose=0, pre_dispatch='2*n_jobs',
random_state=None, error_score=np.nan,
return_train_score=False):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super().__init__(
estimator=estimator, scoring=scoring,
n_jobs=n_jobs, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score,
return_train_score=return_train_score)
def _run_search(self, evaluate_candidates):
"""Search n_iter candidates from param_distributions"""
evaluate_candidates(ParameterSampler(
self.param_distributions, self.n_iter,
random_state=self.random_state))
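# Illustrative sketch (not part of scikit-learn): the callable form of
# ``refit`` described in the docstrings above. The "within one standard
# deviation of the best" policy is hypothetical; the callable must return an
# integer index into ``cv_results_``.
def _refit_within_one_std(cv_results):
    means = np.asarray(cv_results['mean_test_score'])
    stds = np.asarray(cv_results['std_test_score'])
    best = means.argmax()
    threshold = means[best] - stds[best]
    # Pick the first candidate whose mean test score clears the threshold,
    # e.g. GridSearchCV(estimator, param_grid, refit=_refit_within_one_std).
    return int(np.flatnonzero(means >= threshold)[0])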
|
bsd-3-clause
|
glennq/scikit-learn
|
sklearn/covariance/__init__.py
|
389
|
1157
|
"""
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
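# Illustrative sketch (not part of scikit-learn): typical use of the shrinkage
# estimators exported above. Defined only for demonstration, never called at
# import time; the sample sizes and seed are arbitrary.
def _ledoit_wolf_demo(n_samples=60, n_features=5, seed=0):
    import numpy as np
    rng = np.random.RandomState(seed)
    X = rng.randn(n_samples, n_features)
    lw = LedoitWolf().fit(X)
    # ``covariance_`` is the shrunk (n_features, n_features) estimate and
    # ``shrinkage_`` the coefficient applied toward the scaled identity.
    return lw.covariance_, lw.shrinkage_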
|
bsd-3-clause
|
arahuja/scikit-learn
|
benchmarks/bench_covertype.py
|
154
|
7296
|
"""
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3)
}
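# Illustrative sketch (not part of the benchmark): further estimators can be
# benchmarked by adding entries to the dict above before the arguments are
# parsed; the key name and parameters below are hypothetical.
#
#     ESTIMATORS['GBRT-shallow'] = GradientBoostingClassifier(
#         n_estimators=100, max_depth=2)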
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
|
bsd-3-clause
|
GuLinux/PySpectrum
|
moveable_label.py
|
1
|
2748
|
import matplotlib
from matplotlib.text import Text
# code adapted from here: http://matplotlib.org/users/event_handling.html
class MoveableLabel(Text):
lock = None
def __init__(self, axes, on_dblclick, *args, **kwargs):
Text.__init__(self, *args, **kwargs)
self.axes = axes
self.axes.add_artist(self)
self.connections = [
axes.figure.canvas.mpl_connect('button_press_event', self.onclick),
axes.figure.canvas.mpl_connect('button_release_event', self.onrelease),
axes.figure.canvas.mpl_connect('motion_notify_event', self.onmove),
]
self.press = None
self.on_dblclick = on_dblclick
def position(self):
#print(self.get_position())
return self.get_position()
#return self.get_unitless_position()
def onclick(self, event):
if not self.contains(event)[0]: return
if MoveableLabel.lock is not None: return
if event.dblclick:
self.on_dblclick(self)
return
MoveableLabel.lock = self
x0, y0 = self.position()
self.press = x0, y0, event.xdata, event.ydata
canvas = self.axes.figure.canvas
axes = self.axes
self.set_animated(True)
canvas.draw()
self.background = canvas.copy_from_bbox(axes.bbox)
# now redraw just the rectangle
axes.draw_artist(self)
# and blit just the redrawn area
canvas.blit(axes.bbox)
def onmove(self, event):
if event.inaxes != self.axes: return
if MoveableLabel.lock is not self: return
if not self.press: return
x0, y0, xpress, ypress = self.press
        # treat only missing (None) coordinates as invalid; 0.0 is a valid position
        if None in (event.xdata, event.ydata, xpress, ypress): return
dx = event.xdata - xpress
dy = event.ydata - ypress
self.set_x(x0+dx)
self.set_y(y0+dy)
canvas = self.figure.canvas
axes = self.axes
# restore the background region
canvas.restore_region(self.background)
# redraw just the current rectangle
axes.draw_artist(self)
# blit just the redrawn area
canvas.blit(axes.bbox)
def onrelease(self, event):
if MoveableLabel.lock is not self: return
self.press = None
MoveableLabel.lock = None
# turn off the rect animation property and reset the background
self.set_animated(False)
self.background = None
# redraw the full figure
self.figure.canvas.draw()
def remove(self):
for connection in self.connections:
self.axes.figure.canvas.mpl_disconnect(connection)
Text.remove(self)
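# Illustrative usage sketch (not part of this module): attach a draggable label
# to an Axes; double-clicking it triggers the callback. Guarded so importing
# this file does not open a window, and assuming an interactive backend.
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    def report_position(label):
        # Hypothetical double-click callback: just print where the label is.
        print('label double-clicked at', label.position())

    fig, ax = plt.subplots()
    ax.plot([0, 1], [0, 1])
    MoveableLabel(ax, report_position, x=0.5, y=0.5, text='drag me')
    plt.show()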
|
gpl-3.0
|
alex-mitrevski/delta-execution-models
|
rule_learner/geometric_learner.py
|
1
|
7178
|
'''
Copyright 2017 by Alex Mitrevski <aleksandar.mitrevski@h-brs.de>
This file is part of delta-execution-models.
delta-execution-models is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
delta-execution-models is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with delta-execution-models. If not, see <http://www.gnu.org/licenses/>.
'''
import numpy as np
from sklearn.neighbors import KernelDensity
from sklearn.grid_search import GridSearchCV
from sklearn.externals import joblib
from os import makedirs
from os.path import isdir
class GeometricLearner(object):
'''
Author -- Alex Mitrevski
'''
def __init__(self, predicates, manipulated_obj_predicates, manipulated_predicate_idx):
self.predicates = list(predicates)
self.manipulated_obj_predicates = np.array(manipulated_obj_predicates)
self.manipulated_predicate_idx = np.array(manipulated_predicate_idx)
self.object_count = -1
self.predicate_count = len(self.predicates)
def learn_geometric_predicates(self, save_dir, object_key, instance_key, data, object_extraction_cb, predicate_calculation_cb, geometric_data_extraction_cb, entry_mapping_cb):
dir_created = self._create_directory(save_dir)
if not dir_created:
return None
mapping_file_names = list()
data_file_names = list()
all_objects = list()
predicate_vectors = list()
labels = np.zeros(data.shape[0], dtype=bool)
for i in xrange(data.shape[0]):
objects = object_extraction_cb(data[i])
predicate_vectors.append(predicate_calculation_cb(objects))
all_objects.append(objects)
labels[i] = data[i,-1] > 0.
predicate_vectors = np.array(predicate_vectors)
self.object_count = len(objects)
pos_labels_set = np.where(labels)[0]
manipulated_pos_predicate_idx = self.manipulated_predicate_idx[np.where(self.manipulated_obj_predicates==True)[0]]
object_mappings = entry_mapping_cb(self.object_count)
data_object_mappings = dict()
predicate_mapping_dict = dict()
distinct_predicates = list()
for idx in manipulated_pos_predicate_idx:
predicate_idx = idx % self.predicate_count
if predicate_idx in distinct_predicates:
predicate_mapping_dict[predicate_idx].append(idx)
else:
predicate_mapping_dict[predicate_idx] = [idx]
distinct_predicates.append(predicate_idx)
for predicate_idx in predicate_mapping_dict:
positive_data_idx = list()
negative_data_idx = list()
positive_data_object_mapping = list()
negative_data_object_mapping = list()
for idx in predicate_mapping_dict[predicate_idx]:
obj1_idx = object_mappings[idx][0]
obj2_idx = object_mappings[idx][1]
#we only take those data items where both the current predicate and the label are true
positive_data_idx.append(np.intersect1d(pos_labels_set, np.where(predicate_vectors[:,idx]==1)[0]))
positive_data_object_mapping.append((obj1_idx, obj2_idx))
for manipulated_idx in self.manipulated_predicate_idx:
pred_idx = manipulated_idx % self.predicate_count
if pred_idx == predicate_idx:
obj1_idx = object_mappings[manipulated_idx][0]
obj2_idx = object_mappings[manipulated_idx][1]
negative_data_idx.append(np.where(predicate_vectors[:,manipulated_idx]==0)[0])
negative_data_object_mapping.append((obj1_idx, obj2_idx))
positive_data = list()
for i, obj_mapping_idx in enumerate(positive_data_object_mapping):
for j in positive_data_idx[i]:
positive_data.append(geometric_data_extraction_cb(all_objects[j][obj_mapping_idx[0]], all_objects[j][obj_mapping_idx[1]]))
positive_data = np.array(positive_data)
negative_data = list()
for i, obj_mapping_idx in enumerate(negative_data_object_mapping):
for j in negative_data_idx[i]:
negative_data.append(geometric_data_extraction_cb(all_objects[j][obj_mapping_idx[0]], all_objects[j][obj_mapping_idx[1]]))
negative_data = np.array(negative_data)
grid = GridSearchCV(KernelDensity(), {'bandwidth': np.linspace(0.01, 5.0, 150)}, cv=min(10, positive_data.shape[0]))
grid.fit(positive_data)
geom_given_positive = grid.best_estimator_
geom_given_negative = None
if negative_data.shape[0] > 0:
grid = GridSearchCV(KernelDensity(), {'bandwidth': np.linspace(0.01, 5.0, 150)}, cv=min(10, negative_data.shape[0]))
grid.fit(negative_data)
geom_given_negative = grid.best_estimator_
mapping_file_name = save_dir + '/geom_given_' + self.predicates[predicate_idx] + '_' + str(idx) + '.pkl'
data_file_name = save_dir + '/moving_' + self.predicates[predicate_idx] + '_' + str(idx) + '.npy'
mapping_file_names.append(mapping_file_name)
data_file_names.append(data_file_name)
joblib.dump(geom_given_positive, mapping_file_name)
np.save(data_file_name, positive_data)
mapping_file_name = save_dir + '/geom_given_not_' + self.predicates[predicate_idx] + '_' + str(idx) + '.pkl'
data_file_name = save_dir + '/moving_not_' + self.predicates[predicate_idx] + '_' + str(idx) + '.npy'
mapping_file_names.append(mapping_file_name)
data_file_names.append(data_file_name)
if geom_given_negative is not None:
joblib.dump(geom_given_negative, mapping_file_name)
np.save(data_file_name, negative_data)
for idx in manipulated_pos_predicate_idx:
predicate_idx = idx % self.predicate_count
obj1_idx = object_mappings[idx][0]
obj2_idx = object_mappings[idx][1]
if self.predicates[predicate_idx] not in data_object_mappings:
data_object_mappings[self.predicates[predicate_idx]] = list()
data_object_mappings[self.predicates[predicate_idx]].append((obj1_idx, obj2_idx))
return mapping_file_names, data_file_names, data_object_mappings
def _create_directory(self, dir_name):
if isdir(dir_name):
return True
else:
try:
makedirs(dir_name)
return True
except OSError:
return False
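# Illustrative sketch (not part of this module): the KDE bandwidth selection
# that learn_geometric_predicates performs for each predicate, shown in
# isolation on synthetic 2-D data. The data, grid resolution and seed are
# hypothetical.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    samples = rng.randn(50, 2)
    search = GridSearchCV(KernelDensity(),
                          {'bandwidth': np.linspace(0.01, 5.0, 20)},
                          cv=min(10, samples.shape[0]))
    search.fit(samples)
    print('selected bandwidth: %f' % search.best_estimator_.bandwidth)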
|
gpl-3.0
|
parloma/robotcontrol
|
ros/parloma_interaction/scripts/sign_recognizer.py
|
1
|
3118
|
#! /usr/bin/env python
# Copyright (C) 2014 Politecnico di Torino
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
# This software is developed within the PARLOMA project, which aims
# at developing a communication system for deafblind people (www.parloma.com).
# The PARLOMA project is developed within the Turin node of the AsTech laboratories
# network of the Italian CINI (Consorzio Interuniversitario Nazionale di Informatica).
#
# Contributors:
# Ludovico O. Russo (ludovico.russo@polito.it)
import rospy
from std_msgs.msg import String
from parloma_msgs.msg import hand_skeleton
from sklearn.externals import joblib
from math import sqrt, pow
import numpy as np
from std_msgs.msg import String
class SignClassifier:
def __init__(self, forest_file):
        self.clf, self.signs_list = joblib.load(forest_file)
def classify_skeleton(self, jointsdists):
prob = self.clf.predict_proba(jointsdists)
mm = prob.argmax()
signRecogIndex = self.signs_list[mm]
prob = prob[0][mm]
return signRecogIndex, prob
class SignClassifierNode:
def __init__(self):
        rospy.init_node('sign_recognizer', anonymous=True)
self.classifier_path = rospy.get_param('~classifier', '../xml/clf_2.plk')
self.signs_topic = rospy.get_param('signs_topic','/signs_topic')
self.skeleton_topic = rospy.get_param('skeleton_topic','/skeleton')
self.classifier = SignClassifier(self.classifier_path)
rospy.Subscriber(self.skeleton_topic, hand_skeleton, self.callback_skeleton)
self.pub = rospy.Publisher(self.signs_topic, String, queue_size=10)
rospy.spin()
def classify_skeleton(self, joints_dists):
signRecogIndex, prob = self.classifier.classify_skeleton(joints_dists)
return signRecogIndex, prob
def callback_skeleton(self, data):
rospy.loginfo(rospy.get_caller_id() + "skeleton: ")
jointdists = self.joints2dist(data.joints)
sign, prob = self.classify_skeleton(jointdists)
if (prob > 0.3):
self.pub.publish(sign)
def joints2dist(self, joints):
dists = []
for i in range(0,len(joints)):
for j in range(i+1,len(joints)):
d = sqrt(pow(joints[i].x - joints[j].x,2) + pow(joints[i].y - joints[j].y,2) + pow(joints[i].z - joints[j].z,2))
dists.append(d)
return np.array(dists)
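# Illustrative sketch (not part of this node): the same pairwise-distance
# feature computed by joints2dist above, written for plain (x, y, z) tuples so
# it can be checked without ROS messages. For n joints it yields n*(n-1)/2
# distances. The helper name is hypothetical and it is not used by the node.
def _pairwise_distances(points):
    dists = []
    for i in range(0, len(points)):
        for j in range(i + 1, len(points)):
            d = sqrt(sum(pow(a - b, 2) for a, b in zip(points[i], points[j])))
            dists.append(d)
    return np.array(dists)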
if __name__ == '__main__':
SignClassifierNode()
|
gpl-2.0
|
geopandas/geopandas
|
geopandas/base.py
|
1
|
112249
|
from warnings import warn
import numpy as np
import pandas as pd
from pandas import DataFrame, Series
from shapely.geometry import box
from shapely.geometry.base import BaseGeometry
from shapely.ops import cascaded_union
from .array import GeometryArray, GeometryDtype
def is_geometry_type(data):
"""
Check if the data is of geometry dtype.
Does not include object array of shapely scalars.
"""
if isinstance(getattr(data, "dtype", None), GeometryDtype):
# GeometryArray, GeoSeries and Series[GeometryArray]
return True
else:
return False
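# Illustrative sketch (not part of geopandas): ``is_geometry_type`` is True for
# GeometryArray-backed data but False for a plain object-dtype pandas Series of
# shapely geometries. Defined only for demonstration; ``from_shapely`` is
# assumed to be available in the sibling ``array`` module.
def _is_geometry_type_demo():
    from shapely.geometry import Point
    from .array import from_shapely
    geom_backed = from_shapely([Point(0, 0)])   # geometry dtype -> True
    object_backed = pd.Series([Point(0, 0)])    # object dtype   -> False
    return is_geometry_type(geom_backed), is_geometry_type(object_backed)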
def _delegate_binary_method(op, this, other, align, *args, **kwargs):
# type: (str, GeoSeries, GeoSeries) -> GeoSeries/Series
this = this.geometry
if isinstance(other, GeoPandasBase):
if align and not this.index.equals(other.index):
warn("The indices of the two GeoSeries are different.")
this, other = this.align(other.geometry)
else:
other = other.geometry
a_this = GeometryArray(this.values)
other = GeometryArray(other.values)
elif isinstance(other, BaseGeometry):
a_this = GeometryArray(this.values)
else:
raise TypeError(type(this), type(other))
data = getattr(a_this, op)(other, *args, **kwargs)
return data, this.index
def _binary_geo(op, this, other, align):
# type: (str, GeoSeries, GeoSeries) -> GeoSeries
"""Binary operation on GeoSeries objects that returns a GeoSeries"""
from .geoseries import GeoSeries
geoms, index = _delegate_binary_method(op, this, other, align)
return GeoSeries(geoms.data, index=index, crs=this.crs)
def _binary_op(op, this, other, align, *args, **kwargs):
# type: (str, GeoSeries, GeoSeries, args/kwargs) -> Series[bool/float]
"""Binary operation on GeoSeries objects that returns a Series"""
data, index = _delegate_binary_method(op, this, other, align, *args, **kwargs)
return Series(data, index=index)
def _delegate_property(op, this):
# type: (str, GeoSeries) -> GeoSeries/Series
a_this = GeometryArray(this.geometry.values)
data = getattr(a_this, op)
if isinstance(data, GeometryArray):
from .geoseries import GeoSeries
return GeoSeries(data.data, index=this.index, crs=this.crs)
else:
return Series(data, index=this.index)
def _delegate_geo_method(op, this, *args, **kwargs):
# type: (str, GeoSeries) -> GeoSeries
"""Unary operation that returns a GeoSeries"""
from .geoseries import GeoSeries
a_this = GeometryArray(this.geometry.values)
data = getattr(a_this, op)(*args, **kwargs).data
return GeoSeries(data, index=this.index, crs=this.crs)
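# Illustrative sketch (not part of geopandas): how the delegation helpers above
# are typically wired into the accessor class that follows. ``area`` really is
# defined below; ``buffer`` is shown here only as a representative
# geometry-returning operation and is not added to the class by this comment.
#
#     @property
#     def area(self):
#         return _delegate_property("area", self)      # element-wise Series
#
#     def buffer(self, distance, resolution=16, **kwargs):
#         return _delegate_geo_method(                 # element-wise GeoSeries
#             "buffer", self, distance, resolution=resolution, **kwargs)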
class GeoPandasBase(object):
@property
def area(self):
"""Returns a ``Series`` containing the area of each geometry in the
``GeoSeries`` expressed in the units of the CRS.
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (1, 1), (0, 1)]),
... Polygon([(10, 0), (10, 5), (0, 0)]),
... Polygon([(0, 0), (2, 2), (2, 0)]),
... LineString([(0, 0), (1, 1), (0, 1)]),
... Point(0, 1)
... ]
... )
>>> s
0 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
1 POLYGON ((10.00000 0.00000, 10.00000 5.00000, ...
2 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 2....
3 LINESTRING (0.00000 0.00000, 1.00000 1.00000, ...
4 POINT (0.00000 1.00000)
dtype: geometry
>>> s.area
0 0.5
1 25.0
2 2.0
3 0.0
4 0.0
dtype: float64
See also
--------
GeoSeries.length : measure length
Notes
-----
Area may be invalid for a geographic CRS using degrees as units;
use :meth:`GeoSeries.to_crs` to project geometries to a planar
CRS before using this function.
Every operation in GeoPandas is planar, i.e. the potential third
dimension is not taken into account.
"""
return _delegate_property("area", self)
@property
def crs(self):
"""
The Coordinate Reference System (CRS) represented as a ``pyproj.CRS``
object.
        Returns None if the CRS is not set.
        :getter: Returns a ``pyproj.CRS`` or None. When setting, the value
            can be anything accepted by
            :meth:`pyproj.CRS.from_user_input() <pyproj.crs.CRS.from_user_input>`,
            such as an authority string (e.g. "EPSG:4326") or a WKT string.
Examples
--------
>>> s.crs # doctest: +SKIP
<Geographic 2D CRS: EPSG:4326>
Name: WGS 84
Axis Info [ellipsoidal]:
- Lat[north]: Geodetic latitude (degree)
- Lon[east]: Geodetic longitude (degree)
Area of Use:
- name: World
- bounds: (-180.0, -90.0, 180.0, 90.0)
Datum: World Geodetic System 1984
- Ellipsoid: WGS 84
- Prime Meridian: Greenwich
See also
--------
GeoSeries.set_crs : assign CRS
GeoSeries.to_crs : re-project to another CRS
"""
return self.geometry.values.crs
@crs.setter
def crs(self, value):
"""Sets the value of the crs"""
self.geometry.values.crs = value
@property
def geom_type(self):
"""
Returns a ``Series`` of strings specifying the `Geometry Type` of each
object.
Examples
--------
>>> from shapely.geometry import Point, Polygon, LineString
>>> d = {'geometry': [Point(2, 1), Polygon([(0, 0), (1, 1), (1, 0)]),
... LineString([(0, 0), (1, 1)])]}
>>> gdf = geopandas.GeoDataFrame(d, crs="EPSG:4326")
>>> gdf.geom_type
0 Point
1 Polygon
2 LineString
dtype: object
"""
return _delegate_property("geom_type", self)
@property
def type(self):
"""Return the geometry type of each geometry in the GeoSeries"""
return self.geom_type
@property
def length(self):
"""Returns a ``Series`` containing the length of each geometry
expressed in the units of the CRS.
In the case of a (Multi)Polygon it measures the length
of its exterior (i.e. perimeter).
Examples
--------
>>> from shapely.geometry import Polygon, LineString, MultiLineString, Point, \
GeometryCollection
>>> s = geopandas.GeoSeries(
... [
... LineString([(0, 0), (1, 1), (0, 1)]),
... LineString([(10, 0), (10, 5), (0, 0)]),
... MultiLineString([((0, 0), (1, 0)), ((-1, 0), (1, 0))]),
... Polygon([(0, 0), (1, 1), (0, 1)]),
... Point(0, 1),
... GeometryCollection([Point(1, 0), LineString([(10, 0), (10, 5), (0,\
0)])])
... ]
... )
>>> s
0 LINESTRING (0.00000 0.00000, 1.00000 1.00000, ...
1 LINESTRING (10.00000 0.00000, 10.00000 5.00000...
2 MULTILINESTRING ((0.00000 0.00000, 1.00000 0.0...
3 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
4 POINT (0.00000 1.00000)
5 GEOMETRYCOLLECTION (POINT (1.00000 0.00000), L...
dtype: geometry
>>> s.length
0 2.414214
1 16.180340
2 3.000000
3 3.414214
4 0.000000
5 16.180340
dtype: float64
See also
--------
GeoSeries.area : measure area of a polygon
Notes
-----
Length may be invalid for a geographic CRS using degrees as units;
use :meth:`GeoSeries.to_crs` to project geometries to a planar
CRS before using this function.
Every operation in GeoPandas is planar, i.e. the potential third
dimension is not taken into account.
"""
return _delegate_property("length", self)
@property
def is_valid(self):
"""Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
geometries that are valid.
Examples
--------
An example with one invalid polygon (a bowtie geometry crossing itself)
and one missing geometry:
>>> from shapely.geometry import Polygon
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (1, 1), (0, 1)]),
... Polygon([(0,0), (1, 1), (1, 0), (0, 1)]), # bowtie geometry
... Polygon([(0, 0), (2, 2), (2, 0)]),
... None
... ]
... )
>>> s
0 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
1 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 1....
2 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 2....
3 None
dtype: geometry
>>> s.is_valid
0 True
1 False
2 True
3 False
dtype: bool
"""
return _delegate_property("is_valid", self)
@property
def is_empty(self):
"""
Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
empty geometries.
Examples
--------
An example of a GeoDataFrame with one empty point, one point and one missing
value:
>>> from shapely.geometry import Point
>>> d = {'geometry': [Point(), Point(2, 1), None]}
>>> gdf = geopandas.GeoDataFrame(d, crs="EPSG:4326")
>>> gdf
geometry
0 GEOMETRYCOLLECTION EMPTY
1 POINT (2.00000 1.00000)
2 None
>>> gdf.is_empty
0 True
1 False
2 False
dtype: bool
See Also
--------
GeoSeries.isna : detect missing values
"""
return _delegate_property("is_empty", self)
@property
def is_simple(self):
"""Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
geometries that do not cross themselves.
This is meaningful only for `LineStrings` and `LinearRings`.
Examples
--------
>>> from shapely.geometry import LineString
>>> s = geopandas.GeoSeries(
... [
... LineString([(0, 0), (1, 1), (1, -1), (0, 1)]),
... LineString([(0, 0), (1, 1), (1, -1)]),
... ]
... )
>>> s
0 LINESTRING (0.00000 0.00000, 1.00000 1.00000, ...
1 LINESTRING (0.00000 0.00000, 1.00000 1.00000, ...
dtype: geometry
>>> s.is_simple
0 False
1 True
dtype: bool
"""
return _delegate_property("is_simple", self)
@property
def is_ring(self):
"""Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
features that are closed.
When constructing a LinearRing, the sequence of coordinates may be
explicitly closed by passing identical values in the first and last indices.
Otherwise, the sequence will be implicitly closed by copying the first tuple
to the last index.
Examples
--------
>>> from shapely.geometry import LineString, LinearRing
>>> s = geopandas.GeoSeries(
... [
... LineString([(0, 0), (1, 1), (1, -1)]),
... LineString([(0, 0), (1, 1), (1, -1), (0, 0)]),
... LinearRing([(0, 0), (1, 1), (1, -1)]),
... ]
... )
>>> s
0 LINESTRING (0.00000 0.00000, 1.00000 1.00000, ...
1 LINESTRING (0.00000 0.00000, 1.00000 1.00000, ...
2 LINEARRING (0.00000 0.00000, 1.00000 1.00000, ...
dtype: geometry
>>> s.is_ring
0 False
1 True
2 True
dtype: bool
"""
return _delegate_property("is_ring", self)
@property
def has_z(self):
"""Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
features that have a z-component.
Notes
-----
Every operation in GeoPandas is planar, i.e. the potential third
dimension is not taken into account.
Examples
--------
>>> from shapely.geometry import Point
>>> s = geopandas.GeoSeries(
... [
... Point(0, 1),
... Point(0, 1, 2),
... ]
... )
>>> s
0 POINT (0.00000 1.00000)
1 POINT Z (0.00000 1.00000 2.00000)
dtype: geometry
>>> s.has_z
0 False
1 True
dtype: bool
"""
return _delegate_property("has_z", self)
#
# Unary operations that return a GeoSeries
#
@property
def boundary(self):
"""Returns a ``GeoSeries`` of lower dimensional objects representing
each geometry's set-theoretic `boundary`.
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (1, 1), (0, 1)]),
... LineString([(0, 0), (1, 1), (1, 0)]),
... Point(0, 0),
... ]
... )
>>> s
0 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
1 LINESTRING (0.00000 0.00000, 1.00000 1.00000, ...
2 POINT (0.00000 0.00000)
dtype: geometry
>>> s.boundary
0 LINESTRING (0.00000 0.00000, 1.00000 1.00000, ...
1 MULTIPOINT (0.00000 0.00000, 1.00000 0.00000)
2 GEOMETRYCOLLECTION EMPTY
dtype: geometry
See also
--------
GeoSeries.exterior : outer boundary (without interior rings)
"""
return _delegate_property("boundary", self)
@property
def centroid(self):
"""Returns a ``GeoSeries`` of points representing the centroid of each
geometry.
Note that the centroid does not have to be on or within the original geometry.
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (1, 1), (0, 1)]),
... LineString([(0, 0), (1, 1), (1, 0)]),
... Point(0, 0),
... ]
... )
>>> s
0 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
1 LINESTRING (0.00000 0.00000, 1.00000 1.00000, ...
2 POINT (0.00000 0.00000)
dtype: geometry
>>> s.centroid
0 POINT (0.33333 0.66667)
1 POINT (0.70711 0.50000)
2 POINT (0.00000 0.00000)
dtype: geometry
See also
--------
GeoSeries.representative_point : point guaranteed to be within each geometry
"""
return _delegate_property("centroid", self)
@property
def convex_hull(self):
"""Returns a ``GeoSeries`` of geometries representing the convex hull
of each geometry.
The convex hull of a geometry is the smallest convex `Polygon`
containing all the points in each geometry, unless the number of points
in the geometric object is less than three. For two points, the convex
hull collapses to a `LineString`; for 1, a `Point`.
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point, MultiPoint
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (1, 1), (0, 1)]),
... LineString([(0, 0), (1, 1), (1, 0)]),
... MultiPoint([(0, 0), (1, 1), (0, 1), (1, 0), (0.5, 0.5)]),
... MultiPoint([(0, 0), (1, 1)]),
... Point(0, 0),
... ]
... )
>>> s
0 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
1 LINESTRING (0.00000 0.00000, 1.00000 1.00000, ...
2 MULTIPOINT (0.00000 0.00000, 1.00000 1.00000, ...
3 MULTIPOINT (0.00000 0.00000, 1.00000 1.00000)
4 POINT (0.00000 0.00000)
dtype: geometry
>>> s.convex_hull
0 POLYGON ((0.00000 0.00000, 0.00000 1.00000, 1....
1 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 1....
2 POLYGON ((0.00000 0.00000, 0.00000 1.00000, 1....
3 LINESTRING (0.00000 0.00000, 1.00000 1.00000)
4 POINT (0.00000 0.00000)
dtype: geometry
See also
--------
GeoSeries.envelope : bounding rectangle geometry
"""
return _delegate_property("convex_hull", self)
@property
def envelope(self):
"""Returns a ``GeoSeries`` of geometries representing the envelope of
each geometry.
The envelope of a geometry is the bounding rectangle. That is, the
point or smallest rectangular polygon (with sides parallel to the
coordinate axes) that contains the geometry.
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point, MultiPoint
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (1, 1), (0, 1)]),
... LineString([(0, 0), (1, 1), (1, 0)]),
... MultiPoint([(0, 0), (1, 1)]),
... Point(0, 0),
... ]
... )
>>> s
0 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
1 LINESTRING (0.00000 0.00000, 1.00000 1.00000, ...
2 MULTIPOINT (0.00000 0.00000, 1.00000 1.00000)
3 POINT (0.00000 0.00000)
dtype: geometry
>>> s.envelope
0 POLYGON ((0.00000 0.00000, 1.00000 0.00000, 1....
1 POLYGON ((0.00000 0.00000, 1.00000 0.00000, 1....
2 POLYGON ((0.00000 0.00000, 1.00000 0.00000, 1....
3 POINT (0.00000 0.00000)
dtype: geometry
See also
--------
GeoSeries.convex_hull : convex hull geometry
"""
return _delegate_property("envelope", self)
@property
def exterior(self):
"""Returns a ``GeoSeries`` of LinearRings representing the outer
boundary of each polygon in the GeoSeries.
Applies to GeoSeries containing only Polygons. Returns ``None`` for
other geometry types.
Examples
--------
>>> from shapely.geometry import Polygon, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (1, 1), (0, 1)]),
... Polygon([(1, 0), (2, 1), (0, 0)]),
... Point(0, 1)
... ]
... )
>>> s
0 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
1 POLYGON ((1.00000 0.00000, 2.00000 1.00000, 0....
2 POINT (0.00000 1.00000)
dtype: geometry
>>> s.exterior
0 LINEARRING (0.00000 0.00000, 1.00000 1.00000, ...
1 LINEARRING (1.00000 0.00000, 2.00000 1.00000, ...
2 None
dtype: geometry
See also
--------
GeoSeries.boundary : complete set-theoretic boundary
GeoSeries.interiors : list of inner rings of each polygon
"""
# TODO: return empty geometry for non-polygons
return _delegate_property("exterior", self)
@property
def interiors(self):
"""Returns a ``Series`` of List representing the
inner rings of each polygon in the GeoSeries.
Applies to GeoSeries containing only Polygons.
Returns
-------
inner_rings : Series of List
Inner rings of each polygon in the GeoSeries.
Examples
--------
>>> from shapely.geometry import Polygon
>>> s = geopandas.GeoSeries(
... [
... Polygon(
... [(0, 0), (0, 5), (5, 5), (5, 0)],
... [[(1, 1), (2, 1), (1, 2)], [(1, 4), (2, 4), (2, 3)]],
... ),
... Polygon([(1, 0), (2, 1), (0, 0)]),
... ]
... )
>>> s
0 POLYGON ((0.00000 0.00000, 0.00000 5.00000, 5....
1 POLYGON ((1.00000 0.00000, 2.00000 1.00000, 0....
dtype: geometry
>>> s.interiors
0 [LINEARRING (1 1, 2 1, 1 2, 1 1), LINEARRING (...
1 []
dtype: object
See also
--------
GeoSeries.exterior : outer boundary
"""
return _delegate_property("interiors", self)
def representative_point(self):
"""Returns a ``GeoSeries`` of (cheaply computed) points that are
guaranteed to be within each geometry.
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (1, 1), (0, 1)]),
... LineString([(0, 0), (1, 1), (1, 0)]),
... Point(0, 0),
... ]
... )
>>> s
0 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
1 LINESTRING (0.00000 0.00000, 1.00000 1.00000, ...
2 POINT (0.00000 0.00000)
dtype: geometry
>>> s.representative_point()
0 POINT (0.25000 0.50000)
1 POINT (1.00000 1.00000)
2 POINT (0.00000 0.00000)
dtype: geometry
See also
--------
GeoSeries.centroid : geometric centroid
"""
return _delegate_geo_method("representative_point", self)
#
# Reduction operations that return a Shapely geometry
#
@property
def cascaded_union(self):
"""Deprecated: Return the unary_union of all geometries"""
return cascaded_union(np.asarray(self.geometry.values))
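# Illustrative sketch (not part of the library source): ``cascaded_union`` is
# kept for backwards compatibility; new code would typically use the
# ``unary_union`` property documented below, e.g. ``merged = s.unary_union``.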
@property
def unary_union(self):
"""Returns a geometry containing the union of all geometries in the
``GeoSeries``.
Examples
--------
>>> from shapely.geometry import box
>>> s = geopandas.GeoSeries([box(0,0,1,1), box(0,0,2,2)])
>>> s
0 POLYGON ((1.00000 0.00000, 1.00000 1.00000, 0....
1 POLYGON ((2.00000 0.00000, 2.00000 2.00000, 0....
dtype: geometry
>>> union = s.unary_union
>>> print(union)
POLYGON ((0 1, 0 2, 2 2, 2 0, 1 0, 0 0, 0 1))
"""
return self.geometry.values.unary_union()
#
# Binary operations that return a pandas Series
#
def contains(self, other, align=True):
"""Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
each aligned geometry that contains `other`.
An object is said to contain `other` if its `interior` contains the
`boundary` and `interior` of the other object and their boundaries do
not touch at all.
This is the inverse of :meth:`within` in the sense that the expression
``a.contains(b) == b.within(a)`` always evaluates to ``True``.
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : GeoSeries or geometric object
The GeoSeries (elementwise) or geometric object to test if is
contained.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
Series (bool)
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (1, 1), (0, 1)]),
... LineString([(0, 0), (0, 2)]),
... LineString([(0, 0), (0, 1)]),
... Point(0, 1),
... ],
... index=range(0, 4),
... )
>>> s2 = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 2), (0, 2)]),
... Polygon([(0, 0), (1, 2), (0, 2)]),
... LineString([(0, 0), (0, 2)]),
... Point(0, 1),
... ],
... index=range(1, 5),
... )
>>> s
0 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
1 LINESTRING (0.00000 0.00000, 0.00000 2.00000)
2 LINESTRING (0.00000 0.00000, 0.00000 1.00000)
3 POINT (0.00000 1.00000)
dtype: geometry
>>> s2
1 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
2 POLYGON ((0.00000 0.00000, 1.00000 2.00000, 0....
3 LINESTRING (0.00000 0.00000, 0.00000 2.00000)
4 POINT (0.00000 1.00000)
dtype: geometry
We can check if each geometry of GeoSeries contains a single
geometry:
.. image:: ../../../_static/binary_op-03.svg
:align: center
>>> point = Point(0, 1)
>>> s.contains(point)
0 False
1 True
2 False
3 True
dtype: bool
We can also check two GeoSeries against each other, row by row.
The GeoSeries above have different indices. We can either align both GeoSeries
based on index values and compare elements with the same index using
``align=True`` or ignore index and compare elements based on their matching
order using ``align=False``:
.. image:: ../../../_static/binary_op-02.svg
>>> s2.contains(s, align=True)
0 False
1 False
2 False
3 True
4 False
dtype: bool
>>> s2.contains(s, align=False)
1 True
2 False
3 True
4 True
dtype: bool
Notes
-----
This method works in a row-wise manner. It does not check if an element
of one GeoSeries ``contains`` *any* element of the other one.
See also
--------
GeoSeries.within
"""
return _binary_op("contains", self, other, align)
def geom_equals(self, other, align=True):
"""Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
each aligned geometry equal to `other`.
An object is said to be equal to `other` if its set-theoretic
`boundary`, `interior`, and `exterior` coincides with those of the
other.
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : GeoSeries or geometric object
The GeoSeries (elementwise) or geometric object to test for
equality.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
Series (bool)
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 2), (0, 2)]),
... Polygon([(0, 0), (1, 2), (0, 2)]),
... LineString([(0, 0), (0, 2)]),
... Point(0, 1),
... ],
... )
>>> s2 = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 2), (0, 2)]),
... Polygon([(0, 0), (1, 2), (0, 2)]),
... Point(0, 1),
... LineString([(0, 0), (0, 2)]),
... ],
... index=range(1, 5),
... )
>>> s
0 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
1 POLYGON ((0.00000 0.00000, 1.00000 2.00000, 0....
2 LINESTRING (0.00000 0.00000, 0.00000 2.00000)
3 POINT (0.00000 1.00000)
dtype: geometry
>>> s2
1 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
2 POLYGON ((0.00000 0.00000, 1.00000 2.00000, 0....
3 POINT (0.00000 1.00000)
4 LINESTRING (0.00000 0.00000, 0.00000 2.00000)
dtype: geometry
We can check if each geometry of GeoSeries contains a single
geometry:
.. image:: ../../../_static/binary_op-03.svg
:align: center
>>> polygon = Polygon([(0, 0), (2, 2), (0, 2)])
>>> s.geom_equals(polygon)
0 True
1 False
2 False
3 False
dtype: bool
We can also check two GeoSeries against each other, row by row.
The GeoSeries above have different indices. We can either align both GeoSeries
based on index values and compare elements with the same index using
``align=True`` or ignore index and compare elements based on their matching
order using ``align=False``:
.. image:: ../../../_static/binary_op-02.svg
>>> s.geom_equals(s2)
0 False
1 False
2 False
3 True
4 False
dtype: bool
>>> s.geom_equals(s2, align=False)
0 True
1 True
2 False
3 False
dtype: bool
Notes
-----
This method works in a row-wise manner. It does not check if an element
of one GeoSeries is equal to *any* element of the other one.
See also
--------
GeoSeries.geom_almost_equals
GeoSeries.geom_equals_exact
"""
return _binary_op("geom_equals", self, other, align)
def geom_almost_equals(self, other, decimal=6, align=True):
"""Returns a ``Series`` of ``dtype('bool')`` with value ``True`` if
each aligned geometry is approximately equal to `other`.
Approximate equality is tested at all points to the specified `decimal`
place precision.
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : GeoSeries or geometric object
The GeoSeries (elementwise) or geometric object to compare to.
decimal : int
Decimal place precision used when testing for approximate equality.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
Series (bool)
Examples
--------
>>> from shapely.geometry import Point
>>> s = geopandas.GeoSeries(
... [
... Point(0, 1.1),
... Point(0, 1.01),
... Point(0, 1.001),
... ],
... )
>>> s
0 POINT (0.00000 1.10000)
1 POINT (0.00000 1.01000)
2 POINT (0.00000 1.00100)
dtype: geometry
>>> s.geom_almost_equals(Point(0, 1), decimal=2)
0 False
1 False
2 True
dtype: bool
>>> s.geom_almost_equals(Point(0, 1), decimal=1)
0 False
1 True
2 True
dtype: bool
Notes
-----
This method works in a row-wise manner. It does not check if an element
of one GeoSeries is equal to *any* element of the other one.
See also
--------
GeoSeries.geom_equals
GeoSeries.geom_equals_exact
"""
return _binary_op(
"geom_almost_equals", self, other, decimal=decimal, align=align
)
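# Illustrative sketch (not part of the library source): Shapely documents
# ``almost_equals`` as ``equals_exact`` with an absolute tolerance of
# ``0.5 * 10**(-decimal)`` (treat this as an assumption to verify for your
# version). Under that reading, ``decimal=1`` above means a tolerance of 0.05,
# which is why POINT (0 1.01) passes at ``decimal=1`` but fails at
# ``decimal=2`` (tolerance 0.005).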
def geom_equals_exact(self, other, tolerance, align=True):
"""Return True for all geometries that equal aligned *other* to a given
tolerance, else False.
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : GeoSeries or geometric object
The GeoSeries (elementwise) or geometric object to compare to.
tolerance : float
Absolute tolerance, in the units of the coordinates, used when testing for approximate equality.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
Series (bool)
Examples
--------
>>> from shapely.geometry import Point
>>> s = geopandas.GeoSeries(
... [
... Point(0, 1.1),
... Point(0, 1.0),
... Point(0, 1.2),
... ]
... )
>>> s
0 POINT (0.00000 1.10000)
1 POINT (0.00000 1.00000)
2 POINT (0.00000 1.20000)
dtype: geometry
>>> s.geom_equals_exact(Point(0, 1), tolerance=0.1)
0 False
1 True
2 False
dtype: bool
>>> s.geom_equals_exact(Point(0, 1), tolerance=0.15)
0 True
1 True
2 False
dtype: bool
Notes
-----
This method works in a row-wise manner. It does not check if an element
of one GeoSeries is equal to *any* element of the other one.
See also
--------
GeoSeries.geom_equals
GeoSeries.geom_almost_equals
"""
return _binary_op(
"geom_equals_exact", self, other, tolerance=tolerance, align=align
)
def crosses(self, other, align=True):
"""Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
each aligned geometry that crosses `other`.
An object is said to cross `other` if its `interior` intersects the
`interior` of the other but does not contain it, and the dimension of
the intersection is less than the dimension of the one or the other.
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : GeoSeries or geometric object
The GeoSeries (elementwise) or geometric object to test if is
crossed.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
Series (bool)
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 2), (0, 2)]),
... LineString([(0, 0), (2, 2)]),
... LineString([(2, 0), (0, 2)]),
... Point(0, 1),
... ],
... )
>>> s2 = geopandas.GeoSeries(
... [
... LineString([(1, 0), (1, 3)]),
... LineString([(2, 0), (0, 2)]),
... Point(1, 1),
... Point(0, 1),
... ],
... index=range(1, 5),
... )
>>> s
0 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
1 LINESTRING (0.00000 0.00000, 2.00000 2.00000)
2 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
3 POINT (0.00000 1.00000)
dtype: geometry
>>> s2
1 LINESTRING (1.00000 0.00000, 1.00000 3.00000)
2 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
3 POINT (1.00000 1.00000)
4 POINT (0.00000 1.00000)
dtype: geometry
We can check if each geometry of GeoSeries crosses a single
geometry:
.. image:: ../../../_static/binary_op-03.svg
:align: center
>>> line = LineString([(-1, 1), (3, 1)])
>>> s.crosses(line)
0 True
1 True
2 True
3 False
dtype: bool
We can also check two GeoSeries against each other, row by row.
The GeoSeries above have different indices. We can either align both GeoSeries
based on index values and compare elements with the same index using
``align=True`` or ignore index and compare elements based on their matching
order using ``align=False``:
.. image:: ../../../_static/binary_op-02.svg
>>> s.crosses(s2, align=True)
0 False
1 True
2 False
3 False
4 False
dtype: bool
>>> s.crosses(s2, align=False)
0 True
1 True
2 False
3 False
dtype: bool
Notice that a line does not cross a point that it contains.
Notes
-----
This method works in a row-wise manner. It does not check if an element
of one GeoSeries ``crosses`` *any* element of the other one.
See also
--------
GeoSeries.disjoint
GeoSeries.intersects
"""
return _binary_op("crosses", self, other, align)
def disjoint(self, other, align=True):
"""Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
each aligned geometry disjoint to `other`.
An object is said to be disjoint to `other` if its `boundary` and
`interior` do not intersect at all with those of the other.
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : GeoSeries or geometric object
The GeoSeries (elementwise) or geometric object to test if is
disjoint.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
Series (bool)
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 2), (0, 2)]),
... LineString([(0, 0), (2, 2)]),
... LineString([(2, 0), (0, 2)]),
... Point(0, 1),
... ],
... )
>>> s2 = geopandas.GeoSeries(
... [
... Polygon([(-1, 0), (-1, 2), (0, -2)]),
... LineString([(0, 0), (0, 1)]),
... Point(1, 1),
... Point(0, 0),
... ],
... )
>>> s
0 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
1 LINESTRING (0.00000 0.00000, 2.00000 2.00000)
2 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
3 POINT (0.00000 1.00000)
dtype: geometry
>>> s2
0 POLYGON ((-1.00000 0.00000, -1.00000 2.00000, ...
1 LINESTRING (0.00000 0.00000, 0.00000 1.00000)
2 POINT (1.00000 1.00000)
3 POINT (0.00000 0.00000)
dtype: geometry
We can check each geometry of GeoSeries to a single
geometry:
.. image:: ../../../_static/binary_op-03.svg
:align: center
>>> line = LineString([(0, 0), (2, 0)])
>>> s.disjoint(line)
0 False
1 False
2 False
3 True
dtype: bool
We can also check two GeoSeries against each other, row by row.
We can either align both GeoSeries
based on index values and compare elements with the same index using
``align=True`` or ignore index and compare elements based on their matching
order using ``align=False``:
.. image:: ../../../_static/binary_op-02.svg
>>> s.disjoint(s2)
0 True
1 False
2 False
3 True
dtype: bool
Notes
-----
This method works in a row-wise manner. It does not check if an element
of one GeoSeries is equal to *any* element of the other one.
See also
--------
GeoSeries.intersects
GeoSeries.touches
"""
return _binary_op("disjoint", self, other, align)
def intersects(self, other, align=True):
"""Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
each aligned geometry that intersects `other`.
An object is said to intersect `other` if its `boundary` and `interior`
intersect in any way with those of the other.
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : GeoSeries or geometric object
The GeoSeries (elementwise) or geometric object to test if is
intersected.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
Series (bool)
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 2), (0, 2)]),
... LineString([(0, 0), (2, 2)]),
... LineString([(2, 0), (0, 2)]),
... Point(0, 1),
... ],
... )
>>> s2 = geopandas.GeoSeries(
... [
... LineString([(1, 0), (1, 3)]),
... LineString([(2, 0), (0, 2)]),
... Point(1, 1),
... Point(0, 1),
... ],
... index=range(1, 5),
... )
>>> s
0 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
1 LINESTRING (0.00000 0.00000, 2.00000 2.00000)
2 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
3 POINT (0.00000 1.00000)
dtype: geometry
>>> s2
1 LINESTRING (1.00000 0.00000, 1.00000 3.00000)
2 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
3 POINT (1.00000 1.00000)
4 POINT (0.00000 1.00000)
dtype: geometry
We can check if each geometry of GeoSeries intersects a single
geometry:
.. image:: ../../../_static/binary_op-03.svg
:align: center
>>> line = LineString([(-1, 1), (3, 1)])
>>> s.intersects(line)
0 True
1 True
2 True
3 True
dtype: bool
We can also check two GeoSeries against each other, row by row.
The GeoSeries above have different indices. We can either align both GeoSeries
based on index values and compare elements with the same index using
``align=True`` or ignore index and compare elements based on their matching
order using ``align=False``:
.. image:: ../../../_static/binary_op-02.svg
>>> s.intersects(s2, align=True)
0 False
1 True
2 True
3 False
4 False
dtype: bool
>>> s.intersects(s2, align=False)
0 True
1 True
2 True
3 True
dtype: bool
Notes
-----
This method works in a row-wise manner. It does not check if an element
of one GeoSeries ``intersects`` *any* element of the other one.
See also
--------
GeoSeries.disjoint
GeoSeries.crosses
GeoSeries.touches
GeoSeries.intersection
"""
return _binary_op("intersects", self, other, align)
def overlaps(self, other, align=True):
"""Returns True for all aligned geometries that overlap *other*, else False.
Geometries overlap if they have more than one but not all
points in common, have the same dimension, and the intersection of the
interiors of the geometries has the same dimension as the geometries
themselves.
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : GeoSeries or geometric object
The GeoSeries (elementwise) or geometric object to test if
overlaps.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
Series (bool)
Examples
--------
>>> from shapely.geometry import Polygon, LineString, MultiPoint, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 2), (0, 2)]),
... Polygon([(0, 0), (2, 2), (0, 2)]),
... LineString([(0, 0), (2, 2)]),
... MultiPoint([(0, 0), (0, 1)]),
... ],
... )
>>> s2 = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 0), (0, 2)]),
... LineString([(0, 1), (1, 1)]),
... LineString([(1, 1), (3, 3)]),
... Point(0, 1),
... ],
... index=range(1, 5),
... )
>>> s
0 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
1 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
2 LINESTRING (0.00000 0.00000, 2.00000 2.00000)
3 MULTIPOINT (0.00000 0.00000, 0.00000 1.00000)
dtype: geometry
>>> s2
1 POLYGON ((0.00000 0.00000, 2.00000 0.00000, 0....
2 LINESTRING (0.00000 1.00000, 1.00000 1.00000)
3 LINESTRING (1.00000 1.00000, 3.00000 3.00000)
4 POINT (0.00000 1.00000)
dtype: geometry
We can check if each geometry of GeoSeries overlaps a single
geometry:
.. image:: ../../../_static/binary_op-03.svg
:align: center
>>> polygon = Polygon([(0, 0), (1, 0), (1, 1), (0, 1)])
>>> s.overlaps(polygon)
0 True
1 True
2 False
3 False
dtype: bool
We can also check two GeoSeries against each other, row by row.
The GeoSeries above have different indices. We can either align both GeoSeries
based on index values and compare elements with the same index using
``align=True`` or ignore index and compare elements based on their matching
order using ``align=False``:
.. image:: ../../../_static/binary_op-02.svg
>>> s.overlaps(s2)
0 False
1 True
2 False
3 False
4 False
dtype: bool
>>> s.overlaps(s2, align=False)
0 True
1 False
2 True
3 False
dtype: bool
Notes
-----
This method works in a row-wise manner. It does not check if an element
of one GeoSeries ``overlaps`` *any* element of the other one.
See also
--------
GeoSeries.crosses
GeoSeries.intersects
"""
return _binary_op("overlaps", self, other, align)
def touches(self, other, align=True):
"""Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
each aligned geometry that touches `other`.
An object is said to touch `other` if it has at least one point in
common with `other` and its interior does not intersect with any part
of the other. Overlapping features therefore do not touch.
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : GeoSeries or geometric object
The GeoSeries (elementwise) or geometric object to test if is
touched.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
Series (bool)
Examples
--------
>>> from shapely.geometry import Polygon, LineString, MultiPoint, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 2), (0, 2)]),
... Polygon([(0, 0), (2, 2), (0, 2)]),
... LineString([(0, 0), (2, 2)]),
... MultiPoint([(0, 0), (0, 1)]),
... ],
... )
>>> s2 = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (-2, 0), (0, -2)]),
... LineString([(0, 1), (1, 1)]),
... LineString([(1, 1), (3, 0)]),
... Point(0, 1),
... ],
... index=range(1, 5),
... )
>>> s
0 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
1 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
2 LINESTRING (0.00000 0.00000, 2.00000 2.00000)
3 MULTIPOINT (0.00000 0.00000, 0.00000 1.00000)
dtype: geometry
>>> s2
1 POLYGON ((0.00000 0.00000, -2.00000 0.00000, 0...
2 LINESTRING (0.00000 1.00000, 1.00000 1.00000)
3 LINESTRING (1.00000 1.00000, 3.00000 0.00000)
4 POINT (0.00000 1.00000)
dtype: geometry
We can check if each geometry of GeoSeries touches a single
geometry:
.. image:: ../../../_static/binary_op-03.svg
:align: center
>>> line = LineString([(0, 0), (-1, -2)])
>>> s.touches(line)
0 True
1 True
2 True
3 True
dtype: bool
We can also check two GeoSeries against each other, row by row.
The GeoSeries above have different indices. We can either align both GeoSeries
based on index values and compare elements with the same index using
``align=True`` or ignore index and compare elements based on their matching
order using ``align=False``:
.. image:: ../../../_static/binary_op-02.svg
>>> s.touches(s2, align=True)
0 False
1 True
2 True
3 False
4 False
dtype: bool
>>> s.touches(s2, align=False)
0 True
1 False
2 True
3 False
dtype: bool
Notes
-----
This method works in a row-wise manner. It does not check if an element
of one GeoSeries ``touches`` *any* element of the other one.
See also
--------
GeoSeries.overlaps
GeoSeries.intersects
"""
return _binary_op("touches", self, other, align)
def within(self, other, align=True):
"""Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
each aligned geometry that is within `other`.
An object is said to be within `other` if its `boundary` and `interior`
intersect only with the `interior` of the other (not its `boundary` or
`exterior`).
This is the inverse of :meth:`contains` in the sense that the
expression ``a.within(b) == b.contains(a)`` always evaluates to
``True``.
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : GeoSeries or geometric object
The GeoSeries (elementwise) or geometric object to test if each
geometry is within.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
Series (bool)
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 2), (0, 2)]),
... Polygon([(0, 0), (1, 2), (0, 2)]),
... LineString([(0, 0), (0, 2)]),
... Point(0, 1),
... ],
... )
>>> s2 = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (1, 1), (0, 1)]),
... LineString([(0, 0), (0, 2)]),
... LineString([(0, 0), (0, 1)]),
... Point(0, 1),
... ],
... index=range(1, 5),
... )
>>> s
0 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
1 POLYGON ((0.00000 0.00000, 1.00000 2.00000, 0....
2 LINESTRING (0.00000 0.00000, 0.00000 2.00000)
3 POINT (0.00000 1.00000)
dtype: geometry
>>> s2
1 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
2 LINESTRING (0.00000 0.00000, 0.00000 2.00000)
3 LINESTRING (0.00000 0.00000, 0.00000 1.00000)
4 POINT (0.00000 1.00000)
dtype: geometry
We can check if each geometry of GeoSeries is within a single
geometry:
.. image:: ../../../_static/binary_op-03.svg
:align: center
>>> polygon = Polygon([(0, 0), (2, 2), (0, 2)])
>>> s.within(polygon)
0 True
1 True
2 False
3 False
dtype: bool
We can also check two GeoSeries against each other, row by row.
The GeoSeries above have different indices. We can either align both GeoSeries
based on index values and compare elements with the same index using
``align=True`` or ignore index and compare elements based on their matching
order using ``align=False``:
.. image:: ../../../_static/binary_op-02.svg
>>> s2.within(s)
0 False
1 False
2 True
3 False
4 False
dtype: bool
>>> s2.within(s, align=False)
1 True
2 False
3 True
4 True
dtype: bool
Notes
-----
This method works in a row-wise manner. It does not check if an element
of one GeoSeries is ``within`` *any* element of the other one.
See also
--------
GeoSeries.contains
"""
return _binary_op("within", self, other, align)
def covers(self, other, align=True):
"""
Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
each aligned geometry that entirely covers `other`.
An object A is said to cover another object B if no points of B lie
in the exterior of A.
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
See
https://lin-ear-th-inking.blogspot.com/2007/06/subtleties-of-ogc-covers-spatial.html
for reference.
Parameters
----------
other : Geoseries or geometric object
The GeoSeries (elementwise) or geometric object to check whether it is covered.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
Series (bool)
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 0), (2, 2), (0, 2)]),
... Polygon([(0, 0), (2, 2), (0, 2)]),
... LineString([(0, 0), (2, 2)]),
... Point(0, 0),
... ],
... )
>>> s2 = geopandas.GeoSeries(
... [
... Polygon([(0.5, 0.5), (1.5, 0.5), (1.5, 1.5), (0.5, 1.5)]),
... Polygon([(0, 0), (2, 0), (2, 2), (0, 2)]),
... LineString([(1, 1), (1.5, 1.5)]),
... Point(0, 0),
... ],
... index=range(1, 5),
... )
>>> s
0 POLYGON ((0.00000 0.00000, 2.00000 0.00000, 2....
1 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
2 LINESTRING (0.00000 0.00000, 2.00000 2.00000)
3 POINT (0.00000 0.00000)
dtype: geometry
>>> s2
1 POLYGON ((0.50000 0.50000, 1.50000 0.50000, 1....
2 POLYGON ((0.00000 0.00000, 2.00000 0.00000, 2....
3 LINESTRING (1.00000 1.00000, 1.50000 1.50000)
4 POINT (0.00000 0.00000)
dtype: geometry
We can check if each geometry of GeoSeries covers a single
geometry:
.. image:: ../../../_static/binary_op-03.svg
:align: center
>>> poly = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])
>>> s.covers(poly)
0 True
1 False
2 False
3 False
dtype: bool
We can also check two GeoSeries against each other, row by row.
The GeoSeries above have different indices. We can either align both GeoSeries
based on index values and compare elements with the same index using
``align=True`` or ignore index and compare elements based on their matching
order using ``align=False``:
.. image:: ../../../_static/binary_op-02.svg
>>> s.covers(s2, align=True)
0 False
1 False
2 False
3 False
4 False
dtype: bool
>>> s.covers(s2, align=False)
0 True
1 False
2 True
3 True
dtype: bool
Notes
-----
This method works in a row-wise manner. It does not check if an element
of one GeoSeries ``covers`` *any* element of the other one.
See also
--------
GeoSeries.covered_by
GeoSeries.overlaps
"""
return _binary_op("covers", self, other, align)
def covered_by(self, other, align=True):
"""
Returns a ``Series`` of ``dtype('bool')`` with value ``True`` for
each aligned geometry that is entirely covered by `other`.
An object A is said to be covered by another object B if no points of A lie
in the exterior of B.
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
See
https://lin-ear-th-inking.blogspot.com/2007/06/subtleties-of-ogc-covers-spatial.html
for reference.
Parameters
----------
other : Geoseries or geometric object
The GeoSeries (elementwise) or geometric object to check whether it covers each aligned geometry.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
Series (bool)
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0.5, 0.5), (1.5, 0.5), (1.5, 1.5), (0.5, 1.5)]),
... Polygon([(0, 0), (2, 0), (2, 2), (0, 2)]),
... LineString([(1, 1), (1.5, 1.5)]),
... Point(0, 0),
... ],
... )
>>> s2 = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 0), (2, 2), (0, 2)]),
... Polygon([(0, 0), (2, 2), (0, 2)]),
... LineString([(0, 0), (2, 2)]),
... Point(0, 0),
... ],
... index=range(1, 5),
... )
>>> s
0 POLYGON ((0.50000 0.50000, 1.50000 0.50000, 1....
1 POLYGON ((0.00000 0.00000, 2.00000 0.00000, 2....
2 LINESTRING (1.00000 1.00000, 1.50000 1.50000)
3 POINT (0.00000 0.00000)
dtype: geometry
>>> s2
1 POLYGON ((0.00000 0.00000, 2.00000 0.00000, 2....
2 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
3 LINESTRING (0.00000 0.00000, 2.00000 2.00000)
4 POINT (0.00000 0.00000)
dtype: geometry
We can check if each geometry of GeoSeries is covered by a single
geometry:
.. image:: ../../../_static/binary_op-03.svg
:align: center
>>> poly = Polygon([(0, 0), (2, 0), (2, 2), (0, 2)])
>>> s.covered_by(poly)
0 True
1 True
2 True
3 True
dtype: bool
We can also check two GeoSeries against each other, row by row.
The GeoSeries above have different indices. We can either align both GeoSeries
based on index values and compare elements with the same index using
``align=True`` or ignore index and compare elements based on their matching
order using ``align=False``:
.. image:: ../../../_static/binary_op-02.svg
>>> s.covered_by(s2, align=True)
0 False
1 True
2 True
3 True
4 False
dtype: bool
>>> s.covered_by(s2, align=False)
0 True
1 False
2 True
3 True
dtype: bool
Notes
-----
This method works in a row-wise manner. It does not check if an element
of one GeoSeries is ``covered_by`` *any* element of the other one.
See also
--------
GeoSeries.covers
GeoSeries.overlaps
"""
return _binary_op("covered_by", self, other, align)
def distance(self, other, align=True):
"""Returns a ``Series`` containing the distance to aligned `other`.
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : Geoseries or geometric object
The Geoseries (elementwise) or geometric object to find the
distance to.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
Series (float)
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (1, 0), (1, 1)]),
... Polygon([(0, 0), (-1, 0), (-1, 1)]),
... LineString([(1, 1), (0, 0)]),
... Point(0, 0),
... ],
... )
>>> s2 = geopandas.GeoSeries(
... [
... Polygon([(0.5, 0.5), (1.5, 0.5), (1.5, 1.5), (0.5, 1.5)]),
... Point(3, 1),
... LineString([(1, 0), (2, 0)]),
... Point(0, 1),
... ],
... index=range(1, 5),
... )
>>> s
0 POLYGON ((0.00000 0.00000, 1.00000 0.00000, 1....
1 POLYGON ((0.00000 0.00000, -1.00000 0.00000, -...
2 LINESTRING (1.00000 1.00000, 0.00000 0.00000)
3 POINT (0.00000 0.00000)
dtype: geometry
>>> s2
1 POLYGON ((0.50000 0.50000, 1.50000 0.50000, 1....
2 POINT (3.00000 1.00000)
3 LINESTRING (1.00000 0.00000, 2.00000 0.00000)
4 POINT (0.00000 1.00000)
dtype: geometry
We can check the distance of each geometry of GeoSeries to a single
geometry:
.. image:: ../../../_static/binary_op-03.svg
:align: center
>>> point = Point(-1, 0)
>>> s.distance(point)
0 1.0
1 0.0
2 1.0
3 1.0
dtype: float64
We can also check two GeoSeries against each other, row by row.
The GeoSeries above have different indices. We can either align both GeoSeries
based on index values and use elements with the same index using
``align=True`` or ignore index and use elements based on their matching
order using ``align=False``:
.. image:: ../../../_static/binary_op-02.svg
>>> s.distance(s2, align=True)
0 NaN
1 0.707107
2 2.000000
3 1.000000
4 NaN
dtype: float64
>>> s.distance(s2, align=False)
0 0.000000
1 3.162278
2 0.707107
3 1.000000
dtype: float64
"""
return _binary_op("distance", self, other, align)
#
# Binary operations that return a GeoSeries
#
def difference(self, other, align=True):
"""Returns a ``GeoSeries`` of the points in each aligned geometry that
are not in `other`.
.. image:: ../../../_static/binary_geo-difference.svg
:align: center
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : Geoseries or geometric object
The Geoseries (elementwise) or geometric object to find the
difference to.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
GeoSeries
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 2), (0, 2)]),
... Polygon([(0, 0), (2, 2), (0, 2)]),
... LineString([(0, 0), (2, 2)]),
... LineString([(2, 0), (0, 2)]),
... Point(0, 1),
... ],
... )
>>> s2 = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (1, 1), (0, 1)]),
... LineString([(1, 0), (1, 3)]),
... LineString([(2, 0), (0, 2)]),
... Point(1, 1),
... Point(0, 1),
... ],
... index=range(1, 6),
... )
>>> s
0 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
1 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
2 LINESTRING (0.00000 0.00000, 2.00000 2.00000)
3 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
4 POINT (0.00000 1.00000)
dtype: geometry
>>> s2
1 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
2 LINESTRING (1.00000 0.00000, 1.00000 3.00000)
3 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
4 POINT (1.00000 1.00000)
5 POINT (0.00000 1.00000)
dtype: geometry
We can do difference of each geometry and a single
shapely geometry:
.. image:: ../../../_static/binary_op-03.svg
:align: center
>>> s.difference(Polygon([(0, 0), (1, 1), (0, 1)]))
0 POLYGON ((0.00000 2.00000, 2.00000 2.00000, 1....
1 POLYGON ((0.00000 2.00000, 2.00000 2.00000, 1....
2 LINESTRING (1.00000 1.00000, 2.00000 2.00000)
3 MULTILINESTRING ((2.00000 0.00000, 1.00000 1.0...
4 POINT EMPTY
dtype: geometry
We can also check two GeoSeries against each other, row by row.
The GeoSeries above have different indices. We can either align both GeoSeries
based on index values and compare elements with the same index using
``align=True`` or ignore index and compare elements based on their matching
order using ``align=False``:
.. image:: ../../../_static/binary_op-02.svg
>>> s.difference(s2, align=True)
0 None
1 POLYGON ((0.00000 2.00000, 2.00000 2.00000, 1....
2 MULTILINESTRING ((0.00000 0.00000, 1.00000 1.0...
3 LINESTRING EMPTY
4 POINT (0.00000 1.00000)
5 None
dtype: geometry
>>> s.difference(s2, align=False)
0 POLYGON ((0.00000 2.00000, 2.00000 2.00000, 1....
1 POLYGON ((0.00000 0.00000, 0.00000 2.00000, 1....
2 MULTILINESTRING ((0.00000 0.00000, 1.00000 1.0...
3 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
4 POINT EMPTY
dtype: geometry
See Also
--------
GeoSeries.symmetric_difference
GeoSeries.union
GeoSeries.intersection
"""
return _binary_geo("difference", self, other, align)
def symmetric_difference(self, other, align=True):
"""Returns a ``GeoSeries`` of the symmetric difference of points in
each aligned geometry with `other`.
For each geometry, the symmetric difference consists of points in the
geometry not in `other`, and points in `other` not in the geometry.
.. image:: ../../../_static/binary_geo-symm_diff.svg
:align: center
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : Geoseries or geometric object
The Geoseries (elementwise) or geometric object to find the
symmetric difference to.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
GeoSeries
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 2), (0, 2)]),
... Polygon([(0, 0), (2, 2), (0, 2)]),
... LineString([(0, 0), (2, 2)]),
... LineString([(2, 0), (0, 2)]),
... Point(0, 1),
... ],
... )
>>> s2 = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (1, 1), (0, 1)]),
... LineString([(1, 0), (1, 3)]),
... LineString([(2, 0), (0, 2)]),
... Point(1, 1),
... Point(0, 1),
... ],
... index=range(1, 6),
... )
>>> s
0 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
1 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
2 LINESTRING (0.00000 0.00000, 2.00000 2.00000)
3 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
4 POINT (0.00000 1.00000)
dtype: geometry
>>> s2
1 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
2 LINESTRING (1.00000 0.00000, 1.00000 3.00000)
3 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
4 POINT (1.00000 1.00000)
5 POINT (0.00000 1.00000)
dtype: geometry
We can do symmetric difference of each geometry and a single
shapely geometry:
.. image:: ../../../_static/binary_op-03.svg
:align: center
>>> s.symmetric_difference(Polygon([(0, 0), (1, 1), (0, 1)]))
0 POLYGON ((0.00000 2.00000, 2.00000 2.00000, 1....
1 POLYGON ((0.00000 2.00000, 2.00000 2.00000, 1....
2 GEOMETRYCOLLECTION (POLYGON ((0.00000 0.00000,...
3 GEOMETRYCOLLECTION (POLYGON ((0.00000 0.00000,...
4 POLYGON ((0.00000 1.00000, 1.00000 1.00000, 0....
dtype: geometry
We can also check two GeoSeries against each other, row by row.
The GeoSeries above have different indices. We can either align both GeoSeries
based on index values and compare elements with the same index using
``align=True`` or ignore index and compare elements based on their matching
order using ``align=False``:
.. image:: ../../../_static/binary_op-02.svg
>>> s.symmetric_difference(s2, align=True)
0 None
1 POLYGON ((0.00000 2.00000, 2.00000 2.00000, 1....
2 MULTILINESTRING ((0.00000 0.00000, 1.00000 1.0...
3 LINESTRING EMPTY
4 MULTIPOINT (0.00000 1.00000, 1.00000 1.00000)
5 None
dtype: geometry
>>> s.symmetric_difference(s2, align=False)
0 POLYGON ((0.00000 2.00000, 2.00000 2.00000, 1....
1 GEOMETRYCOLLECTION (POLYGON ((0.00000 0.00000,...
2 MULTILINESTRING ((0.00000 0.00000, 1.00000 1.0...
3 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
4 POINT EMPTY
dtype: geometry
See Also
--------
GeoSeries.difference
GeoSeries.union
GeoSeries.intersection
"""
return _binary_geo("symmetric_difference", self, other, align)
def union(self, other, align=True):
"""Returns a ``GeoSeries`` of the union of points in each aligned geometry with
`other`.
.. image:: ../../../_static/binary_geo-union.svg
:align: center
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : Geoseries or geometric object
The Geoseries (elementwise) or geometric object to find the union
with.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
GeoSeries
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 2), (0, 2)]),
... Polygon([(0, 0), (2, 2), (0, 2)]),
... LineString([(0, 0), (2, 2)]),
... LineString([(2, 0), (0, 2)]),
... Point(0, 1),
... ],
... )
>>> s2 = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (1, 1), (0, 1)]),
... LineString([(1, 0), (1, 3)]),
... LineString([(2, 0), (0, 2)]),
... Point(1, 1),
... Point(0, 1),
... ],
... index=range(1, 6),
... )
>>> s
0 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
1 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
2 LINESTRING (0.00000 0.00000, 2.00000 2.00000)
3 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
4 POINT (0.00000 1.00000)
dtype: geometry
>>> s2
1 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
2 LINESTRING (1.00000 0.00000, 1.00000 3.00000)
3 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
4 POINT (1.00000 1.00000)
5 POINT (0.00000 1.00000)
dtype: geometry
We can do union of each geometry and a single
shapely geometry:
.. image:: ../../../_static/binary_op-03.svg
:align: center
>>> s.union(Polygon([(0, 0), (1, 1), (0, 1)]))
0 POLYGON ((0.00000 0.00000, 0.00000 1.00000, 0....
1 POLYGON ((0.00000 0.00000, 0.00000 1.00000, 0....
2 GEOMETRYCOLLECTION (POLYGON ((0.00000 0.00000,...
3 GEOMETRYCOLLECTION (POLYGON ((0.00000 0.00000,...
4 POLYGON ((0.00000 1.00000, 1.00000 1.00000, 0....
dtype: geometry
We can also check two GeoSeries against each other, row by row.
The GeoSeries above have different indices. We can either align both GeoSeries
based on index values and compare elements with the same index using
``align=True`` or ignore index and compare elements based on their matching
order using ``align=False``:
.. image:: ../../../_static/binary_op-02.svg
>>> s.union(s2, align=True)
0 None
1 POLYGON ((0.00000 0.00000, 0.00000 1.00000, 0....
2 MULTILINESTRING ((0.00000 0.00000, 1.00000 1.0...
3 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
4 MULTIPOINT (0.00000 1.00000, 1.00000 1.00000)
5 None
dtype: geometry
>>> s.union(s2, align=False)
0 POLYGON ((0.00000 0.00000, 0.00000 1.00000, 0....
1 GEOMETRYCOLLECTION (POLYGON ((0.00000 0.00000,...
2 MULTILINESTRING ((0.00000 0.00000, 1.00000 1.0...
3 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
4 POINT (0.00000 1.00000)
dtype: geometry
See Also
--------
GeoSeries.symmetric_difference
GeoSeries.difference
GeoSeries.intersection
"""
return _binary_geo("union", self, other, align)
def intersection(self, other, align=True):
"""Returns a ``GeoSeries`` of the intersection of points in each
aligned geometry with `other`.
.. image:: ../../../_static/binary_geo-intersection.svg
:align: center
The operation works on a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : Geoseries or geometric object
The Geoseries (elementwise) or geometric object to find the
intersection with.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
GeoSeries
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 2), (0, 2)]),
... Polygon([(0, 0), (2, 2), (0, 2)]),
... LineString([(0, 0), (2, 2)]),
... LineString([(2, 0), (0, 2)]),
... Point(0, 1),
... ],
... )
>>> s2 = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (1, 1), (0, 1)]),
... LineString([(1, 0), (1, 3)]),
... LineString([(2, 0), (0, 2)]),
... Point(1, 1),
... Point(0, 1),
... ],
... index=range(1, 6),
... )
>>> s
0 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
1 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
2 LINESTRING (0.00000 0.00000, 2.00000 2.00000)
3 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
4 POINT (0.00000 1.00000)
dtype: geometry
>>> s2
1 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
2 LINESTRING (1.00000 0.00000, 1.00000 3.00000)
3 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
4 POINT (1.00000 1.00000)
5 POINT (0.00000 1.00000)
dtype: geometry
We can also do intersection of each geometry and a single
shapely geometry:
.. image:: ../../../_static/binary_op-03.svg
:align: center
>>> s.intersection(Polygon([(0, 0), (1, 1), (0, 1)]))
0 POLYGON ((0.00000 0.00000, 0.00000 1.00000, 1....
1 POLYGON ((0.00000 0.00000, 0.00000 1.00000, 1....
2 LINESTRING (0.00000 0.00000, 1.00000 1.00000)
3 POINT (1.00000 1.00000)
4 POINT (0.00000 1.00000)
dtype: geometry
We can also check two GeoSeries against each other, row by row.
The GeoSeries above have different indices. We can either align both GeoSeries
based on index values and compare elements with the same index using
``align=True`` or ignore index and compare elements based on their matching
order using ``align=False``:
.. image:: ../../../_static/binary_op-02.svg
>>> s.intersection(s2, align=True)
0 None
1 POLYGON ((0.00000 0.00000, 0.00000 1.00000, 1....
2 POINT (1.00000 1.00000)
3 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
4 POINT EMPTY
5 None
dtype: geometry
>>> s.intersection(s2, align=False)
0 POLYGON ((0.00000 0.00000, 0.00000 1.00000, 1....
1 LINESTRING (1.00000 1.00000, 1.00000 2.00000)
2 POINT (1.00000 1.00000)
3 POINT (1.00000 1.00000)
4 POINT (0.00000 1.00000)
dtype: geometry
See Also
--------
GeoSeries.difference
GeoSeries.symmetric_difference
GeoSeries.union
"""
return _binary_geo("intersection", self, other, align)
#
# Other operations
#
@property
def bounds(self):
"""Returns a ``DataFrame`` with columns ``minx``, ``miny``, ``maxx``,
``maxy`` values containing the bounds for each geometry.
See ``GeoSeries.total_bounds`` for the limits of the entire series.
Examples
--------
>>> from shapely.geometry import Point, Polygon, LineString
>>> d = {'geometry': [Point(2, 1), Polygon([(0, 0), (1, 1), (1, 0)]),
... LineString([(0, 1), (1, 2)])]}
>>> gdf = geopandas.GeoDataFrame(d, crs="EPSG:4326")
>>> gdf.bounds
minx miny maxx maxy
0 2.0 1.0 2.0 1.0
1 0.0 0.0 1.0 1.0
2 0.0 1.0 1.0 2.0
"""
bounds = GeometryArray(self.geometry.values).bounds
return DataFrame(
bounds, columns=["minx", "miny", "maxx", "maxy"], index=self.index
)
@property
def total_bounds(self):
"""Returns a tuple containing ``minx``, ``miny``, ``maxx``, ``maxy``
values for the bounds of the series as a whole.
See ``GeoSeries.bounds`` for the bounds of the geometries contained in
the series.
Examples
--------
>>> from shapely.geometry import Point, Polygon, LineString
>>> d = {'geometry': [Point(3, -1), Polygon([(0, 0), (1, 1), (1, 0)]),
... LineString([(0, 1), (1, 2)])]}
>>> gdf = geopandas.GeoDataFrame(d, crs="EPSG:4326")
>>> gdf.total_bounds
array([ 0., -1., 3., 2.])
"""
return GeometryArray(self.geometry.values).total_bounds
@property
def sindex(self):
"""Generate the spatial index
Creates an R-tree spatial index based on ``pygeos.STRtree`` or
``rtree.index.Index``.
Note that the spatial index may not be fully
initialized until the first use.
Examples
--------
>>> from shapely.geometry import box
>>> s = geopandas.GeoSeries(geopandas.points_from_xy(range(5), range(5)))
>>> s
0 POINT (0.00000 0.00000)
1 POINT (1.00000 1.00000)
2 POINT (2.00000 2.00000)
3 POINT (3.00000 3.00000)
4 POINT (4.00000 4.00000)
dtype: geometry
Query the spatial index with a single geometry based on the bounding box:
>>> s.sindex.query(box(1, 1, 3, 3))
array([1, 2, 3])
Query the spatial index with a single geometry based on the predicate:
>>> s.sindex.query(box(1, 1, 3, 3), predicate="contains")
array([2])
Query the spatial index with an array of geometries based on the bounding
box:
>>> s2 = geopandas.GeoSeries([box(1, 1, 3, 3), box(4, 4, 5, 5)])
>>> s2
0 POLYGON ((3.00000 1.00000, 3.00000 3.00000, 1....
1 POLYGON ((5.00000 4.00000, 5.00000 5.00000, 4....
dtype: geometry
>>> s.sindex.query_bulk(s2)
array([[0, 0, 0, 1],
[1, 2, 3, 4]])
Query the spatial index with an array of geometries based on the predicate:
>>> s.sindex.query_bulk(s2, predicate="contains")
array([[0],
[2]])
"""
return self.geometry.values.sindex
@property
def has_sindex(self):
"""Check the existence of the spatial index without generating it.
Use the `.sindex` attribute on a GeoDataFrame or GeoSeries
to generate a spatial index if it does not yet exist,
which may take considerable time based on the underlying index
implementation.
Note that the underlying spatial index may not be fully
initialized until the first use.
Examples
--------
>>> from shapely.geometry import Point
>>> d = {'geometry': [Point(1, 2), Point(2, 1)]}
>>> gdf = geopandas.GeoDataFrame(d)
>>> gdf.has_sindex
False
>>> index = gdf.sindex
>>> gdf.has_sindex
True
Returns
-------
bool
`True` if the spatial index has been generated or
`False` if not.
"""
return self.geometry.values.has_sindex
def buffer(self, distance, resolution=16, **kwargs):
"""Returns a ``GeoSeries`` of geometries representing all points within
a given ``distance`` of each geometric object.
See http://shapely.readthedocs.io/en/latest/manual.html#object.buffer
for details.
Parameters
----------
distance : float, np.array, pd.Series
            The radius of the buffer. If an np.array or pd.Series is used,
            it must have the same length as the GeoSeries.
resolution : int (optional, default 16)
The resolution of the buffer around each vertex.
Examples
--------
>>> from shapely.geometry import Point, LineString, Polygon
>>> s = geopandas.GeoSeries(
... [
... Point(0, 0),
... LineString([(1, -1), (1, 0), (2, 0), (2, 1)]),
... Polygon([(3, -1), (4, 0), (3, 1)]),
... ]
... )
>>> s
0 POINT (0.00000 0.00000)
1 LINESTRING (1.00000 -1.00000, 1.00000 0.00000,...
2 POLYGON ((3.00000 -1.00000, 4.00000 0.00000, 3...
dtype: geometry
>>> s.buffer(0.2)
0 POLYGON ((0.20000 0.00000, 0.19904 -0.01960, 0...
1 POLYGON ((0.80000 0.00000, 0.80096 0.01960, 0....
2 POLYGON ((2.80000 -1.00000, 2.80000 1.00000, 2...
dtype: geometry
        ``**kwargs`` accepts additional keyword arguments such as
        ``join_style`` and ``cap_style``.
See the following illustration of different options.
.. plot:: _static/code/buffer.py
"""
# TODO: update docstring based on pygeos after shapely 2.0
if isinstance(distance, pd.Series):
if not self.index.equals(distance.index):
raise ValueError(
"Index values of distance sequence does "
"not match index values of the GeoSeries"
)
distance = np.asarray(distance)
return _delegate_geo_method(
"buffer", self, distance, resolution=resolution, **kwargs
)
def simplify(self, *args, **kwargs):
"""Returns a ``GeoSeries`` containing a simplified representation of
each geometry.
The algorithm (Douglas-Peucker) recursively splits the original line
into smaller parts and connects these parts’ endpoints
by a straight line. Then, it removes all points whose distance
to the straight line is smaller than `tolerance`. It does not
move any points and it always preserves endpoints of
the original line or polygon.
See http://shapely.readthedocs.io/en/latest/manual.html#object.simplify
for details
Parameters
----------
tolerance : float
All parts of a simplified geometry will be no more than
`tolerance` distance from the original. It has the same units
as the coordinate reference system of the GeoSeries.
For example, using `tolerance=100` in a projected CRS with meters
as units means a distance of 100 meters in reality.
preserve_topology: bool (default True)
False uses a quicker algorithm, but may produce self-intersecting
or otherwise invalid geometries.
Notes
-----
Invalid geometric objects may result from simplification that does not
preserve topology and simplification may be sensitive to the order of
coordinates: two geometries differing only in order of coordinates may be
simplified differently.
Examples
--------
>>> from shapely.geometry import Point, LineString
>>> s = geopandas.GeoSeries(
... [Point(0, 0).buffer(1), LineString([(0, 0), (1, 10), (0, 20)])]
... )
>>> s
0 POLYGON ((1.00000 0.00000, 0.99518 -0.09802, 0...
1 LINESTRING (0.00000 0.00000, 1.00000 10.00000,...
dtype: geometry
>>> s.simplify(1)
0 POLYGON ((1.00000 0.00000, 0.00000 -1.00000, -...
1 LINESTRING (0.00000 0.00000, 0.00000 20.00000)
dtype: geometry
"""
return _delegate_geo_method("simplify", self, *args, **kwargs)
def relate(self, other, align=True):
"""
Returns the DE-9IM intersection matrices for the geometries
        The operation works in a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
Parameters
----------
other : BaseGeometry or GeoSeries
            The other geometry from which to compute
            the DE-9IM intersection matrices.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
        -------
spatial_relations: Series of strings
The DE-9IM intersection matrices which describe
the spatial relations of the other geometry.
Examples
--------
>>> from shapely.geometry import Polygon, LineString, Point
>>> s = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (2, 2), (0, 2)]),
... Polygon([(0, 0), (2, 2), (0, 2)]),
... LineString([(0, 0), (2, 2)]),
... LineString([(2, 0), (0, 2)]),
... Point(0, 1),
... ],
... )
>>> s2 = geopandas.GeoSeries(
... [
... Polygon([(0, 0), (1, 1), (0, 1)]),
... LineString([(1, 0), (1, 3)]),
... LineString([(2, 0), (0, 2)]),
... Point(1, 1),
... Point(0, 1),
... ],
... index=range(1, 6),
... )
>>> s
0 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
1 POLYGON ((0.00000 0.00000, 2.00000 2.00000, 0....
2 LINESTRING (0.00000 0.00000, 2.00000 2.00000)
3 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
4 POINT (0.00000 1.00000)
dtype: geometry
>>> s2
1 POLYGON ((0.00000 0.00000, 1.00000 1.00000, 0....
2 LINESTRING (1.00000 0.00000, 1.00000 3.00000)
3 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
4 POINT (1.00000 1.00000)
5 POINT (0.00000 1.00000)
dtype: geometry
We can relate each geometry and a single
shapely geometry:
.. image:: ../../../_static/binary_op-03.svg
:align: center
>>> s.relate(Polygon([(0, 0), (1, 1), (0, 1)]))
0 212F11FF2
1 212F11FF2
2 F11F00212
3 F01FF0212
4 F0FFFF212
dtype: object
We can also check two GeoSeries against each other, row by row.
The GeoSeries above have different indices. We can either align both GeoSeries
based on index values and compare elements with the same index using
``align=True`` or ignore index and compare elements based on their matching
order using ``align=False``:
.. image:: ../../../_static/binary_op-02.svg
>>> s.relate(s2, align=True)
0 None
1 212F11FF2
2 0F1FF0102
3 1FFF0FFF2
4 FF0FFF0F2
5 None
dtype: object
>>> s.relate(s2, align=False)
0 212F11FF2
1 1F20F1102
2 0F1FF0102
3 0F1FF0FF2
4 0FFFFFFF2
dtype: object
"""
return _binary_op("relate", self, other, align)
def project(self, other, normalized=False, align=True):
"""
Return the distance along each geometry nearest to *other*
        The operation works in a 1-to-1 row-wise manner:
.. image:: ../../../_static/binary_op-01.svg
:align: center
The project method is the inverse of interpolate.
Parameters
----------
other : BaseGeometry or GeoSeries
            The *other* geometry from which to compute the projected point.
normalized : boolean
If normalized is True, return the distance normalized to
the length of the object.
align : bool (default True)
If True, automatically aligns GeoSeries based on their indices.
If False, the order of elements is preserved.
Returns
-------
Series
Examples
--------
>>> from shapely.geometry import LineString, Point
>>> s = geopandas.GeoSeries(
... [
... LineString([(0, 0), (2, 0), (0, 2)]),
... LineString([(0, 0), (2, 2)]),
... LineString([(2, 0), (0, 2)]),
... ],
... )
>>> s2 = geopandas.GeoSeries(
... [
... Point(1, 0),
... Point(1, 0),
... Point(2, 1),
... ],
... index=range(1, 4),
... )
>>> s
0 LINESTRING (0.00000 0.00000, 2.00000 0.00000, ...
1 LINESTRING (0.00000 0.00000, 2.00000 2.00000)
2 LINESTRING (2.00000 0.00000, 0.00000 2.00000)
dtype: geometry
>>> s2
1 POINT (1.00000 0.00000)
2 POINT (1.00000 0.00000)
3 POINT (2.00000 1.00000)
dtype: geometry
We can project each geometry on a single
shapely geometry:
.. image:: ../../../_static/binary_op-03.svg
:align: center
>>> s.project(Point(1, 0))
0 1.000000
1 0.707107
2 0.707107
dtype: float64
We can also check two GeoSeries against each other, row by row.
The GeoSeries above have different indices. We can either align both GeoSeries
based on index values and project elements with the same index using
``align=True`` or ignore index and project elements based on their matching
order using ``align=False``:
.. image:: ../../../_static/binary_op-02.svg
>>> s.project(s2, align=True)
0 NaN
1 0.707107
2 0.707107
3 NaN
dtype: float64
>>> s.project(s2, align=False)
0 1.000000
1 0.707107
2 0.707107
dtype: float64
See also
--------
GeoSeries.interpolate
"""
return _binary_op("project", self, other, normalized=normalized, align=align)
def interpolate(self, distance, normalized=False):
"""
Return a point at the specified distance along each geometry
Parameters
----------
distance : float or Series of floats
Distance(s) along the geometries at which a point should be
            returned. If an np.array or pd.Series is used, it must have the
            same length as the GeoSeries.
normalized : boolean
If normalized is True, distance will be interpreted as a fraction
of the geometric object's length.
"""
if isinstance(distance, pd.Series):
if not self.index.equals(distance.index):
raise ValueError(
"Index values of distance sequence does "
"not match index values of the GeoSeries"
)
distance = np.asarray(distance)
return _delegate_geo_method(
"interpolate", self, distance, normalized=normalized
)
def affine_transform(self, matrix):
"""Return a ``GeoSeries`` with translated geometries.
See http://shapely.readthedocs.io/en/stable/manual.html#shapely.affinity.affine_transform
for details.
Parameters
----------
matrix: List or tuple
6 or 12 items for 2D or 3D transformations respectively.
For 2D affine transformations,
the 6 parameter matrix is ``[a, b, d, e, xoff, yoff]``
For 3D affine transformations,
the 12 parameter matrix is ``[a, b, c, d, e, f, g, h, i, xoff, yoff, zoff]``
Examples
--------
>>> from shapely.geometry import Point, LineString, Polygon
>>> s = geopandas.GeoSeries(
... [
... Point(1, 1),
... LineString([(1, -1), (1, 0)]),
... Polygon([(3, -1), (4, 0), (3, 1)]),
... ]
... )
>>> s
0 POINT (1.00000 1.00000)
1 LINESTRING (1.00000 -1.00000, 1.00000 0.00000)
2 POLYGON ((3.00000 -1.00000, 4.00000 0.00000, 3...
dtype: geometry
>>> s.affine_transform([2, 3, 2, 4, 5, 2])
0 POINT (10.00000 8.00000)
1 LINESTRING (4.00000 0.00000, 7.00000 4.00000)
2 POLYGON ((8.00000 4.00000, 13.00000 10.00000, ...
dtype: geometry
""" # noqa (E501 link is longer than max line length)
return _delegate_geo_method("affine_transform", self, matrix)
def translate(self, xoff=0.0, yoff=0.0, zoff=0.0):
"""Returns a ``GeoSeries`` with translated geometries.
See http://shapely.readthedocs.io/en/latest/manual.html#shapely.affinity.translate
for details.
Parameters
----------
xoff, yoff, zoff : float, float, float
Amount of offset along each dimension.
xoff, yoff, and zoff for translation along the x, y, and z
dimensions respectively.
Examples
--------
>>> from shapely.geometry import Point, LineString, Polygon
>>> s = geopandas.GeoSeries(
... [
... Point(1, 1),
... LineString([(1, -1), (1, 0)]),
... Polygon([(3, -1), (4, 0), (3, 1)]),
... ]
... )
>>> s
0 POINT (1.00000 1.00000)
1 LINESTRING (1.00000 -1.00000, 1.00000 0.00000)
2 POLYGON ((3.00000 -1.00000, 4.00000 0.00000, 3...
dtype: geometry
>>> s.translate(2, 3)
0 POINT (3.00000 4.00000)
1 LINESTRING (3.00000 2.00000, 3.00000 3.00000)
2 POLYGON ((5.00000 2.00000, 6.00000 3.00000, 5....
dtype: geometry
""" # noqa (E501 link is longer than max line length)
return _delegate_geo_method("translate", self, xoff, yoff, zoff)
def rotate(self, angle, origin="center", use_radians=False):
"""Returns a ``GeoSeries`` with rotated geometries.
See http://shapely.readthedocs.io/en/latest/manual.html#shapely.affinity.rotate
for details.
Parameters
----------
angle : float
The angle of rotation can be specified in either degrees (default)
or radians by setting use_radians=True. Positive angles are
counter-clockwise and negative are clockwise rotations.
origin : string, Point, or tuple (x, y)
The point of origin can be a keyword 'center' for the bounding box
center (default), 'centroid' for the geometry's centroid, a Point
object or a coordinate tuple (x, y).
use_radians : boolean
Whether to interpret the angle of rotation as degrees or radians
Examples
--------
>>> from shapely.geometry import Point, LineString, Polygon
>>> s = geopandas.GeoSeries(
... [
... Point(1, 1),
... LineString([(1, -1), (1, 0)]),
... Polygon([(3, -1), (4, 0), (3, 1)]),
... ]
... )
>>> s
0 POINT (1.00000 1.00000)
1 LINESTRING (1.00000 -1.00000, 1.00000 0.00000)
2 POLYGON ((3.00000 -1.00000, 4.00000 0.00000, 3...
dtype: geometry
>>> s.rotate(90)
0 POINT (1.00000 1.00000)
1 LINESTRING (1.50000 -0.50000, 0.50000 -0.50000)
2 POLYGON ((4.50000 -0.50000, 3.50000 0.50000, 2...
dtype: geometry
>>> s.rotate(90, origin=(0, 0))
0 POINT (-1.00000 1.00000)
1 LINESTRING (1.00000 1.00000, 0.00000 1.00000)
2 POLYGON ((1.00000 3.00000, 0.00000 4.00000, -1...
dtype: geometry
"""
return _delegate_geo_method(
"rotate", self, angle, origin=origin, use_radians=use_radians
)
def scale(self, xfact=1.0, yfact=1.0, zfact=1.0, origin="center"):
"""Returns a ``GeoSeries`` with scaled geometries.
The geometries can be scaled by different factors along each
dimension. Negative scale factors will mirror or reflect coordinates.
See http://shapely.readthedocs.io/en/latest/manual.html#shapely.affinity.scale
for details.
Parameters
----------
xfact, yfact, zfact : float, float, float
Scaling factors for the x, y, and z dimensions respectively.
origin : string, Point, or tuple
The point of origin can be a keyword 'center' for the 2D bounding
box center (default), 'centroid' for the geometry's 2D centroid, a
Point object or a coordinate tuple (x, y, z).
Examples
--------
>>> from shapely.geometry import Point, LineString, Polygon
>>> s = geopandas.GeoSeries(
... [
... Point(1, 1),
... LineString([(1, -1), (1, 0)]),
... Polygon([(3, -1), (4, 0), (3, 1)]),
... ]
... )
>>> s
0 POINT (1.00000 1.00000)
1 LINESTRING (1.00000 -1.00000, 1.00000 0.00000)
2 POLYGON ((3.00000 -1.00000, 4.00000 0.00000, 3...
dtype: geometry
>>> s.scale(2, 3)
0 POINT (1.00000 1.00000)
1 LINESTRING (1.00000 -2.00000, 1.00000 1.00000)
2 POLYGON ((2.50000 -3.00000, 4.50000 0.00000, 2...
dtype: geometry
>>> s.scale(2, 3, origin=(0, 0))
0 POINT (2.00000 3.00000)
1 LINESTRING (2.00000 -3.00000, 2.00000 0.00000)
2 POLYGON ((6.00000 -3.00000, 8.00000 0.00000, 6...
dtype: geometry
"""
return _delegate_geo_method("scale", self, xfact, yfact, zfact, origin=origin)
def skew(self, xs=0.0, ys=0.0, origin="center", use_radians=False):
"""Returns a ``GeoSeries`` with skewed geometries.
The geometries are sheared by angles along the x and y dimensions.
See http://shapely.readthedocs.io/en/latest/manual.html#shapely.affinity.skew
for details.
Parameters
----------
xs, ys : float, float
The shear angle(s) for the x and y axes respectively. These can be
specified in either degrees (default) or radians by setting
use_radians=True.
origin : string, Point, or tuple (x, y)
The point of origin can be a keyword 'center' for the bounding box
center (default), 'centroid' for the geometry's centroid, a Point
object or a coordinate tuple (x, y).
use_radians : boolean
Whether to interpret the shear angle(s) as degrees or radians
Examples
--------
>>> from shapely.geometry import Point, LineString, Polygon
>>> s = geopandas.GeoSeries(
... [
... Point(1, 1),
... LineString([(1, -1), (1, 0)]),
... Polygon([(3, -1), (4, 0), (3, 1)]),
... ]
... )
>>> s
0 POINT (1.00000 1.00000)
1 LINESTRING (1.00000 -1.00000, 1.00000 0.00000)
2 POLYGON ((3.00000 -1.00000, 4.00000 0.00000, 3...
dtype: geometry
>>> s.skew(45, 30)
0 POINT (1.00000 1.00000)
1 LINESTRING (0.50000 -1.00000, 1.50000 0.00000)
2 POLYGON ((2.00000 -1.28868, 4.00000 0.28868, 4...
dtype: geometry
>>> s.skew(45, 30, origin=(0, 0))
0 POINT (2.00000 1.57735)
1 LINESTRING (0.00000 -0.42265, 1.00000 0.57735)
2 POLYGON ((2.00000 0.73205, 4.00000 2.30940, 4....
dtype: geometry
"""
return _delegate_geo_method(
"skew", self, xs, ys, origin=origin, use_radians=use_radians
)
@property
def cx(self):
"""
Coordinate based indexer to select by intersection with bounding box.
Format of input should be ``.cx[xmin:xmax, ymin:ymax]``. Any of
``xmin``, ``xmax``, ``ymin``, and ``ymax`` can be provided, but input
must include a comma separating x and y slices. That is, ``.cx[:, :]``
will return the full series/frame, but ``.cx[:]`` is not implemented.
Examples
--------
>>> from shapely.geometry import LineString, Point
>>> s = geopandas.GeoSeries(
... [Point(0, 0), Point(1, 2), Point(3, 3), LineString([(0, 0), (3, 3)])]
... )
>>> s
0 POINT (0.00000 0.00000)
1 POINT (1.00000 2.00000)
2 POINT (3.00000 3.00000)
3 LINESTRING (0.00000 0.00000, 3.00000 3.00000)
dtype: geometry
>>> s.cx[0:1, 0:1]
0 POINT (0.00000 0.00000)
3 LINESTRING (0.00000 0.00000, 3.00000 3.00000)
dtype: geometry
>>> s.cx[:, 1:]
1 POINT (1.00000 2.00000)
2 POINT (3.00000 3.00000)
3 LINESTRING (0.00000 0.00000, 3.00000 3.00000)
dtype: geometry
"""
return _CoordinateIndexer(self)
def equals(self, other):
"""
Test whether two objects contain the same elements.
This function allows two GeoSeries or GeoDataFrames to be compared
against each other to see if they have the same shape and elements.
Missing values in the same location are considered equal. The
        row/column indexes do not need to have the same type (as long as the
values are still considered equal), but the dtypes of the respective
columns must be the same.
Parameters
----------
other : GeoSeries or GeoDataFrame
The other GeoSeries or GeoDataFrame to be compared with the first.
Returns
-------
bool
True if all elements are the same in both objects, False
otherwise.
"""
# we override this because pandas is using `self._constructor` in the
# isinstance check (https://github.com/geopandas/geopandas/issues/1420)
if not isinstance(other, type(self)):
return False
return self._data.equals(other._data)
class _CoordinateIndexer(object):
# see docstring GeoPandasBase.cx property above
def __init__(self, obj):
self.obj = obj
def __getitem__(self, key):
obj = self.obj
xs, ys = key
# handle numeric values as x and/or y coordinate index
if type(xs) is not slice:
xs = slice(xs, xs)
if type(ys) is not slice:
ys = slice(ys, ys)
# don't know how to handle step; should this raise?
if xs.step is not None or ys.step is not None:
warn("Ignoring step - full interval is used.")
if xs.start is None or xs.stop is None or ys.start is None or ys.stop is None:
xmin, ymin, xmax, ymax = obj.total_bounds
bbox = box(
xs.start if xs.start is not None else xmin,
ys.start if ys.start is not None else ymin,
xs.stop if xs.stop is not None else xmax,
ys.stop if ys.stop is not None else ymax,
)
idx = obj.intersects(bbox)
return obj[idx]
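# ---------------------------------------------------------------------------
# Illustrative sketch (ours, not part of the original module). The ``project``
# docstring above notes that it is the inverse of ``interpolate``, but
# ``interpolate`` carries no usage example; the helper below shows the round
# trip on a small GeoSeries. It assumes geopandas and shapely are importable;
# the helper name is hypothetical.
def _demo_project_interpolate():
    import geopandas
    from shapely.geometry import LineString, Point

    lines = geopandas.GeoSeries(
        [LineString([(0, 0), (0, 2)]), LineString([(0, 0), (2, 2)])]
    )
    # Distance along each line to the location nearest Point(0, 1) ...
    distances = lines.project(Point(0, 1))
    # ... and interpolating those distances recovers the nearest points.
    nearest_points = lines.interpolate(distances)
    return distances, nearest_points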
|
bsd-3-clause
|
tks0123456789/kaggle-Otto
|
exp_NN3_TRI_max_epochs.py
|
2
|
5997
|
"""
Experiment for TRI + NN3
Aim: To find the best max_epochs for TRI(k_min = 2, k_max = 4,5) + NN3(1024, 1024, 1024)
max_epochs: [22, 24, ... ,98, 100]
Averaging 20 models
Summary
epochs loss
k_min k_max
2 4 76 0.421093
5 86 0.420173
Time: 5:04:31 on i7-4790k 32G MEM GTX660
"""
import numpy as np
import scipy as sp
import pandas as pd
from pylearn2.models import mlp
from pylearn2.models.mlp import RectifiedLinear, Softmax, MLP
from pylearn2.costs.mlp.dropout import Dropout
from pylearn2.training_algorithms import sgd, learning_rule
from pylearn2.termination_criteria import EpochCounter
from pylearn2.datasets import DenseDesignMatrix
from pylearn2.train import Train
from theano.compat.python2x import OrderedDict
import theano.tensor as T
from theano import function
import pickle
import sklearn.preprocessing as pp
from sklearn import cross_validation
from sklearn.cross_validation import StratifiedKFold
from sklearn.preprocessing import scale
from sklearn.metrics import log_loss
from sklearn.grid_search import ParameterGrid
from datetime import datetime
import os
from utility import *
from predict import predict
import pylab
path = os.getcwd() + '/'
path_log = path + 'logs/'
file_train = path + 'train.csv'
training = pd.read_csv(file_train, index_col = 0)
num_train = training.shape[0]
y = training['target'].values
yMat = pd.get_dummies(training['target']).values
X = training.iloc[:,:93].values
scaler = pp.StandardScaler()
kf = cross_validation.StratifiedKFold(y, n_folds=5, shuffle = True, random_state = 345)
for train_idx, valid_idx in kf:
break
y_train = yMat[train_idx]
y_valid = yMat[valid_idx]
# [l1, l2, l3, output]
# params: k_min, k_max, epochs
nIter = 20
m = 130
po = .6
epochs = 20
epochs_add = 2
n_add = 40
bs = 64
mm = .97
lr = .01
dim = 1024
ir = .05
ip = .8
ir_out = .05
mcn_out = 3.5
scores = []
param_grid = {'k_min': [2], 'k_max': [4, 5]}
t0 = datetime.now()
for params in ParameterGrid(param_grid):
k_min, k_max = params['k_min'], params['k_max']
predAll = [np.zeros(y_valid.shape) for s in range(n_add)]
for i in range(nIter):
seed = i + 9198
R = col_k_ones_matrix(X.shape[1], m, k_min = k_min, k_max = k_max, seed = seed)
np.random.seed(seed + 33)
R.data = np.random.choice([1, -1], R.data.size)
X3 = X * R
X1 = np.sign(X3) * np.abs(X3) ** po
X2 = scaler.fit_transform(X1)
training = DenseDesignMatrix(X = X2[train_idx], y = yMat[train_idx])
l1 = RectifiedLinear(layer_name='l1', irange = ir, dim = dim, max_col_norm = 1.)
l2 = RectifiedLinear(layer_name='l2', irange = ir, dim = dim, max_col_norm = 1.)
l3 = RectifiedLinear(layer_name='l3', irange = ir, dim = dim, max_col_norm = 1.)
output = Softmax(layer_name='y', n_classes = 9, irange = ir,
max_col_norm = mcn_out)
mdl = MLP([l1, l2, l3, output], nvis = X2.shape[1])
trainer = sgd.SGD(learning_rate=lr,
batch_size=bs,
learning_rule=learning_rule.Momentum(mm),
cost=Dropout(default_input_include_prob=ip,
default_input_scale=1/ip),
termination_criterion=EpochCounter(epochs),seed = seed)
decay = sgd.LinearDecayOverEpoch(start=2, saturate=20, decay_factor= .1)
experiment = Train(dataset = training, model=mdl, algorithm=trainer, extensions=[decay])
experiment.main_loop()
epochs_current = epochs
for s in range(n_add):
trainer = sgd.SGD(learning_rate=lr * .1,
batch_size=bs,
learning_rule=learning_rule.Momentum(mm),
cost=Dropout(default_input_include_prob=ip,
default_input_scale=1/ip),
termination_criterion=EpochCounter(epochs_add),seed = seed)
experiment = Train(dataset = training, model=mdl, algorithm=trainer)
experiment.main_loop()
epochs_current += epochs_add
pred0 = predict(mdl, X2[train_idx].astype(np.float32))
pred1 = predict(mdl, X2[valid_idx].astype(np.float32))
predAll[s] += pred1
scores.append({'k_min':k_min, 'k_max':k_max,
'epochs':epochs_current, 'nModels':i + 1, 'seed':seed,
'valid':log_loss(y_valid, pred1),
'train':log_loss(y_train, pred0),
'valid_avg':log_loss(y_valid, predAll[s] / (i + 1))})
print scores[-1], datetime.now() - t0
df = pd.DataFrame(scores)
if os.path.exists(path_log) is False:
print 'mkdir', path_log
os.mkdir(path_log)
df.to_csv(path_log + 'exp_NN3_TRI_max_epochs.csv')
keys = ['k_min', 'k_max', 'epochs']
grouped = df.groupby(keys)
print 'Best'
print pd.DataFrame({'epochs':grouped['valid_avg'].last().unstack().idxmin(1),
'loss':grouped['valid_avg'].last().unstack().min(1)})
# epochs loss
# k_min k_max
# 2 4 76 0.421093
# 5 86 0.420173
# Figure for k_max == 4
grouped = df[df['k_max'] == 4].groupby('epochs')
g = grouped[['train', 'valid']].mean()
g['valid_avg'] = grouped['valid_avg'].last()
print g.iloc[[0,1,26,27,28,38,39],:]
# train valid valid_avg
# epochs
# 22 0.280855 0.478790 0.431065
# 24 0.274300 0.479380 0.430083
# 74 0.173661 0.504325 0.422263
# 76 0.170654 0.505458 0.421093
# 78 0.167444 0.506752 0.421296
# 98 0.142868 0.519850 0.422619
# 100 0.140718 0.521398 0.422675
ax = g.plot()
ax.set_title('TRI+NN3 k_min=2, k_max=4')
ax.set_ylabel('Logloss')
fig = ax.get_figure()
fig.savefig(path_log + 'exp_NN3_TRI_max_epochs.png')
|
mit
|
cathyyul/sumo
|
docs/tutorial/san_pablo_dam/data/analyzeData.py
|
6
|
3287
|
import sys
import os
import math
import numpy as np
def getAttr(line, which):
beg = line.find(which)
beg = line.find('"', beg)
end = line.find('"', beg+1)
return line[beg+1:end]
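# Minimal illustration (ours, not part of the original script): getAttr pulls
# the quoted value that follows the given attribute name in a detector line.
_sample = '<interval begin="0.00" id="e1_0" nVehContrib="3"/>'
assert getAttr(_sample, 'nVehContrib') == '3'
assert getAttr(_sample, 'begin') == '0.00'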
# this is from here: http://code.activestate.com/recipes/389639
class Ddict(dict):
def __init__(self, default=None):
self.default = default
def __getitem__(self, key):
if not self.has_key(key):
self[key] = self.default()
return dict.__getitem__(self, key)
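# Illustrative use (ours): missing keys are created from the factory on first
# access, which is what lets ``dd[itt][lane] = nn`` below work without
# pre-creating the per-interval dictionaries.
_demo = Ddict(lambda: Ddict(lambda: 0))
_demo[60][1] += 5        # both nesting levels are created on demand
assert _demo[60][1] == 5 and _demo[120][2] == 0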
# os.system('run-an-external-command')
# os.getcwd()
# os.chdir()
f = open(sys.argv[1],'r')
data = f.readlines()
f.close()
dd = Ddict( lambda: Ddict( lambda: 0))
# f1 = open('raw-results.txt','w')
f1 = open('tmp.txt','w')
for i in range(1,len(data)):
if data[i].find('<interval')!=-1:
ll = data[i].split('"')
nn = int(getAttr(data[i], "nVehContrib"))#int(ll[7])
lane = int(getAttr(data[i], "id")[-1:])#int(ll[5])
tt = float(getAttr(data[i], "begin"))#float(ll[1])
itt = int(tt)
if nn>0:
print >> f1,tt,lane,nn,ll[9],ll[11],ll[13],ll[15]
dd[itt][lane] = nn
f1.close()
maxLanes = 0
dt2OneHour = 6.0
for t in dd.iterkeys():
if len(dd[t])>maxLanes:
maxLanes = len(dd[t])
tVec = np.zeros( len(dd), dtype=int)
QVec = np.zeros( len(dd), dtype=int)
xVec = np.zeros( (len(dd), maxLanes), dtype=float)
qVec = np.zeros( (len(dd), maxLanes), dtype=float)
vecIndx = 0
f = open('lane-shares.txt','w')
#for t,v in dd.items():
for t in sorted(dd.iterkeys()):
# qTot = math.fsum(dd[t])
qTot = sum(dd[t].values())
nrm = 0.0
if qTot:
nrm = 1.0/qTot
s = repr(t) + ' ' + repr(qTot) + ' '
tVec[vecIndx] = t
QVec[vecIndx] = dt2OneHour*qTot
for lane in range(maxLanes):
share = 0.0
if dd[t].has_key(lane):
share = nrm*dd[t][lane]
s = s + repr(share) + ' '
xVec[vecIndx,lane] = share
qVec[vecIndx,lane] = dt2OneHour*dd[t][lane]
# print >> f,t,qTot,lane,share
vecIndx += 1
print >> f, s
f.close()
try:
import matplotlib.pyplot as plt
plt.rcParams['xtick.direction'] = 'out'
plt.rcParams['ytick.direction'] = 'out'
# y =
n = len(qVec)
for lane in range(maxLanes):
desc = 'lane: ' + repr(lane)
plt.plot(tVec, qVec[range(n),lane], label=desc)
# plt.plot(tVec, qVec[range(n),0], 'r-',tVec, qVec[range(n),1], 'g-',tVec, qVec[range(n),2], 'b-')
# plt.plot(tVec, QVec, 'r-')
plt.ylabel('lane flows')
plt.xlabel('time [s]')
plt.legend()
bname = 'flows-over-time-' + repr(maxLanes)
plt.savefig(bname+'.eps')
plt.savefig(bname+'.pdf')
plt.savefig(bname+'.png')
plt.savefig(bname+'.svg')
# try:
# import pyemf
# plt.savefig('shares-over-time.emf')
# except :
# print '# no emf support'
# plt.show()
plt.close()
# ## next plot:
for lane in range(maxLanes):
desc = 'lane: ' + repr(lane)
plt.plot(QVec, xVec[range(n),lane], 'o', markersize=10, label=desc)
# plt.plot(tVec, qVec[range(n),0], 'r-',tVec, qVec[range(n),1], 'g-',tVec, qVec[range(n),2], 'b-')
# plt.plot(tVec, QVec, 'r-')
plt.ylabel('lane shares')
plt.xlabel('total flow [veh/h]')
plt.legend()
bname = 'shares-vs-flow-' + repr(maxLanes)
plt.savefig(bname+'.eps')
plt.savefig(bname+'.pdf')
plt.savefig(bname+'.png')
plt.savefig(bname+'.svg')
# plt.show()
plt.close()
except ImportError:
print 'no matplotlib, falling back to gnuplot'
os.system('gnuplot do-some-plots.gnu')
|
gpl-3.0
|
UNR-AERIAL/scikit-learn
|
examples/linear_model/plot_theilsen.py
|
232
|
3615
|
"""
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in the case of simple linear regression, which means that it can tolerate
arbitrarily corrupted data (outliers) making up to 29.3% of the sample in the
two-dimensional case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept is then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <florian.wilhelm@gmail.com>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.show()
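##############################################################################
# Illustrative addition (ours, not part of the original example): the note in
# the docstring about ``max_subpopulation`` can be exercised directly; capping
# the number of subsample combinations bounds the runtime on larger problems
# while keeping the robust fit.
bounded_theil_sen = TheilSenRegressor(max_subpopulation=1000, random_state=42)
bounded_theil_sen.fit(X, y)
print("Bounded Theil-Sen coefficients: %s, intercept: %.3f"
      % (bounded_theil_sen.coef_, bounded_theil_sen.intercept_))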
|
bsd-3-clause
|
CarterBain/AlephNull
|
alephnull/finance/trading.py
|
1
|
10824
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import bisect
import logbook
import datetime
import pandas as pd
from alephnull.data.loader import load_market_data
from alephnull.utils import tradingcalendar
from alephnull.utils.tradingcalendar import get_early_closes
log = logbook.Logger('Trading')
# The financial simulations in zipline depend on information
# about the benchmark index and the risk free rates of return.
# The benchmark index defines the benchmark returns used in
# the calculation of performance metrics such as alpha/beta. Many
# components, including risk, performance, transforms, and
# batch_transforms, need access to a calendar of trading days and
# market hours. The TradingEnvironment maintains two time keeping
# facilities:
# - a DatetimeIndex of trading days for calendar calculations
# - a timezone name, which should be local to the exchange
# hosting the benchmark index. All dates are normalized to UTC
# for serialization and storage, and the timezone is used to
# ensure proper rollover through daylight savings and so on.
#
# This module maintains a global variable, environment, which is
# subsequently referenced directly by zipline financial
# components. To set the environment, you can set the property on
# the module directly:
# from zipline.finance import trading
# trading.environment = TradingEnvironment()
#
# or if you want to switch the environment for a limited context
# you can use a TradingEnvironment in a with clause:
# lse = TradingEnvironment(bm_index="^FTSE", exchange_tz="Europe/London")
# with lse:
# # the code here will have lse as the global trading.environment
# algo.run(start, end)
#
# User code will not normally need to use TradingEnvironment
# directly. If you are extending zipline's core financial
# components and need to use the environment, you must import the module
# NOT the variable. If you import the module, you will get a
# reference to the environment at import time, which will prevent
# your code from responding to user code that changes the global
# state.
environment = None
class TradingEnvironment(object):
def __init__(
self,
load=None,
bm_symbol='^GSPC',
exchange_tz="US/Eastern",
max_date=None,
extra_dates=None
):
self.prev_environment = self
self.bm_symbol = bm_symbol
if not load:
load = load_market_data
self.benchmark_returns, treasury_curves_map = \
load(self.bm_symbol)
self.treasury_curves = pd.DataFrame(treasury_curves_map).T
if max_date:
self.treasury_curves = self.treasury_curves.ix[:max_date, :]
self.full_trading_day = datetime.timedelta(hours=6, minutes=30)
self.early_close_trading_day = datetime.timedelta(hours=3, minutes=30)
self.exchange_tz = exchange_tz
bi = self.benchmark_returns.index
if max_date:
self.trading_days = bi[bi <= max_date].copy()
else:
self.trading_days = bi.copy()
if len(self.benchmark_returns) and extra_dates:
for extra_date in extra_dates:
extra_date = extra_date.replace(hour=0, minute=0, second=0,
microsecond=0)
if extra_date not in self.trading_days:
self.trading_days = self.trading_days + \
pd.DatetimeIndex([extra_date])
self.first_trading_day = self.trading_days[0]
self.last_trading_day = self.trading_days[-1]
self.early_closes = get_early_closes(self.first_trading_day,
self.last_trading_day)
self.open_and_closes = tradingcalendar.open_and_closes.ix[
self.trading_days]
def __enter__(self, *args, **kwargs):
global environment
self.prev_environment = environment
environment = self
# return value here is associated with "as such_and_such" on the
# with clause.
return self
def __exit__(self, exc_type, exc_val, exc_tb):
global environment
environment = self.prev_environment
# signal that any exceptions need to be propagated up the
# stack.
return False
def normalize_date(self, test_date):
test_date = pd.Timestamp(test_date, tz='UTC')
return pd.tseries.tools.normalize_date(test_date)
def utc_dt_in_exchange(self, dt):
return pd.Timestamp(dt).tz_convert(self.exchange_tz)
def exchange_dt_in_utc(self, dt):
return pd.Timestamp(dt, tz=self.exchange_tz).tz_convert('UTC')
def is_market_hours(self, test_date):
if not self.is_trading_day(test_date):
return False
mkt_open, mkt_close = self.get_open_and_close(test_date)
return test_date >= mkt_open and test_date <= mkt_close
def is_trading_day(self, test_date):
dt = self.normalize_date(test_date)
return (dt in self.trading_days)
def next_trading_day(self, test_date):
dt = self.normalize_date(test_date)
delta = datetime.timedelta(days=1)
while dt <= self.last_trading_day:
dt += delta
if dt in self.trading_days:
return dt
return None
def days_in_range(self, start, end):
mask = ((self.trading_days >= start) &
(self.trading_days <= end))
return self.trading_days[mask]
def next_open_and_close(self, start_date):
"""
Given the start_date, returns the next open and close of
the market.
"""
next_open = self.next_trading_day(start_date)
if next_open is None:
raise Exception(
"Attempt to backtest beyond available history. \
Last successful date: %s" % self.last_trading_day)
return self.get_open_and_close(next_open)
def get_open_and_close(self, day):
todays_minutes = self.open_and_closes.ix[day.date()]
return todays_minutes['market_open'], todays_minutes['market_close']
def market_minutes_for_day(self, midnight):
market_open, market_close = self.get_open_and_close(midnight)
return pd.date_range(market_open, market_close, freq='T')
def trading_day_distance(self, first_date, second_date):
first_date = self.normalize_date(first_date)
second_date = self.normalize_date(second_date)
# TODO: May be able to replace the following with searchsorted.
# Find leftmost item greater than or equal to day
i = bisect.bisect_left(self.trading_days, first_date)
if i == len(self.trading_days): # nothing found
return None
j = bisect.bisect_left(self.trading_days, second_date)
if j == len(self.trading_days):
return None
return j - i
def get_index(self, dt):
"""
Return the index of the given @dt, or the index of the preceding
trading day if the given dt is not in the trading calendar.
"""
ndt = self.normalize_date(dt)
if ndt in self.trading_days:
return self.trading_days.searchsorted(ndt)
else:
return self.trading_days.searchsorted(ndt) - 1
class SimulationParameters(object):
def __init__(self, period_start, period_end,
capital_base=10e3,
emission_rate='daily',
data_frequency='daily'):
global environment
if not environment:
# This is the global environment for trading simulation.
environment = TradingEnvironment()
self.period_start = period_start
self.period_end = period_end
self.capital_base = capital_base
self.emission_rate = emission_rate
self.data_frequency = data_frequency
assert self.period_start <= self.period_end, \
"Period start falls after period end."
assert self.period_start <= environment.last_trading_day, \
"Period start falls after the last known trading day."
assert self.period_end >= environment.first_trading_day, \
"Period end falls before the first known trading day."
self.first_open = self.calculate_first_open()
self.last_close = self.calculate_last_close()
start_index = \
environment.get_index(self.first_open)
end_index = environment.get_index(self.last_close)
# take an inclusive slice of the environment's
# trading_days.
self.trading_days = \
environment.trading_days[start_index:end_index + 1]
def calculate_first_open(self):
"""
Finds the first trading day on or after self.period_start.
"""
first_open = self.period_start
one_day = datetime.timedelta(days=1)
while not environment.is_trading_day(first_open):
first_open = first_open + one_day
mkt_open, _ = environment.get_open_and_close(first_open)
return mkt_open
def calculate_last_close(self):
"""
Finds the last trading day on or before self.period_end
"""
last_close = self.period_end
one_day = datetime.timedelta(days=1)
while not environment.is_trading_day(last_close):
last_close = last_close - one_day
_, mkt_close = environment.get_open_and_close(last_close)
return mkt_close
@property
def days_in_period(self):
"""return the number of trading days within the period [start, end)"""
return len(self.trading_days)
def __repr__(self):
return """
{class_name}(
period_start={period_start},
period_end={period_end},
capital_base={capital_base},
data_frequency={data_frequency},
emission_rate={emission_rate},
first_open={first_open},
last_close={last_close})\
""".format(class_name=self.__class__.__name__,
period_start=self.period_start,
period_end=self.period_end,
capital_base=self.capital_base,
data_frequency=self.data_frequency,
emission_rate=self.emission_rate,
first_open=self.first_open,
last_close=self.last_close)
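# ---------------------------------------------------------------------------
# Illustrative sketch (ours, not part of the original module). A typical setup
# wires the two classes above together: the environment supplies the trading
# calendar and the simulation parameters clip themselves to it. The helper is
# hypothetical and assumes the benchmark/treasury loader can fetch its data,
# which may require network access.
def _example_simulation_setup():
    start = pd.Timestamp('2012-01-03', tz='UTC')
    end = pd.Timestamp('2012-06-29', tz='UTC')
    with TradingEnvironment(bm_symbol='^GSPC', exchange_tz='US/Eastern'):
        params = SimulationParameters(period_start=start, period_end=end,
                                      capital_base=100000)
        return params.days_in_period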
|
apache-2.0
|
GuessWhoSamFoo/pandas
|
pandas/tests/groupby/test_rank.py
|
3
|
13101
|
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, concat
from pandas.util import testing as tm
def test_rank_apply():
lev1 = tm.rands_array(10, 100)
lev2 = tm.rands_array(10, 130)
lab1 = np.random.randint(0, 100, size=500)
lab2 = np.random.randint(0, 130, size=500)
df = DataFrame({'value': np.random.randn(500),
'key1': lev1.take(lab1),
'key2': lev2.take(lab2)})
result = df.groupby(['key1', 'key2']).value.rank()
expected = [piece.value.rank()
for key, piece in df.groupby(['key1', 'key2'])]
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
tm.assert_series_equal(result, expected)
result = df.groupby(['key1', 'key2']).value.rank(pct=True)
expected = [piece.value.rank(pct=True)
for key, piece in df.groupby(['key1', 'key2'])]
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("grps", [
['qux'], ['qux', 'quux']])
@pytest.mark.parametrize("vals", [
[2, 2, 8, 2, 6],
[pd.Timestamp('2018-01-02'), pd.Timestamp('2018-01-02'),
pd.Timestamp('2018-01-08'), pd.Timestamp('2018-01-02'),
pd.Timestamp('2018-01-06')]])
@pytest.mark.parametrize("ties_method,ascending,pct,exp", [
('average', True, False, [2., 2., 5., 2., 4.]),
('average', True, True, [0.4, 0.4, 1.0, 0.4, 0.8]),
('average', False, False, [4., 4., 1., 4., 2.]),
('average', False, True, [.8, .8, .2, .8, .4]),
('min', True, False, [1., 1., 5., 1., 4.]),
('min', True, True, [0.2, 0.2, 1.0, 0.2, 0.8]),
('min', False, False, [3., 3., 1., 3., 2.]),
('min', False, True, [.6, .6, .2, .6, .4]),
('max', True, False, [3., 3., 5., 3., 4.]),
('max', True, True, [0.6, 0.6, 1.0, 0.6, 0.8]),
('max', False, False, [5., 5., 1., 5., 2.]),
('max', False, True, [1., 1., .2, 1., .4]),
('first', True, False, [1., 2., 5., 3., 4.]),
('first', True, True, [0.2, 0.4, 1.0, 0.6, 0.8]),
('first', False, False, [3., 4., 1., 5., 2.]),
('first', False, True, [.6, .8, .2, 1., .4]),
('dense', True, False, [1., 1., 3., 1., 2.]),
('dense', True, True, [1. / 3., 1. / 3., 3. / 3., 1. / 3., 2. / 3.]),
('dense', False, False, [3., 3., 1., 3., 2.]),
('dense', False, True, [3. / 3., 3. / 3., 1. / 3., 3. / 3., 2. / 3.]),
])
def test_rank_args(grps, vals, ties_method, ascending, pct, exp):
key = np.repeat(grps, len(vals))
vals = vals * len(grps)
df = DataFrame({'key': key, 'val': vals})
result = df.groupby('key').rank(method=ties_method,
ascending=ascending, pct=pct)
exp_df = DataFrame(exp * len(grps), columns=['val'])
tm.assert_frame_equal(result, exp_df)
@pytest.mark.parametrize("grps", [
['qux'], ['qux', 'quux']])
@pytest.mark.parametrize("vals", [
[-np.inf, -np.inf, np.nan, 1., np.nan, np.inf, np.inf],
])
@pytest.mark.parametrize("ties_method,ascending,na_option,exp", [
('average', True, 'keep', [1.5, 1.5, np.nan, 3, np.nan, 4.5, 4.5]),
('average', True, 'top', [3.5, 3.5, 1.5, 5., 1.5, 6.5, 6.5]),
('average', True, 'bottom', [1.5, 1.5, 6.5, 3., 6.5, 4.5, 4.5]),
('average', False, 'keep', [4.5, 4.5, np.nan, 3, np.nan, 1.5, 1.5]),
('average', False, 'top', [6.5, 6.5, 1.5, 5., 1.5, 3.5, 3.5]),
('average', False, 'bottom', [4.5, 4.5, 6.5, 3., 6.5, 1.5, 1.5]),
('min', True, 'keep', [1., 1., np.nan, 3., np.nan, 4., 4.]),
('min', True, 'top', [3., 3., 1., 5., 1., 6., 6.]),
('min', True, 'bottom', [1., 1., 6., 3., 6., 4., 4.]),
('min', False, 'keep', [4., 4., np.nan, 3., np.nan, 1., 1.]),
('min', False, 'top', [6., 6., 1., 5., 1., 3., 3.]),
('min', False, 'bottom', [4., 4., 6., 3., 6., 1., 1.]),
('max', True, 'keep', [2., 2., np.nan, 3., np.nan, 5., 5.]),
('max', True, 'top', [4., 4., 2., 5., 2., 7., 7.]),
('max', True, 'bottom', [2., 2., 7., 3., 7., 5., 5.]),
('max', False, 'keep', [5., 5., np.nan, 3., np.nan, 2., 2.]),
('max', False, 'top', [7., 7., 2., 5., 2., 4., 4.]),
('max', False, 'bottom', [5., 5., 7., 3., 7., 2., 2.]),
('first', True, 'keep', [1., 2., np.nan, 3., np.nan, 4., 5.]),
('first', True, 'top', [3., 4., 1., 5., 2., 6., 7.]),
('first', True, 'bottom', [1., 2., 6., 3., 7., 4., 5.]),
('first', False, 'keep', [4., 5., np.nan, 3., np.nan, 1., 2.]),
('first', False, 'top', [6., 7., 1., 5., 2., 3., 4.]),
('first', False, 'bottom', [4., 5., 6., 3., 7., 1., 2.]),
('dense', True, 'keep', [1., 1., np.nan, 2., np.nan, 3., 3.]),
('dense', True, 'top', [2., 2., 1., 3., 1., 4., 4.]),
('dense', True, 'bottom', [1., 1., 4., 2., 4., 3., 3.]),
('dense', False, 'keep', [3., 3., np.nan, 2., np.nan, 1., 1.]),
('dense', False, 'top', [4., 4., 1., 3., 1., 2., 2.]),
('dense', False, 'bottom', [3., 3., 4., 2., 4., 1., 1.])
])
def test_infs_n_nans(grps, vals, ties_method, ascending, na_option, exp):
# GH 20561
key = np.repeat(grps, len(vals))
vals = vals * len(grps)
df = DataFrame({'key': key, 'val': vals})
result = df.groupby('key').rank(method=ties_method,
ascending=ascending,
na_option=na_option)
exp_df = DataFrame(exp * len(grps), columns=['val'])
tm.assert_frame_equal(result, exp_df)
@pytest.mark.parametrize("grps", [
['qux'], ['qux', 'quux']])
@pytest.mark.parametrize("vals", [
[2, 2, np.nan, 8, 2, 6, np.nan, np.nan],
[pd.Timestamp('2018-01-02'), pd.Timestamp('2018-01-02'), np.nan,
pd.Timestamp('2018-01-08'), pd.Timestamp('2018-01-02'),
pd.Timestamp('2018-01-06'), np.nan, np.nan]
])
@pytest.mark.parametrize("ties_method,ascending,na_option,pct,exp", [
('average', True, 'keep', False,
[2., 2., np.nan, 5., 2., 4., np.nan, np.nan]),
('average', True, 'keep', True,
[0.4, 0.4, np.nan, 1.0, 0.4, 0.8, np.nan, np.nan]),
('average', False, 'keep', False,
[4., 4., np.nan, 1., 4., 2., np.nan, np.nan]),
('average', False, 'keep', True,
[.8, 0.8, np.nan, 0.2, 0.8, 0.4, np.nan, np.nan]),
('min', True, 'keep', False,
[1., 1., np.nan, 5., 1., 4., np.nan, np.nan]),
('min', True, 'keep', True,
[0.2, 0.2, np.nan, 1.0, 0.2, 0.8, np.nan, np.nan]),
('min', False, 'keep', False,
[3., 3., np.nan, 1., 3., 2., np.nan, np.nan]),
('min', False, 'keep', True,
[.6, 0.6, np.nan, 0.2, 0.6, 0.4, np.nan, np.nan]),
('max', True, 'keep', False,
[3., 3., np.nan, 5., 3., 4., np.nan, np.nan]),
('max', True, 'keep', True,
[0.6, 0.6, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan]),
('max', False, 'keep', False,
[5., 5., np.nan, 1., 5., 2., np.nan, np.nan]),
('max', False, 'keep', True,
[1., 1., np.nan, 0.2, 1., 0.4, np.nan, np.nan]),
('first', True, 'keep', False,
[1., 2., np.nan, 5., 3., 4., np.nan, np.nan]),
('first', True, 'keep', True,
[0.2, 0.4, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan]),
('first', False, 'keep', False,
[3., 4., np.nan, 1., 5., 2., np.nan, np.nan]),
('first', False, 'keep', True,
[.6, 0.8, np.nan, 0.2, 1., 0.4, np.nan, np.nan]),
('dense', True, 'keep', False,
[1., 1., np.nan, 3., 1., 2., np.nan, np.nan]),
('dense', True, 'keep', True,
[1. / 3., 1. / 3., np.nan, 3. / 3., 1. / 3., 2. / 3., np.nan, np.nan]),
('dense', False, 'keep', False,
[3., 3., np.nan, 1., 3., 2., np.nan, np.nan]),
('dense', False, 'keep', True,
[3. / 3., 3. / 3., np.nan, 1. / 3., 3. / 3., 2. / 3., np.nan, np.nan]),
('average', True, 'bottom', False, [2., 2., 7., 5., 2., 4., 7., 7.]),
('average', True, 'bottom', True,
[0.25, 0.25, 0.875, 0.625, 0.25, 0.5, 0.875, 0.875]),
('average', False, 'bottom', False, [4., 4., 7., 1., 4., 2., 7., 7.]),
('average', False, 'bottom', True,
[0.5, 0.5, 0.875, 0.125, 0.5, 0.25, 0.875, 0.875]),
('min', True, 'bottom', False, [1., 1., 6., 5., 1., 4., 6., 6.]),
('min', True, 'bottom', True,
[0.125, 0.125, 0.75, 0.625, 0.125, 0.5, 0.75, 0.75]),
('min', False, 'bottom', False, [3., 3., 6., 1., 3., 2., 6., 6.]),
('min', False, 'bottom', True,
[0.375, 0.375, 0.75, 0.125, 0.375, 0.25, 0.75, 0.75]),
('max', True, 'bottom', False, [3., 3., 8., 5., 3., 4., 8., 8.]),
('max', True, 'bottom', True,
[0.375, 0.375, 1., 0.625, 0.375, 0.5, 1., 1.]),
('max', False, 'bottom', False, [5., 5., 8., 1., 5., 2., 8., 8.]),
('max', False, 'bottom', True,
[0.625, 0.625, 1., 0.125, 0.625, 0.25, 1., 1.]),
('first', True, 'bottom', False, [1., 2., 6., 5., 3., 4., 7., 8.]),
('first', True, 'bottom', True,
[0.125, 0.25, 0.75, 0.625, 0.375, 0.5, 0.875, 1.]),
('first', False, 'bottom', False, [3., 4., 6., 1., 5., 2., 7., 8.]),
('first', False, 'bottom', True,
[0.375, 0.5, 0.75, 0.125, 0.625, 0.25, 0.875, 1.]),
('dense', True, 'bottom', False, [1., 1., 4., 3., 1., 2., 4., 4.]),
('dense', True, 'bottom', True,
[0.25, 0.25, 1., 0.75, 0.25, 0.5, 1., 1.]),
('dense', False, 'bottom', False, [3., 3., 4., 1., 3., 2., 4., 4.]),
('dense', False, 'bottom', True,
[0.75, 0.75, 1., 0.25, 0.75, 0.5, 1., 1.])
])
def test_rank_args_missing(grps, vals, ties_method, ascending,
na_option, pct, exp):
key = np.repeat(grps, len(vals))
vals = vals * len(grps)
df = DataFrame({'key': key, 'val': vals})
result = df.groupby('key').rank(method=ties_method,
ascending=ascending,
na_option=na_option, pct=pct)
exp_df = DataFrame(exp * len(grps), columns=['val'])
tm.assert_frame_equal(result, exp_df)
@pytest.mark.parametrize("pct,exp", [
(False, [3., 3., 3., 3., 3.]),
(True, [.6, .6, .6, .6, .6])])
def test_rank_resets_each_group(pct, exp):
df = DataFrame(
{'key': ['a', 'a', 'a', 'a', 'a', 'b', 'b', 'b', 'b', 'b'],
'val': [1] * 10}
)
result = df.groupby('key').rank(pct=pct)
exp_df = DataFrame(exp * 2, columns=['val'])
tm.assert_frame_equal(result, exp_df)
def test_rank_avg_even_vals():
df = DataFrame({'key': ['a'] * 4, 'val': [1] * 4})
result = df.groupby('key').rank()
exp_df = DataFrame([2.5, 2.5, 2.5, 2.5], columns=['val'])
tm.assert_frame_equal(result, exp_df)
@pytest.mark.parametrize("ties_method", [
'average', 'min', 'max', 'first', 'dense'])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("na_option", ["keep", "top", "bottom"])
@pytest.mark.parametrize("pct", [True, False])
@pytest.mark.parametrize("vals", [
['bar', 'bar', 'foo', 'bar', 'baz'],
['bar', np.nan, 'foo', np.nan, 'baz']
])
def test_rank_object_raises(ties_method, ascending, na_option,
pct, vals):
df = DataFrame({'key': ['foo'] * 5, 'val': vals})
with pytest.raises(TypeError, match="not callable"):
df.groupby('key').rank(method=ties_method,
ascending=ascending,
na_option=na_option, pct=pct)
@pytest.mark.parametrize("na_option", [True, "bad", 1])
@pytest.mark.parametrize("ties_method", [
'average', 'min', 'max', 'first', 'dense'])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("pct", [True, False])
@pytest.mark.parametrize("vals", [
['bar', 'bar', 'foo', 'bar', 'baz'],
['bar', np.nan, 'foo', np.nan, 'baz'],
[1, np.nan, 2, np.nan, 3]
])
def test_rank_naoption_raises(ties_method, ascending, na_option, pct, vals):
df = DataFrame({'key': ['foo'] * 5, 'val': vals})
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
with pytest.raises(ValueError, match=msg):
df.groupby('key').rank(method=ties_method,
ascending=ascending,
na_option=na_option, pct=pct)
def test_rank_empty_group():
# see gh-22519
column = "A"
df = DataFrame({
"A": [0, 1, 0],
"B": [1., np.nan, 2.]
})
result = df.groupby(column).B.rank(pct=True)
expected = Series([0.5, np.nan, 1.0], name="B")
tm.assert_series_equal(result, expected)
result = df.groupby(column).rank(pct=True)
expected = DataFrame({"B": [0.5, np.nan, 1.0]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("input_key,input_value,output_value", [
([1, 2], [1, 1], [1.0, 1.0]),
([1, 1, 2, 2], [1, 2, 1, 2], [0.5, 1.0, 0.5, 1.0]),
([1, 1, 2, 2], [1, 2, 1, np.nan], [0.5, 1.0, 1.0, np.nan]),
([1, 1, 2], [1, 2, np.nan], [0.5, 1.0, np.nan])
])
def test_rank_zero_div(input_key, input_value, output_value):
# GH 23666
df = DataFrame({"A": input_key, "B": input_value})
result = df.groupby("A").rank(method="dense", pct=True)
expected = DataFrame({"B": output_value})
tm.assert_frame_equal(result, expected)
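# Illustrative addition (ours, not part of the original test-suite): dense
# ranking with pct=True divides by the number of distinct values within each
# group, which is the behaviour test_rank_zero_div above exercises for groups
# containing NaN.
def test_rank_dense_pct_illustration():
    df = DataFrame({"A": [1, 1, 1], "B": [10, 10, 20]})
    result = df.groupby("A").B.rank(method="dense", pct=True)
    expected = Series([0.5, 0.5, 1.0], name="B")
    tm.assert_series_equal(result, expected)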
|
bsd-3-clause
|
Sentient07/scikit-learn
|
examples/tree/plot_tree_regression_multioutput.py
|
73
|
1854
|
"""
===================================================================
Multi-output Decision Tree Regression
===================================================================
An example to illustrate multi-output regression with decision tree.
A :ref:`decision tree <tree>`
is used to simultaneously predict the noisy x and y observations of a circle
given a single underlying feature. As a result, it learns local linear
regressions approximating the circle.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(100, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y[::5, :] += (0.5 - rng.rand(20, 2))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_3 = DecisionTreeRegressor(max_depth=8)
regr_1.fit(X, y)
regr_2.fit(X, y)
regr_3.fit(X, y)
# Predict
X_test = np.arange(-100.0, 100.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
y_3 = regr_3.predict(X_test)
# Plot the results
plt.figure()
s = 50
plt.scatter(y[:, 0], y[:, 1], c="navy", s=s, label="data")
plt.scatter(y_1[:, 0], y_1[:, 1], c="cornflowerblue", s=s, label="max_depth=2")
plt.scatter(y_2[:, 0], y_2[:, 1], c="c", s=s, label="max_depth=5")
plt.scatter(y_3[:, 0], y_3[:, 1], c="orange", s=s, label="max_depth=8")
plt.xlim([-6, 6])
plt.ylim([-6, 6])
plt.xlabel("target 1")
plt.ylabel("target 2")
plt.title("Multi-output Decision Tree Regression")
plt.legend()
plt.show()
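# Illustrative addition (ours, not part of the original example): the
# overfitting remark in the docstring can be quantified by comparing training
# scores -- the deeper trees reproduce the noisy training targets almost
# perfectly.
for name, regr in [("max_depth=2", regr_1), ("max_depth=5", regr_2),
                   ("max_depth=8", regr_3)]:
    print("%s: R^2 on the training data = %.3f" % (name, regr.score(X, y)))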
|
bsd-3-clause
|
yuxng/Deep_ISM
|
ISM/lib/ism/test.py
|
1
|
15950
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Test a Fast R-CNN network on an imdb (image database)."""
from ism.config import cfg, get_output_dir
import argparse
from utils.timer import Timer
import numpy as np
import cv2
import caffe
import cPickle
from utils.blob import im_list_to_blob
import os
import math
import scipy.io
from scipy.optimize import minimize
def _get_image_blob(im, im_depth):
"""Converts an image into a network input.
    Arguments:
        im (ndarray): a color image in BGR order
        im_depth (ndarray): a single-channel depth image
    Returns:
        blob (ndarray): a data blob holding the color image
        blob_depth (ndarray): a data blob holding the depth image
        im_info (ndarray): image height, width and scale factor
        im_scale_factors (list): list of image scales (relative to im) used
            in the image pyramid
"""
# RGB
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
processed_ims = []
im_scale_factors = []
assert len(cfg.TEST.SCALES_BASE) == 1
im_scale = cfg.TEST.SCALES_BASE[0]
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
# im_info
im_info = np.hstack((im.shape[:2], im_scale))[np.newaxis, :]
# depth
im_orig = im_depth.astype(np.float32, copy=True)
im_orig = im_orig / im_orig.max() * 255
im_orig = np.tile(im_orig[:,:,np.newaxis], (1,1,3))
im_orig -= cfg.PIXEL_MEANS
processed_ims_depth = []
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
processed_ims_depth.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims, 3)
blob_depth = im_list_to_blob(processed_ims_depth, 3)
return blob, blob_depth, im_info, np.array(im_scale_factors)
def im_detect(net, im, im_depth, num_classes):
"""Detect object classes in an image given boxes on grids.
Arguments:
        net (caffe.Net): Fast R-CNN network to use
        im (ndarray): color image to test (in BGR order)
        im_depth (ndarray): depth image to test
        num_classes (int): number of object classes
    Returns:
        boxes (ndarray): regions of interest predicted by the network
            (in image coordinates)
        scores (ndarray): scores of the predicted regions
        seg_cls_prob (ndarray): class probabilities from the segmentation branch
        seg_view_pred (ndarray): viewpoint predictions from the segmentation branch
"""
# compute image blob
im_blob, im_depth_blob, im_info, im_scale_factors = _get_image_blob(im, im_depth)
# reshape network inputs
net.blobs['data_image'].reshape(*(im_blob.shape))
net.blobs['data_depth'].reshape(*(im_depth_blob.shape))
net.blobs['im_info'].reshape(*(im_info.shape))
blobs_out = net.forward(data_image=im_blob.astype(np.float32, copy=False),
data_depth=im_depth_blob.astype(np.float32, copy=False),
im_info=im_info.astype(np.float32, copy=False))
# get outputs
scale = im_info[0, 2]
boxes = blobs_out['rois'][:, 1:].copy() / scale
scores = blobs_out['scores'].copy()
seg_cls_prob = blobs_out['seg_cls_prob']
seg_view_pred = blobs_out['seg_view_pred']
return boxes, scores, seg_cls_prob, seg_view_pred
# backproject pixels into 3D points
def backproject_camera(im_depth, meta_data):
depth = im_depth.astype(np.float32, copy=True) / meta_data['factor_depth']
# get intrinsic matrix
K = meta_data['intrinsic_matrix']
K = np.matrix(K)
Kinv = np.linalg.inv(K)
# compute the 3D points
width = depth.shape[1]
height = depth.shape[0]
points = np.zeros((height, width, 3), dtype=np.float32)
# construct the 2D points matrix
x, y = np.meshgrid(np.arange(width), np.arange(height))
ones = np.ones((height, width), dtype=np.float32)
x2d = np.stack((x, y, ones), axis=2).reshape(width*height, 3)
# backprojection
R = Kinv * x2d.transpose()
# compute the norm
N = np.linalg.norm(R, axis=0)
# normalization
R = np.divide(R, np.tile(N, (3,1)))
# compute the 3D points
X = np.multiply(np.tile(depth.reshape(1, width*height), (3, 1)), R)
points[y, x, 0] = X[0,:].reshape(height, width)
points[y, x, 1] = X[1,:].reshape(height, width)
points[y, x, 2] = X[2,:].reshape(height, width)
# mask
index = np.where(im_depth == 0)
points[index[0], index[1], :] = 0
return points
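# Illustrative helper (ours, not part of the original pipeline): with a toy
# intrinsic matrix and a constant depth image, the centre pixel backprojects
# straight down the optical axis to (0, 0, depth).
def _backproject_sanity_check():
    K = np.array([[500., 0., 320.], [0., 500., 240.], [0., 0., 1.]])
    meta = {'factor_depth': 1.0, 'intrinsic_matrix': K}
    depth_image = 2.0 * np.ones((480, 640), dtype=np.float32)
    points = backproject_camera(depth_image, meta)
    assert np.allclose(points[240, 320], [0.0, 0.0, 2.0], atol=1e-5)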
def loss_pose(x, points, cls_label, azimuth_sin_pred, azimuth_cos_pred, elevation_sin_pred):
""" loss function for pose esimation """
rx = x[0]
ry = x[1]
rz = x[2]
C = x[3:6].reshape((3,1))
# construct rotation matrix
Rx = np.matrix([[1, 0, 0], [0, math.cos(rx), -math.sin(rx)], [0, math.sin(rx), math.cos(rx)]])
Ry = np.matrix([[math.cos(ry), 0, math.sin(ry)], [0, 1, 0], [-math.sin(ry), 0, math.cos(ry)]])
Rz = np.matrix([[math.cos(rz), -math.sin(rz), 0], [math.sin(rz), math.cos(rz), 0], [0, 0, 1]])
R = Rz * Ry * Rx
# transform the points
index = np.where(cls_label > 0)
x3d = points[index[0], index[1], :].transpose()
num = x3d.shape[1]
Cmat = np.tile(C, (1, num))
X = R * (x3d - Cmat)
# compute the azimuth and elevation of each 3D point
r = np.linalg.norm(X, axis=0)
elevation_sin = np.sin(np.pi/2 - np.arccos(np.divide(X[2,:], r)))
azimuth_sin = np.sin(np.arctan2(X[1,:], X[0,:]))
azimuth_cos = np.cos(np.arctan2(X[1,:], X[0,:]))
# compute the loss
loss = (np.mean(np.power(azimuth_sin - azimuth_sin_pred[index[0], index[1]], 2)) +
np.mean(np.power(azimuth_cos - azimuth_cos_pred[index[0], index[1]], 2)) +
np.mean(np.power(elevation_sin - elevation_sin_pred[index[0], index[1]], 2))) / 3
return loss
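# Side note on the elevation term in loss_pose (a hedged observation, not a
# change to the original code): sin(pi/2 - arccos(t)) equals t for t in [-1, 1],
# so elevation_sin is simply the normalized z-coordinate X[2, :] / r. A quick
# numeric check of that identity:
def _check_elevation_identity():
    t = np.linspace(-1.0, 1.0, 101)
    assert np.allclose(np.sin(np.pi / 2 - np.arccos(t)), t)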
def pose_estimate(im_depth, meta_data, cls_prob, center_pred):
""" estimate the pose of object from network predication """
# compute 3D points in camera coordinate framework
points = backproject_camera(im_depth, meta_data)
# rescale the 3D point map
height = center_pred.shape[2]
width = center_pred.shape[3]
    # note: cv2.resize expects dsize as (width, height)
    im_depth_rescale = cv2.resize(im_depth, dsize=(width, height), interpolation=cv2.INTER_NEAREST)
    points_rescale = cv2.resize(points, dsize=(width, height), interpolation=cv2.INTER_NEAREST)
# find the max cls labels
num_channels = 3
cls_label = np.argmax(cls_prob, axis = 1).reshape((height, width))
x, y = np.meshgrid(np.arange(width), np.arange(height))
azimuth_sin_pred = center_pred[:, num_channels*cls_label+0, y, x].reshape((height, width))
azimuth_cos_pred = center_pred[:, num_channels*cls_label+1, y, x].reshape((height, width))
elevation_sin_pred = center_pred[:, num_channels*cls_label+2, y, x].reshape((height, width))
# optimization
# initialization
x0 = np.zeros((6,1), dtype=np.float32)
index = np.where(im_depth > 0)
x3d = points[index[0], index[1], :]
x0[3:6] = np.mean(x3d, axis=0).reshape((3,1))
xmin = np.min(x3d, axis=0)
xmax = np.max(x3d, axis=0)
factor = 2
bounds = ((-np.pi, np.pi), (-np.pi, np.pi), (-np.pi, np.pi), (factor*xmin[0], factor*xmax[0]), (factor*xmin[1], factor*xmax[1]), (xmin[2], None))
res = minimize(loss_pose, x0, (points_rescale, cls_label, azimuth_sin_pred, azimuth_cos_pred, elevation_sin_pred), method='SLSQP', bounds=bounds, options={'disp': True})
print res.x
# transform the points
rx = res.x[0]
ry = res.x[1]
rz = res.x[2]
C = res.x[3:6].reshape((3,1))
# construct rotation matrix
Rx = np.matrix([[1, 0, 0], [0, math.cos(rx), -math.sin(rx)], [0, math.sin(rx), math.cos(rx)]])
Ry = np.matrix([[math.cos(ry), 0, math.sin(ry)], [0, 1, 0], [-math.sin(ry), 0, math.cos(ry)]])
Rz = np.matrix([[math.cos(rz), -math.sin(rz), 0], [math.sin(rz), math.cos(rz), 0], [0, 0, 1]])
R = Rz * Ry * Rx
# transform the points
index = np.where(im_depth_rescale > 0)
x3d = points_rescale[index[0], index[1], :].transpose()
num = x3d.shape[1]
Cmat = np.tile(C, (1, num))
points_transform = R * (x3d - Cmat)
return points_rescale, np.array(points_transform)
def hough_voting(cls_prob, center_pred):
""" compute the Hough voting space """
num_channels = 5
num_classes = cls_prob.shape[1]
height = center_pred.shape[2]
width = center_pred.shape[3]
x, y = np.meshgrid(np.arange(width), np.arange(height))
# construct the 2D points matrix
x2d = np.stack((x, y), axis=2).reshape(width*height, 2)
# for each class
for i in range(1, num_classes):
vote = np.zeros((width*height, ), dtype=np.float32)
x1 = np.inf * np.ones((width*height, ), dtype=np.float32)
y1 = np.inf * np.ones((width*height, ), dtype=np.float32)
x2 = -np.inf * np.ones((width*height, ), dtype=np.float32)
y2 = -np.inf * np.ones((width*height, ), dtype=np.float32)
vx = center_pred[:, num_channels*i+0, y, x].reshape((height, width))
vy = center_pred[:, num_channels*i+1, y, x].reshape((height, width))
# compute line norms
norms = np.stack((-vy, vx), axis=2).reshape(width*height, 2)
# for each line
for j in range(width*height):
p = x2d[j, :]
n = norms[j, :].transpose()
# compute point to line distance
d = np.absolute( np.dot(x2d - np.tile(p, (width*height, 1)), n)) / np.linalg.norm(n)
index = np.where(d < 1)[0]
vote[index] = vote[index] + 1
ind = np.where(x1[index] > p[0])[0]
x1[index[ind]] = p[0]
ind = np.where(y1[index] > p[1])[0]
y1[index[ind]] = p[1]
ind = np.where(x2[index] < p[0])[0]
x2[index[ind]] = p[0]
ind = np.where(y2[index] < p[1])[0]
y2[index[ind]] = p[1]
import matplotlib.pyplot as plt
fig = plt.figure()
fig.add_subplot(121)
plt.imshow(cls_prob[:,i,:,:].reshape((height, width)))
fig.add_subplot(122)
plt.imshow(vote.reshape((height, width)))
# draw a bounding box
ind = np.argmax(vote)
plt.gca().add_patch(
plt.Rectangle((x1[ind], y1[ind]), x2[ind] - x1[ind],
y2[ind] - y1[ind], fill=False,
edgecolor='r', linewidth=3)
)
plt.show()
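# Hedged optimization note (an assumption about a possible speed-up, not part of
# the original code): the nested per-pixel loop above is O((H*W)^2) in pure
# Python. The point-to-line distances used inside it can be computed for one
# reference pixel with a single vectorized call that mirrors the scalar version:
def _point_to_line_distances(x2d, p, n):
    # distance from every 2D point in x2d to the line through p with normal n
    return np.absolute(np.dot(x2d - p, n)) / np.linalg.norm(n)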
def vis_detections(im, im_depth, boxes, scores, cls_prob, center_pred, points_rescale, points_transform):
"""Visual debugging of detections."""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
# show image
ax = fig.add_subplot(331)
im = im[:, :, (2, 1, 0)]
plt.imshow(im)
ax.set_title('input image')
# show depth
ax = fig.add_subplot(332)
plt.imshow(im_depth)
ax.set_title('input depth')
# show class label
height = center_pred.shape[2]
width = center_pred.shape[3]
cls_label = np.argmax(cls_prob, axis = 1).reshape((height, width))
ax = fig.add_subplot(333)
plt.imshow(cls_label)
ax.set_title('class pred')
# show the target
ax = fig.add_subplot(334)
plt.imshow(im)
for i in xrange(boxes.shape[0]):
roi = boxes[i, :4]
plt.gca().add_patch(plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0],
roi[3] - roi[1], fill=False,
edgecolor='r', linewidth=3))
break
# plt.imshow(cls_label)
# ax.set_title('center pred')
num_channels = 3
x, y = np.meshgrid(np.arange(width), np.arange(height))
# vx = center_pred[:, num_channels*cls_label+0, y, x].reshape((height, width))
# vy = center_pred[:, num_channels*cls_label+1, y, x].reshape((height, width))
azimuth_sin = center_pred[:, num_channels*cls_label+0, y, x].reshape((height, width))
azimuth_cos = center_pred[:, num_channels*cls_label+1, y, x].reshape((height, width))
elevation_sin = center_pred[:, num_channels*cls_label+2, y, x].reshape((height, width))
# for x in xrange(vx.shape[1]):
# for y in xrange(vx.shape[0]):
# if vx[y, x] != 0 and vy[y, x] != 0 and cls_label[y, x] != 0:
# plt.gca().annotate("", xy=(x + 2*vx[y, x], y + 2*vy[y, x]), xycoords='data', xytext=(x, y), textcoords='data',
# arrowprops=dict(arrowstyle="->", connectionstyle="arc3"))
# show the azimuth sin image
ax = fig.add_subplot(335)
plt.imshow(azimuth_sin)
ax.set_title('azimuth sin pred')
# show the azimuth cos image
ax = fig.add_subplot(336)
plt.imshow(azimuth_cos)
ax.set_title('azimuth cos pred')
# show the elevation sin image
ax = fig.add_subplot(337)
plt.imshow(elevation_sin)
ax.set_title('elevation sin pred')
# show the 3D points
if points_rescale.shape[0] > 0:
ax = fig.add_subplot(338, projection='3d')
ax.scatter(points_rescale[:,:,0], points_rescale[:,:,1], points_rescale[:,:,2], c='r', marker='o')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_aspect('equal')
ax.set_title('input point cloud')
# show the 3D points transform
if points_transform.shape[1] > 0:
ax = fig.add_subplot(339, projection='3d')
ax.scatter(points_transform[0,:], points_transform[1,:], points_transform[2,:], c='r', marker='o')
ax.scatter(0, 0, 0, c='g', marker='o')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax.set_aspect('equal')
ax.set_title('transformed point cloud')
plt.show()
def test_net(net, imdb):
    """Test a Fast R-CNN network on an image database."""
    output_dir = get_output_dir(imdb, net)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    det_file = os.path.join(output_dir, 'detections.pkl')
    print imdb.name
    if os.path.exists(det_file):
        return
num_images = len(imdb.image_index)
detections = [[] for _ in xrange(num_images)]
# timers
_t = {'im_detect' : Timer(), 'misc' : Timer()}
# perm = np.random.permutation(np.arange(num_images))
for i in xrange(num_images):
# for i in perm:
im = cv2.imread(imdb.image_path_at(i))
im_depth = cv2.imread(imdb.depth_path_at(i), cv2.IMREAD_UNCHANGED)
# shift
# rows = im.shape[0]
# cols = im.shape[1]
# M = np.float32([[1,0,50],[0,1,25]])
# im = cv2.warpAffine(im,M,(cols,rows))
# rescaling
# im = cv2.resize(im, None, None, fx=0.6, fy=0.6, interpolation=cv2.INTER_LINEAR)
_t['im_detect'].tic()
boxes, scores, seg_cls_prob, seg_view_pred = im_detect(net, im, im_depth, imdb.num_classes)
_t['im_detect'].toc()
_t['misc'].tic()
det = {'boxes': boxes, 'scores': scores, 'seg_cls_prob': seg_cls_prob, 'seg_view_pred': seg_view_pred}
detections[i] = det
_t['misc'].toc()
print 'im_detect: {:d}/{:d} {:.3f}s {:.3f}s' \
.format(i + 1, num_images, _t['im_detect'].average_time, _t['misc'].average_time)
# Hough voting
# hough_voting(cls_prob, center_pred)
# read meta data
meta_data_path = imdb.metadata_path_at(i)
# compute object pose
if os.path.exists(meta_data_path):
meta_data = scipy.io.loadmat(meta_data_path)
points_rescale, points_transform = pose_estimate(im_depth, meta_data, seg_cls_prob, seg_view_pred)
else:
points_rescale = np.zeros((0, 0, 3), dtype=np.float32)
points_transform = np.zeros((3, 0), dtype=np.float32)
vis_detections(im, im_depth, boxes, scores, seg_cls_prob, seg_view_pred, points_rescale, points_transform)
det_file = os.path.join(output_dir, 'detections.pkl')
with open(det_file, 'wb') as f:
        cPickle.dump(detections, f, cPickle.HIGHEST_PROTOCOL)
|
mit
|
ricket1978/ggplot
|
ggplot/components/loess.py
|
13
|
1602
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
"""
loess(formula, data, weights, subset, na.action, model = FALSE,
span = 0.75, enp.target, degree = 2,
parametric = FALSE, drop.square = FALSE, normalize = TRUE,
family = c("gaussian", "symmetric"),
method = c("loess", "model.frame"),
control = loess.control(...), ...)
a formula specifying the numeric response and one to four numeric predictors
(best specified via an interaction, but can also be specified additively).
Will be coerced to a formula if necessary.
"""
import pylab as pl
import pandas as pd
import numpy as np
def loess( x, h, xp, yp ):
"loess func"
"""args:
x => location
h => bandwidth (not sure how to choose this automatically)
xp => vector
yp => vector
example:
X = np.arange(1, 501)
y = np.random.random_integers(low=75, high=130, size=len(X))
data = np.array(zip(X,y))
s1, s2 = [], []
for k in data[:,0]:
s1.append( loess( k, 5, data[:,0], data[:,1] ) )
s2.append( loess( k, 100, data[:,0], data[:,1] ) )
pl.plot( data[:,0], data[:,1], 'o', color="white", markersize=1, linewidth=3 )
pl.plot( data[:,0], np.array(s1), 'k-', data[:,0], np.array(s2), 'k--' )
pl.show()
"""
w = np.exp( -0.5*( ((x-xp)/h)**2 )/np.sqrt(2*np.pi*h**2) )
b = sum(w*xp)*sum(w*yp) - sum(w)*sum(w*xp*yp)
b /= sum(w*xp)**2 - sum(w)*sum(w*xp**2)
a = ( sum(w*yp) - b*sum(w*xp) )/sum(w)
return a + b*x
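# A minimal usage sketch (hypothetical data, not from the original module):
# smooth a noisy straight line and report how far the smoothed values stay
# from the underlying trend for a moderate bandwidth.
if __name__ == "__main__":
    xs = np.arange(1.0, 101.0)
    ys = 2.0 * xs + np.random.normal(scale=1.0, size=xs.shape)
    smoothed = np.array([loess(x, 10.0, xs, ys) for x in xs])
    print("max deviation from trend:", np.max(np.abs(smoothed - 2.0 * xs)))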
|
bsd-2-clause
|
mne-tools/mne-tools.github.io
|
0.17/_downloads/de8196571edd5eb8153cbe3f01f1ddef/plot_linear_regression_raw.py
|
11
|
2388
|
"""
========================================
Regression on continuous data (rER[P/F])
========================================
This demonstrates how rER[P/F]s - obtained by regressing on the continuous
data - are a generalisation of traditional averaging. If all preprocessing
steps are the same, no overlap between epochs exists, and all predictors are
binary, regression is virtually identical to traditional averaging.
If overlap exists and/or predictors are continuous, traditional averaging
is inapplicable, but regression can estimate effects, including those of
continuous predictors.
rERPs are described in:
Smith, N. J., & Kutas, M. (2015). Regression-based estimation of ERP
waveforms: II. Non-linear effects, overlap correction, and practical
considerations. Psychophysiology, 52(2), 169-189.
"""
# Authors: Jona Sassenhagen <jona.sassenhagen@gmail.de>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.stats.regression import linear_regression_raw
# Load and preprocess data
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.pick_types(meg='grad', stim=True, eeg=False)
raw.filter(1, None, fir_design='firwin') # high-pass
# Set up events
events = mne.find_events(raw)
event_id = {'Aud/L': 1, 'Aud/R': 2}
tmin, tmax = -.1, .5
# regular epoching
picks = mne.pick_types(raw.info, meg=True)
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, reject=None,
baseline=None, preload=True, verbose=False)
# rERF
evokeds = linear_regression_raw(raw, events=events, event_id=event_id,
reject=None, tmin=tmin, tmax=tmax)
# linear_regression_raw returns a dict of evokeds
# select conditions similarly to mne.Epochs objects
# plot both results, and their difference
cond = "Aud/L"
fig, (ax1, ax2, ax3) = plt.subplots(3, 1)
params = dict(spatial_colors=True, show=False, ylim=dict(grad=(-200, 200)),
time_unit='s')
epochs[cond].average().plot(axes=ax1, **params)
evokeds[cond].plot(axes=ax2, **params)
contrast = mne.combine_evoked([evokeds[cond], -epochs[cond].average()],
weights='equal')
contrast.plot(axes=ax3, **params)
ax1.set_title("Traditional averaging")
ax2.set_title("rERF")
ax3.set_title("Difference")
plt.show()
|
bsd-3-clause
|