repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
Eric89GXL/mne-python | tutorials/epochs/plot_10_epochs_overview.py | 4 | 20205 |
# -*- coding: utf-8 -*-
"""
.. _tut-epochs-class:
The Epochs data structure: discontinuous data
=============================================
This tutorial covers the basics of creating and working with :term:`epoched
<epochs>` data. It introduces the :class:`~mne.Epochs` data structure in
detail, including how to load, query, subselect, export, and plot data from an
:class:`~mne.Epochs` object. For more information about visualizing
:class:`~mne.Epochs` objects, see :ref:`tut-visualize-epochs`. For info on
creating an :class:`~mne.Epochs` object from (possibly simulated) data in a
:class:`NumPy array <numpy.ndarray>`, see :ref:`tut_creating_data_structures`.
.. contents:: Page contents
:local:
:depth: 2
As usual we'll start by importing the modules we need:
"""
import os
import mne
###############################################################################
# :class:`~mne.Epochs` objects are a data structure for representing and
# analyzing equal-duration chunks of the EEG/MEG signal. :class:`~mne.Epochs`
# are most often used to represent data that is time-locked to repeated
# experimental events (such as stimulus onsets or subject button presses), but
# can also be used for storing sequential or overlapping frames of a continuous
# signal (e.g., for analysis of resting-state activity; see
# :ref:`fixed-length-events`). Inside an :class:`~mne.Epochs` object, the data
# are stored in an :class:`array <numpy.ndarray>` of shape ``(n_epochs,
# n_channels, n_times)``.
#
# :class:`~mne.Epochs` objects have many similarities with :class:`~mne.io.Raw`
# objects, including:
#
# - They can be loaded from and saved to disk in ``.fif`` format, and their
# data can be exported to a :class:`NumPy array <numpy.ndarray>` through the
# :meth:`~mne.Epochs.get_data` method or to a :class:`Pandas DataFrame
# <pandas.DataFrame>` through the :meth:`~mne.Epochs.to_data_frame` method.
#
# - Both :class:`~mne.Epochs` and :class:`~mne.io.Raw` objects support channel
# selection by index or name, including :meth:`~mne.Epochs.pick`,
# :meth:`~mne.Epochs.pick_channels` and :meth:`~mne.Epochs.pick_types`
# methods.
#
# - :term:`SSP projector <projector>` manipulation is possible through
# :meth:`~mne.Epochs.add_proj`, :meth:`~mne.Epochs.del_proj`, and
# :meth:`~mne.Epochs.plot_projs_topomap` methods.
#
# - Both :class:`~mne.Epochs` and :class:`~mne.io.Raw` objects have
# :meth:`~mne.Epochs.copy`, :meth:`~mne.Epochs.crop`,
# :meth:`~mne.Epochs.time_as_index`, :meth:`~mne.Epochs.filter`, and
# :meth:`~mne.Epochs.resample` methods.
#
# - Both :class:`~mne.Epochs` and :class:`~mne.io.Raw` objects have
# :attr:`~mne.Epochs.times`, :attr:`~mne.Epochs.ch_names`,
# :attr:`~mne.Epochs.proj`, and :class:`info <mne.Info>` attributes.
#
# - Both :class:`~mne.Epochs` and :class:`~mne.io.Raw` objects have built-in
# plotting methods :meth:`~mne.Epochs.plot`, :meth:`~mne.Epochs.plot_psd`,
# and :meth:`~mne.Epochs.plot_psd_topomap`.
#
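# As a quick, purely illustrative sketch of this shared interface (assuming an
# :class:`~mne.Epochs` object named ``epochs``, like the one created later in
# this tutorial, with its data preloaded)::
#
#     epochs.filter(l_freq=None, h_freq=40.)   # low-pass filter in-place
#     arr = epochs.get_data()                  # (n_epochs, n_channels, n_times) array
#     df = epochs.to_data_frame()              # one column per channel
#     epochs.save('sample-epo.fif', overwrite=True)
#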
#
# Creating Epoched data from a ``Raw`` object
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The example dataset we've been using thus far doesn't include pre-epoched
# data, so in this section we'll load the continuous data and create epochs
# based on the events recorded in the :class:`~mne.io.Raw` object's STIM
# channels. As we often do in these tutorials, we'll :meth:`~mne.io.Raw.crop`
# the :class:`~mne.io.Raw` data to save memory:
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False).crop(tmax=60)
###############################################################################
# As we saw in the :ref:`tut-events-vs-annotations` tutorial, we can extract an
# events array from :class:`~mne.io.Raw` objects using :func:`mne.find_events`:
events = mne.find_events(raw, stim_channel='STI 014')
###############################################################################
# .. note::
#
# We could also have loaded the events from file, using
# :func:`mne.read_events`::
#
# sample_data_events_file = os.path.join(sample_data_folder,
# 'MEG', 'sample',
# 'sample_audvis_raw-eve.fif')
# events_from_file = mne.read_events(sample_data_events_file)
#
# See :ref:`tut-section-events-io` for more details.
#
#
# The :class:`~mne.io.Raw` object and the events array are the bare minimum
# needed to create an :class:`~mne.Epochs` object, which we create with the
# :class:`mne.Epochs` class constructor. However, you will almost surely want
# to change some of the other default parameters. Here we'll change ``tmin``
# and ``tmax`` (the time relative to each event at which to start and end each
# epoch). Note also that the :class:`~mne.Epochs` constructor accepts
# parameters ``reject`` and ``flat`` for rejecting individual epochs based on
# signal amplitude. See the :ref:`tut-reject-epochs-section` section for
# examples.
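# For illustration only (not executed here, and with thresholds that are
# arbitrary rather than tuned for this dataset), an amplitude-based rejection
# criterion could be passed like this::
#
#     reject_criteria = dict(mag=3000e-15,     # 3000 fT
#                            grad=3000e-13,    # 3000 fT/cm
#                            eeg=100e-6,       # 100 µV
#                            eog=200e-6)       # 200 µV
#     epochs = mne.Epochs(raw, events, tmin=-0.3, tmax=0.7,
#                         reject=reject_criteria)
#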
epochs = mne.Epochs(raw, events, tmin=-0.3, tmax=0.7)
###############################################################################
# You'll see from the output that:
#
# - all 320 events were used to create epochs
#
# - baseline correction was automatically applied (by default, baseline is
# defined as the time span from ``tmin`` to ``0``, but can be customized with
# the ``baseline`` parameter)
#
# - no additional metadata was provided (see :ref:`tut-epochs-metadata` for
# details)
#
# - the projection operators present in the :class:`~mne.io.Raw` file were
# copied over to the :class:`~mne.Epochs` object
#
# If we print the :class:`~mne.Epochs` object, we'll also see a note that the
# epochs are not copied into memory by default, and a count of the number of
# epochs created for each integer Event ID.
print(epochs)
###############################################################################
# Notice that the Event IDs are in quotes; since we didn't provide an event
# dictionary, the :class:`mne.Epochs` constructor created one automatically and
# used the string representation of the integer Event IDs as the dictionary
# keys. This is more clear when viewing the ``event_id`` attribute:
print(epochs.event_id)
###############################################################################
# This time let's pass ``preload=True`` and provide an event dictionary; our
# provided dictionary will get stored as the ``event_id`` attribute and will
# make referencing events and pooling across event types easier:
event_dict = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
'visual/right': 4, 'face': 5, 'buttonpress': 32}
epochs = mne.Epochs(raw, events, tmin=-0.3, tmax=0.7, event_id=event_dict,
preload=True)
print(epochs.event_id)
del raw # we're done with raw, free up some memory
###############################################################################
# Notice that the output now mentions "1 bad epoch dropped". In the tutorial
# section :ref:`tut-reject-epochs-section` we saw how you can specify channel
# amplitude criteria for rejecting epochs, but here we haven't specified any
# such criteria. In this case, it turns out that the last event was too close
# to the end of the (cropped) raw file to accommodate our requested ``tmax`` of
# 0.7 seconds, so the final epoch was dropped because it was too short. Here
# are the ``drop_log`` entries for the last 4 epochs (empty lists indicate
# epochs that were *not* dropped):
print(epochs.drop_log[-4:])
###############################################################################
# .. note::
#
# If you forget to provide the event dictionary to the :class:`~mne.Epochs`
# constructor, you can add it later by assigning to the ``event_id``
# attribute::
#
# epochs.event_id = event_dict
#
#
# Basic visualization of ``Epochs`` objects
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The :class:`~mne.Epochs` object can be visualized (and browsed interactively)
# using its :meth:`~mne.Epochs.plot` method:
epochs.plot(n_epochs=10)
###############################################################################
# Notice that the individual epochs are sequentially numbered along the bottom
# axis; the event ID associated with the epoch is marked on the top axis;
# epochs are separated by vertical dashed lines; and a vertical solid green
# line marks time=0 for each epoch (i.e., in this case, the stimulus onset
# time for each trial). Epoch plots are interactive (similar to
# :meth:`raw.plot() <mne.io.Raw.plot>`) and have many of the same interactive
# controls as :class:`~mne.io.Raw` plots. Horizontal and vertical scrollbars
# allow browsing through epochs or channels (respectively), and pressing
# :kbd:`?` when the plot is focused will show a help screen with all the
# available controls. See :ref:`tut-visualize-epochs` for more details (as well
# as other ways of visualizing epoched data).
#
#
# .. _tut-section-subselect-epochs:
#
# Subselecting epochs
# ^^^^^^^^^^^^^^^^^^^
#
# Now that we have our :class:`~mne.Epochs` object with our descriptive event
# labels added, we can subselect epochs easily using square brackets. For
# example, we can load all the "catch trials" where the stimulus was a face:
print(epochs['face'])
###############################################################################
# We can also pool across conditions easily, thanks to how MNE-Python handles
# the ``/`` character in epoch labels (using what is sometimes called
# "tag-based indexing"):
# pool across left + right
print(epochs['auditory'])
assert len(epochs['auditory']) == (len(epochs['auditory/left']) +
len(epochs['auditory/right']))
# pool across auditory + visual
print(epochs['left'])
assert len(epochs['left']) == (len(epochs['auditory/left']) +
len(epochs['visual/left']))
###############################################################################
# You can also pool conditions by passing multiple tags as a list. Note that
# MNE-Python will not complain if you ask for tags not present in the object,
# as long as it can find *some* match: the below example is parsed as
# (inclusive) ``'right'`` **or** ``'bottom'``, and you can see from the output
# that it selects only ``auditory/right`` and ``visual/right``.
print(epochs[['right', 'bottom']])
###############################################################################
# However, if no match is found, a ``KeyError`` is raised:
try:
print(epochs[['top', 'bottom']])
except KeyError:
print('Tag-based selection with no matches raises a KeyError!')
###############################################################################
# Selecting epochs by index
# ~~~~~~~~~~~~~~~~~~~~~~~~~
#
# :class:`~mne.Epochs` objects can also be indexed with integers, :term:`slices
# <slice>`, or lists of integers. This method of selection ignores event
# labels, so if you want the first 10 epochs of a particular type, you can
# select the type first, then use integers or slices:
print(epochs[:10]) # epochs 0-9
print(epochs[1:8:2]) # epochs 1, 3, 5, 7
print(epochs['buttonpress'][:4]) # first 4 "buttonpress" epochs
print(epochs['buttonpress'][[0, 1, 2, 3]]) # same as previous line
###############################################################################
# Selecting, dropping, and reordering channels
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# You can use the :meth:`~mne.Epochs.pick`, :meth:`~mne.Epochs.pick_channels`,
# :meth:`~mne.Epochs.pick_types`, and :meth:`~mne.Epochs.drop_channels` methods
# to modify which channels are included in an :class:`~mne.Epochs` object. You
# can also use :meth:`~mne.Epochs.reorder_channels` for this purpose; any
# channel names not provided to :meth:`~mne.Epochs.reorder_channels` will be
# dropped. Note that these *channel* selection methods modify the object
# in-place (unlike the square-bracket indexing to select *epochs* seen above)
# so in interactive/exploratory sessions you may want to create a
# :meth:`~mne.Epochs.copy` first.
epochs_eeg = epochs.copy().pick_types(meg=False, eeg=True)
print(epochs_eeg.ch_names)
new_order = ['EEG 002', 'STI 014', 'EOG 061', 'MEG 2521']
epochs_subset = epochs.copy().reorder_channels(new_order)
print(epochs_subset.ch_names)
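# (aside: ``drop_channels`` works analogously; the two channel names below are
# an assumed example that should be present in the sample data)
print(len(epochs.copy().drop_channels(['EEG 001', 'EEG 002']).ch_names))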
###############################################################################
del epochs_eeg, epochs_subset
###############################################################################
# Changing channel name and type
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# You can change the name or type of a channel using
# :meth:`~mne.Epochs.rename_channels` or :meth:`~mne.Epochs.set_channel_types`.
# Both methods take :class:`dictionaries <dict>` where the keys are existing
# channel names, and the values are the new name (or type) for that channel.
# Existing channels that are not in the dictionary will be unchanged.
epochs.rename_channels({'EOG 061': 'BlinkChannel'})
epochs.set_channel_types({'EEG 060': 'ecg'})
print(list(zip(epochs.ch_names, epochs.get_channel_types()))[-4:])
###############################################################################
# let's set them back to the correct values before moving on
epochs.rename_channels({'BlinkChannel': 'EOG 061'})
epochs.set_channel_types({'EEG 060': 'eeg'})
###############################################################################
# Selection in the time domain
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# To change the temporal extent of the :class:`~mne.Epochs`, you can use the
# :meth:`~mne.Epochs.crop` method:
shorter_epochs = epochs.copy().crop(tmin=-0.1, tmax=0.1, include_tmax=True)
for name, obj in dict(Original=epochs, Cropped=shorter_epochs).items():
print('{} epochs has {} time samples'
.format(name, obj.get_data().shape[-1]))
###############################################################################
# Cropping removed part of the baseline. When printing the
# cropped :class:`~mne.Epochs`, MNE-Python will inform you about the time
# period that was originally used to perform baseline correction by displaying
# the string "baseline period cropped after baseline correction":
print(shorter_epochs)
###############################################################################
# However, if you wanted to *expand* the time domain of an :class:`~mne.Epochs`
# object, you would need to go back to the :class:`~mne.io.Raw` data and
# recreate the :class:`~mne.Epochs` with different values for ``tmin`` and/or
# ``tmax``.
#
# It is also possible to change the "zero point" that defines the time values
# in an :class:`~mne.Epochs` object, with the :meth:`~mne.Epochs.shift_time`
# method. :meth:`~mne.Epochs.shift_time` allows shifting times relative to the
# current values, or specifying a fixed time to set as the new time value of
# the first sample (deriving the new time values of subsequent samples based on
# the :class:`~mne.Epochs` object's sampling frequency).
# shift times so that first sample of each epoch is at time zero
later_epochs = epochs.copy().shift_time(tshift=0., relative=False)
print(later_epochs.times[:3])
# shift times by a relative amount
later_epochs.shift_time(tshift=-7, relative=True)
print(later_epochs.times[:3])
###############################################################################
del shorter_epochs, later_epochs
###############################################################################
# Note that although time shifting respects the sampling frequency (the spacing
# between samples), it does not enforce the assumption that there is a sample
# occurring at exactly time=0.
#
#
# Extracting data in other forms
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The :meth:`~mne.Epochs.get_data` method returns the epoched data as a
# :class:`NumPy array <numpy.ndarray>`, of shape ``(n_epochs, n_channels,
# n_times)``; an optional ``picks`` parameter selects a subset of channels by
# index, name, or type:
eog_data = epochs.get_data(picks='EOG 061')
meg_data = epochs.get_data(picks=['mag', 'grad'])
channel_4_6_8 = epochs.get_data(picks=slice(4, 9, 2))
for name, arr in dict(EOG=eog_data, MEG=meg_data, Slice=channel_4_6_8).items():
print('{} contains {} channels'.format(name, arr.shape[1]))
###############################################################################
# Note that if your analysis requires repeatedly extracting single epochs from
# an :class:`~mne.Epochs` object, ``epochs.get_data(item=2)`` will be much
# faster than ``epochs[2].get_data()``, because it avoids the step of
# subsetting the :class:`~mne.Epochs` object first.
#
# You can also export :class:`~mne.Epochs` data to :class:`Pandas DataFrames
# <pandas.DataFrame>`. Here, the :class:`~pandas.DataFrame` index will be
# constructed by converting the time of each sample into milliseconds and
# rounding it to the nearest integer, and combining it with the event types and
# epoch numbers to form a hierarchical :class:`~pandas.MultiIndex`. Each
# channel will appear in a separate column. Then you can use any of Pandas'
# tools for grouping and aggregating data; for example, here we select any
# epochs numbered 10 or less from the ``auditory/left`` condition, and extract
# times between 100 and 107 ms on channels ``EEG 056`` through ``EEG 058``
# (note that slice indexing within Pandas' :obj:`~pandas.DataFrame.loc` is
# inclusive of the endpoint):
df = epochs.to_data_frame(index=['condition', 'epoch', 'time'])
df.sort_index(inplace=True)
print(df.loc[('auditory/left', slice(0, 10), slice(100, 107)),
'EEG 056':'EEG 058'])
del df
###############################################################################
# See the :ref:`tut-epochs-dataframe` tutorial for many more examples of the
# :meth:`~mne.Epochs.to_data_frame` method.
#
#
# Loading and saving ``Epochs`` objects to disk
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# :class:`~mne.Epochs` objects can be loaded and saved in the ``.fif`` format
# just like :class:`~mne.io.Raw` objects, using the :func:`mne.read_epochs`
# function and the :meth:`~mne.Epochs.save` method. Functions are also
# available for loading data that was epoched outside of MNE-Python, such as
# :func:`mne.read_epochs_eeglab` and :func:`mne.read_epochs_kit`.
epochs.save('saved-audiovisual-epo.fif', overwrite=True)
epochs_from_file = mne.read_epochs('saved-audiovisual-epo.fif', preload=False)
###############################################################################
# The MNE-Python naming convention for epochs files is that the file basename
# (the part before the ``.fif`` or ``.fif.gz`` extension) should end with
# ``-epo`` or ``_epo``, and a warning will be issued if the filename you
# provide does not adhere to that convention.
#
# As a final note, be aware that the class of the epochs object is different
# when epochs are loaded from disk rather than generated from a
# :class:`~mne.io.Raw` object:
print(type(epochs))
print(type(epochs_from_file))
###############################################################################
# In almost all cases this will not require changing anything about your code.
# However, if you need to do type checking on epochs objects, you can test
# against the base class that these classes are derived from:
print(all([isinstance(epochs, mne.BaseEpochs),
isinstance(epochs_from_file, mne.BaseEpochs)]))
###############################################################################
# Iterating over ``Epochs``
# ^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Iterating over an :class:`~mne.Epochs` object will yield :class:`arrays
# <numpy.ndarray>` rather than single-trial :class:`~mne.Epochs` objects:
for epoch in epochs[:3]:
print(type(epoch))
###############################################################################
# If you want to iterate over :class:`~mne.Epochs` objects, you can use an
# integer index as the iterator:
for index in range(3):
print(type(epochs[index]))
| bsd-3-clause |
dandanvidi/effective-capacity | scripts/s_to_v.py | 3 | 2501 |
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 28 10:27:12 2016
@author: dan
"""
import matplotlib.pyplot as plt
import sys, os
sys.path.append(os.path.expanduser('~/git/kvivo_max/scripts/'))
#from catalytic_rates import rates
from cobra.io.sbml import create_cobra_model_from_sbml_file
from cobra.manipulation.modify import convert_to_irreversible
import pandas as pd
import numpy as np
from helper import *
import uncertainties.unumpy as unumpy
model = create_cobra_model_from_sbml_file('../data/iJO1366.xml')
convert_to_irreversible(model)
rxns = {r.id:r for r in model.reactions}
gc = pd.DataFrame.from_csv('../data/growth_conditions.csv')
gc = gc[gc.media_key>0]
gc = gc[gc.reference == 'Schmidt et al. 2015']
gc = gc[gc.strain == 'BW25113']
pFVA = pd.DataFrame.from_csv("../data/flux_variability_[mmol_gCDW_h].csv", header=[0,1]).T
gr = gc['growth rate [h-1]'][pFVA.index.levels[0] & gc.index]
gr.sort()
conds = gr.index
ppath = "../../proteomics-collection/"
copies_fL = pd.DataFrame.from_csv(ppath+"meta_abundance[copies_fL].csv")[conds]
mg_gCDW = convert_copies_fL_to_mg_gCDW(copies_fL)
expression_CV = pd.DataFrame.from_csv(ppath+"supporting_data/ecoli_Schmidt_et_al_2015_CV.csv")[conds]
expression_CV.replace(np.nan,0, inplace=True)
expression_CV = expression_CV / 100
expression_std = expression_CV.mul(mg_gCDW.mean(axis=1),axis=0)
umol_gCDW_min = get_umol_gCDW_min_from_pFVA(pFVA)
umol_gCDW_min = umol_gCDW_min.T[conds]
E = mg_gCDW
V = umol_gCDW_min
SA = specific_actitivy(V,E,model)
E_by_reac = V/SA
capacity = get_metabolic_capacity(V,E,model)
usage = get_usage(V,E,model)
capacity_usage = get_capacity_usage(V,E,model)
#standard_error = bootstrap_capacity_usage_error(V,E,model,iterations=1000)
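# Plot capacity usage against growth rate; batch-culture conditions are
# annotated with their media key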
bg = '0.95'
fig = plt.figure(figsize=(8,8))
ax = plt.axes(axisbg=bg)
for i, c in enumerate(conds):
color='#ff4d4d'
if gc['growth mode'][c] == 'batch':
ax.annotate(gc['media_key'][c],(gr[c],capacity_usage[c]+0.01),
ha='center',va='baseline',size=15)
# elif gc['growth mode'][c] == 'chemostat':
# color = '0.5'
plt.scatter(gr[c],capacity_usage[c],c=color,s=80,edgecolor='none')
# ax.errorbar(gr[c],capacity_usage[c],standard_error[c],c='k')
ax.set_xlim(0,1)
ax.set_ylim(0,1)
ax.set_xlabel('growth rate [h$^{-1}$]', size=15)
ax.set_ylabel('capacity usage', size=15)
[tick.label.set_fontsize(15) for tick in ax.xaxis.get_major_ticks()]
[tick.label.set_fontsize(15) for tick in ax.yaxis.get_major_ticks()]
plt.tight_layout()
| mit |
leggitta/mne-python | mne/tests/test_report.py | 9 | 8943 |
# Authors: Mainak Jas <mainak@neuro.hut.fi>
# Teon Brooks <teon.brooks@gmail.com>
#
# License: BSD (3-clause)
import os
import os.path as op
import glob
import warnings
import shutil
from nose.tools import assert_true, assert_equal, assert_raises
from mne import Epochs, read_events, pick_types, read_evokeds
from mne.io import Raw
from mne.datasets import testing
from mne.report import Report
from mne.utils import (_TempDir, requires_mayavi, requires_nibabel,
requires_PIL, run_tests_if_main, slow_test)
from mne.viz import plot_trans
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
data_dir = testing.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
report_dir = op.join(data_dir, 'MEG', 'sample')
raw_fname = op.join(report_dir, 'sample_audvis_trunc_raw.fif')
event_fname = op.join(report_dir, 'sample_audvis_trunc_raw-eve.fif')
cov_fname = op.join(report_dir, 'sample_audvis_trunc-cov.fif')
fwd_fname = op.join(report_dir, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
trans_fname = op.join(report_dir, 'sample_audvis_trunc-trans.fif')
inv_fname = op.join(report_dir,
'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
mri_fname = op.join(subjects_dir, 'sample', 'mri', 'T1.mgz')
base_dir = op.realpath(op.join(op.dirname(__file__), '..', 'io', 'tests',
'data'))
evoked_fname = op.join(base_dir, 'test-ave.fif')
# Set our plotters to test mode
warnings.simplefilter('always') # enable b/c these tests throw warnings
@slow_test
@testing.requires_testing_data
@requires_PIL
def test_render_report():
"""Test rendering -*.fif files for mne report.
"""
tempdir = _TempDir()
raw_fname_new = op.join(tempdir, 'temp_raw.fif')
event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
for a, b in [[raw_fname, raw_fname_new],
[event_fname, event_fname_new],
[cov_fname, cov_fname_new],
[fwd_fname, fwd_fname_new],
[inv_fname, inv_fname_new]]:
shutil.copyfile(a, b)
# create and add -epo.fif and -ave.fif files
epochs_fname = op.join(tempdir, 'temp-epo.fif')
evoked_fname = op.join(tempdir, 'temp-ave.fif')
raw = Raw(raw_fname_new)
picks = pick_types(raw.info, meg='mag', eeg=False) # faster with one type
epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2, picks=picks)
epochs.save(epochs_fname)
epochs.average().save(evoked_fname)
report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
report.parse_folder(data_path=tempdir, on_error='raise')
assert_true(len(w) >= 1)
# Check correct paths and filenames
fnames = glob.glob(op.join(tempdir, '*.fif'))
for fname in fnames:
assert_true(op.basename(fname) in
[op.basename(x) for x in report.fnames])
assert_true(''.join(report.html).find(op.basename(fname)) != -1)
assert_equal(len(report.fnames), len(fnames))
assert_equal(len(report.html), len(report.fnames))
# Check saving functionality
report.data_path = tempdir
report.save(fname=op.join(tempdir, 'report.html'), open_browser=False)
assert_true(op.isfile(op.join(tempdir, 'report.html')))
assert_equal(len(report.html), len(fnames))
assert_equal(len(report.html), len(report.fnames))
# Check saving same report to new filename
report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
assert_true(op.isfile(op.join(tempdir, 'report2.html')))
# Check overwriting file
report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
overwrite=True)
assert_true(op.isfile(op.join(tempdir, 'report.html')))
# Check pattern matching with multiple patterns
pattern = ['*raw.fif', '*eve.fif']
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
report.parse_folder(data_path=tempdir, pattern=pattern)
assert_true(len(w) >= 1)
    fnames = glob.glob(op.join(tempdir, '*raw.fif')) + \
        glob.glob(op.join(tempdir, '*eve.fif'))
for fname in fnames:
assert_true(op.basename(fname) in
[op.basename(x) for x in report.fnames])
assert_true(''.join(report.html).find(op.basename(fname)) != -1)
@testing.requires_testing_data
@requires_mayavi
@requires_PIL
def test_render_add_sections():
"""Test adding figures/images to section.
"""
tempdir = _TempDir()
import matplotlib.pyplot as plt
report = Report(subjects_dir=subjects_dir)
# Check add_figs_to_section functionality
fig = plt.plot([1, 2], [1, 2])[0].figure
report.add_figs_to_section(figs=fig, # test non-list input
captions=['evoked response'], scale=1.2,
image_format='svg')
assert_raises(ValueError, report.add_figs_to_section, figs=[fig, fig],
captions='H')
assert_raises(ValueError, report.add_figs_to_section, figs=fig,
captions=['foo'], scale=0, image_format='svg')
assert_raises(ValueError, report.add_figs_to_section, figs=fig,
captions=['foo'], scale=1e-10, image_format='svg')
# need to recreate because calls above change size
fig = plt.plot([1, 2], [1, 2])[0].figure
# Check add_images_to_section
img_fname = op.join(tempdir, 'testimage.png')
fig.savefig(img_fname)
report.add_images_to_section(fnames=[img_fname],
captions=['evoked response'])
assert_raises(ValueError, report.add_images_to_section,
fnames=[img_fname, img_fname], captions='H')
evoked = read_evokeds(evoked_fname, condition='Left Auditory',
baseline=(-0.2, 0.0))
fig = plot_trans(evoked.info, trans_fname, subject='sample',
subjects_dir=subjects_dir)
report.add_figs_to_section(figs=fig, # test non-list input
captions='random image', scale=1.2)
@slow_test
@testing.requires_testing_data
@requires_mayavi
@requires_nibabel()
def test_render_mri():
"""Test rendering MRI for mne report.
"""
tempdir = _TempDir()
trans_fname_new = op.join(tempdir, 'temp-trans.fif')
for a, b in [[trans_fname, trans_fname_new]]:
shutil.copyfile(a, b)
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
report.parse_folder(data_path=tempdir, mri_decim=30, pattern='*',
n_jobs=2)
report.save(op.join(tempdir, 'report.html'), open_browser=False)
@testing.requires_testing_data
@requires_nibabel()
def test_render_mri_without_bem():
"""Test rendering MRI without BEM for mne report.
"""
tempdir = _TempDir()
os.mkdir(op.join(tempdir, 'sample'))
os.mkdir(op.join(tempdir, 'sample', 'mri'))
shutil.copyfile(mri_fname, op.join(tempdir, 'sample', 'mri', 'T1.mgz'))
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=tempdir)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
report.parse_folder(tempdir)
assert_true(len(w) >= 1)
report.save(op.join(tempdir, 'report.html'), open_browser=False)
@testing.requires_testing_data
@requires_nibabel()
def test_add_htmls_to_section():
"""Test adding html str to mne report.
"""
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
html = '<b>MNE-Python is AWESOME</b>'
caption, section = 'html', 'html_section'
report.add_htmls_to_section(html, caption, section)
idx = report._sectionlabels.index('report_' + section)
html_compare = report.html[idx]
assert_true(html in html_compare)
def test_validate_input():
report = Report()
items = ['a', 'b', 'c']
captions = ['Letter A', 'Letter B', 'Letter C']
section = 'ABCs'
comments = ['First letter of the alphabet.',
'Second letter of the alphabet',
'Third letter of the alphabet']
assert_raises(ValueError, report._validate_input, items, captions[:-1],
section, comments=None)
assert_raises(ValueError, report._validate_input, items, captions, section,
comments=comments[:-1])
values = report._validate_input(items, captions, section, comments=None)
items_new, captions_new, comments_new = values
assert_equal(len(comments_new), len(items))
run_tests_if_main()
| bsd-3-clause |
josdaza/deep-toolbox | PyTorch/Seq2Seq/Seq2SeqMain.py | 1 | 3941 |
from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata,string,re
import random, time, math
#PyTorch
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch import optim
use_cuda = torch.cuda.is_available()
# Matplotlib and NumPy
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
# Import our own submodules
import data_reader as dr
from Encoder import EncoderRNN
from Decoder import AttnDecoderRNN
import trainer as tr
import evaluator as ev
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
def showPlot(points):
plt.figure()
fig, ax = plt.subplots()
# this locator puts ticks at regular intervals
loc = ticker.MultipleLocator(base=0.2)
ax.yaxis.set_major_locator(loc)
plt.plot(points)
# Training procedure:
# 1) Start the timer
# 2) Initialize the optimizers (SGD) and the loss function
# 3) Build a random set of training pairs
# 4) Call tr.train() to update the loss and report progress
# 5) Plot the loss history once training has finished
def trainIters(config_params, encoder, decoder, n_iters, max_length, print_every=1000, plot_every=100, learning_rate=0.01):
start = time.time()
plot_losses = []
print_loss_total = 0 # Reset every print_every
plot_loss_total = 0 # Reset every plot_every
encoder_optimizer = optim.SGD(encoder.parameters(), lr=learning_rate)
decoder_optimizer = optim.SGD(decoder.parameters(), lr=learning_rate)
    # Randomly choose n_iters training pairs
training_pairs = [tr.variablesFromPair(random.choice(pairs),input_lang, output_lang,int(config_params["RNN"]["eos_token"]))
for i in range(n_iters)]
criterion = nn.NLLLoss()
    # For each training pair (sentence), run the encoder-decoder token by
    # token, updating its weights and computing the new loss
for iter in range(1, n_iters + 1):
training_pair = training_pairs[iter - 1]
input_variable = training_pair[0]
target_variable = training_pair[1]
loss = tr.train(config_params, input_variable, target_variable, encoder,
decoder, encoder_optimizer, decoder_optimizer, criterion, max_length)
print_loss_total += loss
plot_loss_total += loss
if iter % print_every == 0:
print_loss_avg = print_loss_total / print_every
print_loss_total = 0
print('%s (%d %d%%) %.4f' % (timeSince(start, iter / n_iters),
iter, iter / n_iters * 100, print_loss_avg))
if iter % plot_every == 0:
plot_loss_avg = plot_loss_total / plot_every
plot_losses.append(plot_loss_avg)
plot_loss_total = 0
showPlot(plot_losses)
if __name__ == "__main__":
    # Load and clean the data
config_params,input_lang, output_lang, pairs = dr.prepareData('eng', 'fra', True)
    # Initialize the RNN parameters
MAX_LENGTH = int(config_params["Main"]["max_length"])
HIDDEN_SIZE = int(config_params["RNN"]["hidden_size"])
print(random.choice(pairs))
    # Create the encoder
encoder1 = EncoderRNN(input_lang.n_words, HIDDEN_SIZE)
    # Create the attention decoder
attn_decoder1 = AttnDecoderRNN(HIDDEN_SIZE, output_lang.n_words, MAX_LENGTH, 1, dropout_p=0.1)
if use_cuda:
encoder1 = encoder1.cuda()
attn_decoder1 = attn_decoder1.cuda()
    # Training
trainIters(config_params, encoder1, attn_decoder1, 75000, max_length=MAX_LENGTH, print_every=5000)
    # Evaluate the model
ev.evaluateRandomly(config_params, encoder1, attn_decoder1)
| mit |
pylhc/PyLHC | setup.py | 1 | 2911 |
import pathlib
import setuptools
# The directory containing this file
MODULE_NAME = "pylhc"
TOPLEVEL_DIR = pathlib.Path(__file__).parent.absolute()
ABOUT_FILE = TOPLEVEL_DIR / MODULE_NAME / "__init__.py"
README = TOPLEVEL_DIR / "README.md"
def about_package(init_posixpath: pathlib.Path) -> dict:
"""
Return package information defined with dunders in __init__.py as a dictionary, when
provided with a PosixPath to the __init__.py file.
"""
about_text: str = init_posixpath.read_text()
return {
entry.split(" = ")[0]: entry.split(" = ")[1].strip('"')
for entry in about_text.strip().split("\n")
if entry.startswith("__")
}
ABOUT_PYLHC = about_package(ABOUT_FILE)
with README.open("r") as docs:
long_description = docs.read()
# Dependencies for the module itself
DEPENDENCIES = [
"numpy>=1.19",
"scipy>=1.4.0",
"pandas>=1.0,<1.2", # limit because of https://github.com/pandas-dev/pandas/issues/39872
"matplotlib>=3.2.0",
"pjlsa>=0.0.14",
"pytimber>=2.8.0",
"tfs-pandas>=2.0",
"generic-parser>=1.0.8",
"parse>=1.15.0",
"omc3@https://github.com/pylhc/omc3/tarball/master",
]
EXTRA_DEPENDENCIES = {
"tech": [
"jpype1<0.8.0,>=0.7.3", # limit from pylsa
# "cmmnbuild-dep-manager/@https://gitlab.cern.ch/scripting-tools/cmmnbuild-dep-manager/repository/archive.tar.gz?ref=master",
"pyjapc@https://gitlab.cern.ch/scripting-tools/pyjapc/repository/archive.tar.gz?ref=master",
],
"test": [
"pytest>=5.2",
"pytest-cov>=2.7",
"pytest-regressions>=2.0.0",
"pytest-mpl>=0.11",
],
"doc": ["sphinx", "sphinx_rtd_theme"],
}
EXTRA_DEPENDENCIES.update(
{"all": [elem for list_ in EXTRA_DEPENDENCIES.values() for elem in list_]}
)
setuptools.setup(
name=ABOUT_PYLHC["__title__"],
version=ABOUT_PYLHC["__version__"],
description=ABOUT_PYLHC["__description__"],
long_description=long_description,
long_description_content_type="text/markdown",
author=ABOUT_PYLHC["__author__"],
author_email=ABOUT_PYLHC["__author_email__"],
url=ABOUT_PYLHC["__url__"],
python_requires=">=3.7",
license=ABOUT_PYLHC["__license__"],
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Scientific/Engineering :: Visualization",
],
packages=setuptools.find_packages(exclude=["tests*", "doc"]),
include_package_data=True,
install_requires=DEPENDENCIES,
tests_require=EXTRA_DEPENDENCIES["test"],
extras_require=EXTRA_DEPENDENCIES,
)
| mit |
neuronalX/workshop_cellular_automaton | game_of_life_numpy.py | 1 | 1537 |
# -----------------------------------------------------------------------------
# From Python to Numpy
# Copyright (2017) Nicolas P. Rougier - BSD license
# More information at https://github.com/rougier/numpy-book
# -----------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
def update(*args):
global Z, M
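    # Count the live neighbors of each interior cell by summing the eight
    # shifted views of Z (vectorized Game of Life update)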
N = (Z[0:-2, 0:-2] + Z[0:-2, 1:-1] + Z[0:-2, 2:] +
Z[1:-1, 0:-2] + Z[1:-1, 2:] +
Z[2: , 0:-2] + Z[2: , 1:-1] + Z[2: , 2:])
birth = (N == 3) & (Z[1:-1, 1:-1] == 0)
survive = ((N == 2) | (N == 3)) & (Z[1:-1, 1:-1] == 1)
Z[...] = 0
Z[1:-1, 1:-1][birth | survive] = 1
# Show past activities
M[M>0.25] = 0.25
# M[M>0.25] = 0.0
M *= 0.995
M[Z==1] = 1
# Direct activity
# M[...] = Z
im.set_data(M)
Z = np.random.randint(0, 2, (300, 600))
M = np.zeros(Z.shape)
size = np.array(Z.shape)
dpi = 80.0
figsize = size[1]/float(dpi), size[0]/float(dpi)
fig = plt.figure(figsize=figsize, dpi=dpi)
fig.add_axes([0.0, 0.0, 1.0, 1.0], frameon=False)
im = plt.imshow(M, interpolation='nearest', cmap=plt.cm.gray_r, vmin=0, vmax=1)
plt.xticks([]), plt.yticks([])
animation = FuncAnimation(fig, update, interval=10, frames=2000)
# animation.save('game-of-life.mp4', fps=40, dpi=80, bitrate=-1, codec="libx264",
# extra_args=['-pix_fmt', 'yuv420p'],
# metadata={'artist':'Nicolas P. Rougier'})
plt.show()
| mit |
hlin117/scikit-learn | sklearn/tree/tests/test_tree.py | 17 | 64758 |
"""
Testing for the tree module (sklearn.tree).
"""
import copy
import pickle
from functools import partial
from itertools import product
import struct
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.exceptions import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree._tree import TREE_LEAF
from sklearn.tree.tree import CRITERIA_CLF
from sklearn.tree.tree import CRITERIA_REG
from sklearn import datasets
from sklearn.utils import compute_sample_weight
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", "mae", "friedman_mse")
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
presort=True),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
presort=True),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor",
"ExtraTreeClassifier", "ExtraTreeRegressor"]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
assert_equal(s.node_count, d.node_count,
"{0}: inequal number of node ({1} != {2})"
"".format(message, s.node_count, d.node_count))
assert_array_equal(d.children_right, s.children_right,
message + ": inequal children_right")
assert_array_equal(d.children_left, s.children_left,
message + ": inequal children_left")
external = d.children_right == TREE_LEAF
internal = np.logical_not(external)
assert_array_equal(d.feature[internal], s.feature[internal],
message + ": inequal features")
assert_array_equal(d.threshold[internal], s.threshold[internal],
message + ": inequal threshold")
assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
message + ": inequal sum(n_node_samples)")
assert_array_equal(d.n_node_samples, s.n_node_samples,
message + ": inequal n_node_samples")
assert_almost_equal(d.impurity, s.impurity,
err_msg=message + ": inequal impurity")
assert_array_almost_equal(d.value[external], s.value[external],
err_msg=message + ": inequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
def test_xor():
# Check on a XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
# The gini index and the mean square error (variance) might differ due
# to numerical instability. Since those instabilities mainly occurs at
# high tree depth, we restrict this maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=.6).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=0.).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=3.).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=0.0).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=1.1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=2.5).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_impurity_split=-1.0).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_impurity_decrease=-1.0).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_split():
"""Test min_samples_split parameter"""
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test for integer parameter
est = TreeEstimator(min_samples_split=10,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
# test for float parameter
est = TreeEstimator(min_samples_split=0.2,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
def test_min_samples_leaf():
# Test if leaves contain more than leaf_count training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test integer parameter
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
# test float parameter
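        # (a float min_samples_leaf is interpreted as a fraction of the
        #  training set, i.e. ceil(0.1 * 150) = 15 samples here)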
est = TreeEstimator(min_samples_leaf=0.1,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
# test case with no weights passed in
total_weight = X.shape[0]
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def check_min_weight_fraction_leaf_with_min_samples_leaf(name, datasets,
sparse=False):
"""Test the interaction between min_weight_fraction_leaf and min_samples_leaf
when sample_weights is not provided in fit."""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
total_weight = X.shape[0]
TreeEstimator = ALL_TREES[name]
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 3)):
# test integer min_samples_leaf
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
min_samples_leaf=5,
random_state=0)
est.fit(X, y)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
max((total_weight *
est.min_weight_fraction_leaf), 5),
"Failed with {0} "
"min_weight_fraction_leaf={1}, "
"min_samples_leaf={2}".format(name,
est.min_weight_fraction_leaf,
est.min_samples_leaf))
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 3)):
# test float min_samples_leaf
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
min_samples_leaf=.1,
random_state=0)
est.fit(X, y)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
max((total_weight * est.min_weight_fraction_leaf),
(total_weight * est.min_samples_leaf)),
"Failed with {0} "
"min_weight_fraction_leaf={1}, "
"min_samples_leaf={2}".format(name,
est.min_weight_fraction_leaf,
est.min_samples_leaf))
def test_min_weight_fraction_leaf_with_min_samples_leaf():
# Check on dense input
for name in ALL_TREES:
yield (check_min_weight_fraction_leaf_with_min_samples_leaf,
name, "iris")
# Check on sparse input
for name in SPARSE_TREES:
yield (check_min_weight_fraction_leaf_with_min_samples_leaf,
name, "multilabel", True)
def test_min_impurity_split():
# test if min_impurity_split creates leaves with impurity
# [0, min_impurity_split) when min_samples_leaf = 1 and
# min_samples_split = 2.
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
min_impurity_split = .5
        # verify that, without an explicit min_impurity_split, leaf nodes are
        # split down to zero impurity (i.e. below the old 1e-7 default)
est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
random_state=0)
assert_true(est.min_impurity_split is None,
"Failed, min_impurity_split = {0} > 1e-7".format(
est.min_impurity_split))
try:
assert_warns(DeprecationWarning, est.fit, X, y)
except AssertionError:
pass
for node in range(est.tree_.node_count):
if (est.tree_.children_left[node] == TREE_LEAF or
est.tree_.children_right[node] == TREE_LEAF):
assert_equal(est.tree_.impurity[node], 0.,
"Failed with {0} "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
# verify leaf nodes have impurity [0,min_impurity_split] when using
# min_impurity_split
est = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_split=min_impurity_split,
random_state=0)
assert_warns_message(DeprecationWarning,
"Use the min_impurity_decrease",
est.fit, X, y)
for node in range(est.tree_.node_count):
if (est.tree_.children_left[node] == TREE_LEAF or
est.tree_.children_right[node] == TREE_LEAF):
assert_greater_equal(est.tree_.impurity[node], 0,
"Failed with {0}, "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
assert_less_equal(est.tree_.impurity[node], min_impurity_split,
"Failed with {0}, "
"min_impurity_split={1}".format(
est.tree_.impurity[node],
est.min_impurity_split))
def test_min_impurity_decrease():
    # test that min_impurity_decrease ensures a split is made only if
    # the impurity decrease is at least that value
X, y = datasets.make_classification(n_samples=10000, random_state=42)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# Check default value of min_impurity_decrease, 1e-7
est1 = TreeEstimator(max_leaf_nodes=max_leaf_nodes, random_state=0)
# Check with explicit value of 0.05
est2 = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_decrease=0.05, random_state=0)
# Check with a much lower value of 0.0001
est3 = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_decrease=0.0001, random_state=0)
# Check with a much lower value of 0.1
est4 = TreeEstimator(max_leaf_nodes=max_leaf_nodes,
min_impurity_decrease=0.1, random_state=0)
for est, expected_decrease in ((est1, 1e-7), (est2, 0.05),
(est3, 0.0001), (est4, 0.1)):
assert_less_equal(est.min_impurity_decrease, expected_decrease,
"Failed, min_impurity_decrease = {0} > {1}"
.format(est.min_impurity_decrease,
expected_decrease))
est.fit(X, y)
for node in range(est.tree_.node_count):
                # If the current node is not a leaf node, check whether the
                # split was justified w.r.t. the min_impurity_decrease
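                # The weighted impurity decrease checked here is, as documented
                # for min_impurity_decrease,
                #   N_t / N * (impurity - N_t_R / N_t * right_impurity
                #                       - N_t_L / N_t * left_impurity)
                # and the block below recomputes it from the tree_ arrays.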
if est.tree_.children_left[node] != TREE_LEAF:
imp_parent = est.tree_.impurity[node]
wtd_n_node = est.tree_.weighted_n_node_samples[node]
left = est.tree_.children_left[node]
wtd_n_left = est.tree_.weighted_n_node_samples[left]
imp_left = est.tree_.impurity[left]
wtd_imp_left = wtd_n_left * imp_left
right = est.tree_.children_right[node]
wtd_n_right = est.tree_.weighted_n_node_samples[right]
imp_right = est.tree_.impurity[right]
wtd_imp_right = wtd_n_right * imp_right
wtd_avg_left_right_imp = wtd_imp_right + wtd_imp_left
wtd_avg_left_right_imp /= wtd_n_node
fractional_node_weight = (
est.tree_.weighted_n_node_samples[node] / X.shape[0])
actual_decrease = fractional_node_weight * (
imp_parent - wtd_avg_left_right_imp)
assert_greater_equal(actual_decrease, expected_decrease,
"Failed with {0} "
"expected min_impurity_decrease={1}"
.format(actual_decrease,
expected_decrease))
def test_pickle():
    for name, TreeEstimator in ALL_TREES.items():
if "Classifier" in name:
X, y = iris.data, iris.target
else:
X, y = boston.data, boston.target
est = TreeEstimator(random_state=0)
est.fit(X, y)
score = est.score(X, y)
fitted_attribute = dict()
for attribute in ["max_depth", "node_count", "capacity"]:
fitted_attribute[attribute] = getattr(est.tree_, attribute)
serialized_object = pickle.dumps(est)
est2 = pickle.loads(serialized_object)
assert_equal(type(est2), est.__class__)
score2 = est2.score(X, y)
assert_equal(score, score2,
"Failed to generate same score after pickling "
"with {0}".format(name))
for attribute in fitted_attribute:
assert_equal(getattr(est2.tree_, attribute),
fitted_attribute[attribute],
"Failed to generate same attribute {0} after "
"pickling with {1}".format(attribute, name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = compute_sample_weight("balanced", unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if not est.presort:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0], [1]], [0, 1]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-3 <= value.flat[0] < 3,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_behaviour_constant_feature_after_splits():
X = np.transpose(np.vstack(([[0, 0, 0, 0, 0, 1, 2, 4, 5, 6, 7]],
np.zeros((4, 11)))))
y = [0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3]
for name, TreeEstimator in ALL_TREES.items():
# do not check extra random trees
if "ExtraTree" not in name:
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 2)
assert_equal(est.tree_.node_count, 5)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
    # Test that the error message for too-large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = 8 * struct.calcsize("P")
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
    # Subsample the larger datasets to reduce testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree_type, dataset in product(SPARSE_TREES, ("clf_small", "toy",
"digits", "multilabel",
"sparse-pos",
"sparse-neg",
"sparse-mix", "zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree_type, dataset, max_depth)
    # Due to numerical instability of MSE and an overly strict test, we limit
    # the maximal depth
for tree_type, dataset in product(SPARSE_TREES, ["boston", "reg_small"]):
if tree_type in REG_TREES:
yield (check_sparse_input, tree_type, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree_type, dataset in product(SPARSE_TREES, ["sparse-pos",
"sparse-neg",
"sparse-mix", "zeros"]):
yield (check_sparse_parameters, tree_type, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree_type, dataset in product(SPARSE_TREES, ["sparse-pos",
"sparse-neg",
"sparse-mix", "zeros"]):
yield (check_sparse_criterion, tree_type, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # Set n_samples equal to n_features so that the same (data, indices,
    # indptr) arrays can be used to build both a csc and a csr matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.tree_.decision_path(X1).toarray(),
d.tree_.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
d.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
s.tree_.decision_path(X1).toarray())
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree_type in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree_type)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
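    # Rationale: the total weight is 1.0, so min_weight_fraction_leaf=0.4 in
    # the helper above requires >= 0.4 weight per leaf; isolating the single
    # X == 1 sample would leave a 0.2-weight leaf, hence no split (depth 0).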
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if not TreeEstimator().presort:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
def check_presort_sparse(est, X, y):
assert_raises(ValueError, est.fit, X, y)
def test_presort_sparse():
ests = (DecisionTreeClassifier(presort=True),
DecisionTreeRegressor(presort=True))
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for est, sparse_matrix in product(ests, sparse_matrices):
yield check_presort_sparse, est, sparse_matrix(X), y
def test_decision_path_hardcoded():
X = iris.data
y = iris.target
est = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y)
node_indicator = est.decision_path(X[:2]).toarray()
assert_array_equal(node_indicator, [[1, 1, 0], [1, 0, 1]])
def check_decision_path(name):
X = iris.data
y = iris.target
n_samples = X.shape[0]
TreeEstimator = ALL_TREES[name]
est = TreeEstimator(random_state=0, max_depth=2)
est.fit(X, y)
node_indicator_csr = est.decision_path(X)
node_indicator = node_indicator_csr.toarray()
assert_equal(node_indicator.shape, (n_samples, est.tree_.node_count))
    # Assert that the leaf indices are correct
leaves = est.apply(X)
leave_indicator = [node_indicator[i, j] for i, j in enumerate(leaves)]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
    # Ensure exactly one leaf node per sample
all_leaves = est.tree_.children_left == TREE_LEAF
assert_array_almost_equal(np.dot(node_indicator, all_leaves),
np.ones(shape=n_samples))
# Ensure max depth is consistent with sum of indicator
max_depth = node_indicator.sum(axis=1).max()
assert_less_equal(est.tree_.max_depth, max_depth)
def test_decision_path():
for name in ALL_TREES:
yield (check_decision_path, name)
def check_no_sparse_y_support(name):
X, y = X_multilabel, csr_matrix(y_multilabel)
TreeEstimator = ALL_TREES[name]
assert_raises(TypeError, TreeEstimator(random_state=0).fit, X, y)
def test_no_sparse_y_support():
# Currently we don't support sparse y
for name in ALL_TREES:
yield (check_no_sparse_y_support, name)
def test_mae():
    # check that the MAE criterion produces correct results
    # on a small toy dataset
dt_mae = DecisionTreeRegressor(random_state=0, criterion="mae",
max_leaf_nodes=2)
dt_mae.fit([[3], [5], [3], [8], [5]], [6, 7, 3, 4, 3])
assert_array_equal(dt_mae.tree_.impurity, [1.4, 1.5, 4.0/3.0])
assert_array_equal(dt_mae.tree_.value.flat, [4, 4.5, 4.0])
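    # Worked values for the unweighted fit above (illustrative): the root has
    # y = [6, 7, 3, 4, 3] with median 4, so MAE = (2 + 3 + 1 + 0 + 1) / 5 = 1.4;
    # the split separates the X == 3 samples (y = [6, 3] -> value 4.5, MAE 1.5)
    # from the rest (y = [7, 4, 3] -> value 4, MAE 4/3).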
dt_mae.fit([[3], [5], [3], [8], [5]], [6, 7, 3, 4, 3],
[0.6, 0.3, 0.1, 1.0, 0.3])
assert_array_equal(dt_mae.tree_.impurity, [7.0/2.3, 3.0/0.7, 4.0/1.6])
assert_array_equal(dt_mae.tree_.value.flat, [4.0, 6.0, 4.0])
def test_criterion_copy():
    # Check whether a copy of our criterion has the same type
    # and properties as the original
n_outputs = 3
n_classes = np.arange(3, dtype=np.intp)
n_samples = 100
def _pickle_copy(obj):
return pickle.loads(pickle.dumps(obj))
for copy_func in [copy.copy, copy.deepcopy, _pickle_copy]:
for _, typename in CRITERIA_CLF.items():
criteria = typename(n_outputs, n_classes)
result = copy_func(criteria).__reduce__()
typename_, (n_outputs_, n_classes_), _ = result
assert_equal(typename, typename_)
assert_equal(n_outputs, n_outputs_)
assert_array_equal(n_classes, n_classes_)
for _, typename in CRITERIA_REG.items():
criteria = typename(n_outputs, n_samples)
result = copy_func(criteria).__reduce__()
typename_, (n_outputs_, n_samples_), _ = result
assert_equal(typename, typename_)
assert_equal(n_outputs, n_outputs_)
assert_equal(n_samples, n_samples_)
|
bsd-3-clause
|
hrjn/scikit-learn
|
sklearn/datasets/mldata.py
|
31
|
7856
|
"""Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
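# Illustrative conversions performed by mldata_filename():
#   'Whistler Daily Snowfall' -> 'whistler-daily-snowfall'
#   'datasets-UCI iris'       -> 'datasets-uci-iris'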
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
    If the file does not exist yet, it is downloaded from mldata.org.
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
    3) the data array is stored as `n_features x n_samples`, and thus needs
       to be transposed to match the `sklearn` standard
    Keyword arguments allow these defaults to be adapted to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname :
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
        The raw name is automatically converted to an mldata.org URL.
target_name : optional, default: 'label'
Name or index of the column containing the target values.
data_name : optional, default: 'data'
Name or index of the column containing the data.
transpose_data : optional, default: True
If True, transpose the downloaded data array.
data_home : optional, default: None
Specify another download and cache folder for the data sets. By default
        all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
    to respect the scikit-learn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
    # if target or data names are indices, transform them into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to scikit-learn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by test runners to set up the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
|
bsd-3-clause
|
Vrekrer/magdynlab
|
experiments/ZxH_PUC.py
|
1
|
5174
|
# -*- coding: utf-8 -*-
# ZxH_PUC
import numpy
import time
import magdynlab.instruments
import magdynlab.controllers
import magdynlab.data_types
import threading_decorators as ThD
import matplotlib.pyplot as plt
@ThD.gui_safe
def MyPlot(Data):
f = plt.figure('ZxH', (5,4))
if not(f.axes):
plt.subplot()
ax = f.axes[0]
#ax.clear()
scale_factor = 10**-round(numpy.log10(numpy.abs(Data.ylim).max()))
if not(ax.lines):
ax.plot([],[],'b.-')
ax.plot([],[],'r.-')
ax.set_xlim(*Data.xlim)
ax.set_ylim(*(numpy.array(Data.ylim)*scale_factor))
line = ax.lines[-1]
line.set_data(Data.dat[:,0], (Data.dat[:,1]-Data.dat[0,1])*scale_factor)
line = ax.lines[-2]
line.set_data(Data.dat[:,0], (Data.dat[:,2]-Data.dat[0,2])*scale_factor)
ax.set_xlabel('Field (Oe)')
ax.set_ylabel('DV x 10^-%d' % numpy.log10(scale_factor))
ax.grid(True)
f.tight_layout()
f.canvas.draw()
@ThD.gui_safe
def MyPlotFreq(Data):
f = plt.figure('Zxf', (5,4))
if not(f.axes):
plt.subplot()
ax = f.axes[0]
#ax.clear()
scale_factor = 10**-round(numpy.log10(numpy.abs(Data.ylim).max()))
if not(ax.lines):
ax.plot([],[],'b.-')
ax.plot([],[],'r.-')
ax.set_xlim(*Data.xlim)
ax.set_ylim(*(numpy.array(Data.ylim)*scale_factor))
line = ax.lines[-1]
line.set_data(Data.dat[:,3]/1E3, Data.dat[:,1]*scale_factor)
line = ax.lines[-2]
line.set_data(Data.dat[:,3]/1E3, Data.dat[:,2]*scale_factor)
ax.set_xlabel('Freq (kHz)')
ax.set_ylabel('V x 10^-%d' % numpy.log10(scale_factor))
ax.grid(True)
f.tight_layout()
f.canvas.draw()
class ZxH(object):
def __init__(self):
PowerSource = magdynlab.instruments.E3648A()
LockIn = magdynlab.instruments.SRS_SR844()
self.FC = magdynlab.controllers.FieldControlerPUC(PowerSource)
self.VC = magdynlab.controllers.ZControler_PUC(LockIn)
self.Data = magdynlab.data_types.Data2D()
self.Data.reset(n=4)
def _SaveData(self, file_name):
self.Data.save(file_name)
def PlotData(self, i = None):
MyPlot(self.Data)
@ThD.as_thread
def FieldSweep(self, crv = [], file_name = None, freq = 'Auto',
meas_opts = [1, 0.5, 0.1], TC = 'Auto'):
fields = numpy.asarray(crv)
#Initialize data objects
self.Data.reset(n=4)
self.Data.xlim = [fields.min(), fields.max()]
sen = self.VC.LockIn.SEN * 1.5
self.Data.ylim = [-sen/100, sen/100]
if TC != 'Auto':
self.VC.LockIn.TC = TC
if freq != 'Auto':
self.VC.setFreq(freq)
n_pts, iniDelay, measDelay = meas_opts
#Loop for each field
for i, h in enumerate(fields):
self.FC.setField(h)
#time.sleep(0.5)
f, X, Y = self.VC.getFXY(n = n_pts, iniDelay = iniDelay, measDelay = measDelay)
self.Data.addPoint(h, X, Y, f)
MyPlot(self.Data)
ThD.check_stop()
        if file_name is not None:
self._SaveData(file_name)
self.FC.TurnOff()
self.FC.BEEP()
@ThD.as_thread
def FreqSweep(self, crvf = [], file_name = None, field = 'Auto',
turnFieldOff = True,
meas_opts = [1, 0.5, 0.1], TC = 'Auto'):
freqs = numpy.asarray(crvf)
#Initialize data objects
self.Data.reset(n=4)
self.Data.xlim = [freqs.min()/1E3, freqs.max()/1E3]
sen = self.VC.LockIn.SEN * 1.5
self.Data.ylim = [-sen, sen]
if TC != 'Auto':
self.VC.LockIn.TC = TC
if field != 'Auto':
self.FC.setField(field)
h = self.FC.getField()
n_pts, iniDelay, measDelay = meas_opts
#Loop for each freq
for i, f in enumerate(freqs):
self.VC.setFreq(f)
#time.sleep(0.5)
f, X, Y = self.VC.getFXY(n = n_pts, iniDelay = iniDelay, measDelay = measDelay)
self.Data.addPoint(h, X, Y, f)
MyPlotFreq(self.Data)
ThD.check_stop()
        if file_name is not None:
self._SaveData(file_name)
if turnFieldOff:
self.FC.TurnOff()
self.FC.BEEP()
def Stop(self, TurnOff = True):
        print('Stopping...')
self.FC.BEEP()
self.FieldSweep.stop()
self.FreqSweep.stop()
if self.FieldSweep.thread is not None:
self.FieldSweep.thread.join()
if self.FreqSweep.thread is not None:
self.FreqSweep.thread.join()
print('DONE')
time.sleep(1)
self.FC.BEEP()
time.sleep(0.1)
self.FC.BEEP()
if TurnOff:
print('Turning field OFF')
self.FC.setField(0)
print('DONE')
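# Illustrative usage (a sketch; the curve values, frequency and file name below
# are made up for the example):
#   exp = ZxH()
#   fields = numpy.linspace(-100, 100, 201)   # field curve, in Oe
#   exp.FieldSweep(crv=fields, file_name='ZxH_test.dat', freq=50E3)
#   ...
#   exp.Stop()   # abort a running sweep and turn the field off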
|
mit
|
JosmanPS/scikit-learn
|
doc/tutorial/text_analytics/skeletons/exercise_01_language_train_model.py
|
254
|
2005
|
"""Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
# TASK: Fit the pipeline on the training set
# TASK: Predict the outcome on the testing set in a variable named y_predicted
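# One possible completion of the tasks above (a reference sketch, not the only
# valid solution; it uses only the estimators imported at the top of the file):
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
                             use_idf=False)
clf = Pipeline([
    ('vec', vectorizer),
    ('clf', Perceptron()),
])
clf.fit(docs_train, y_train)
y_predicted = clf.predict(docs_test)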
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
|
bsd-3-clause
|
OpenSourcePolicyCenter/webapp-public
|
webapp/apps/btax/bubble_plot/bubble_plot_tabs.py
|
2
|
12945
|
import pandas as pd
pd.options.mode.chained_assignment = None
# importing Bokeh libraries
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, CustomJS, LabelSet
from bokeh.models.widgets import Panel, RadioButtonGroup, Tabs
from bokeh.models import HoverTool, WheelZoomTool, ResetTool, SaveTool
from bokeh.models import NumeralTickFormatter
from bokeh.layouts import gridplot, column
from bokeh.embed import components
from bokeh.resources import CDN
# import styles and callback
from .styles import (PLOT_FORMATS, TITLE_FORMATS, RED, BLUE)
from .controls_callback_script import CONTROLS_CALLBACK_SCRIPT
def bubble_plot_tabs(dataframes):
dataframes = dataframes.copy()
# convert asset dicts to pandas dataframes
base_df = pd.DataFrame.from_dict(dataframes['base_output_by_asset'])
reform_df = pd.DataFrame.from_dict(dataframes['reform_output_by_asset'])
change_df = pd.DataFrame.from_dict(dataframes['changed_output_by_asset'])
list_df = [base_df, change_df, reform_df]
list_string = ['base', 'change', 'reform']
data_sources = {}
for i, df in enumerate(list_df):
# remove data from Intellectual Property, Land, and Inventories
# Categories
df = (df[~df['asset_category']
.isin(['Intellectual Property', 'Land', 'Inventories'])]
.copy())
df = df.dropna()
        # define the size columns; for 'change' and 'reform', reuse the sizes
        # computed for 'base'
if list_string[i] == 'base':
SIZES = list(range(20, 80, 15))
size = pd.qcut(df['assets_c'].values, len(SIZES), labels=SIZES)
size_c = pd.qcut(df['assets_c'].values, len(SIZES), labels=SIZES)
size_nc = pd.qcut(df['assets_nc'].values, len(SIZES), labels=SIZES)
df['size'] = size
df['size_c'] = size_c
df['size_nc'] = size_nc
else:
df['size'] = size
df['size_c'] = size_c
df['size_nc'] = size_nc
# form the two Categories: Equipment and Structures
equipment_df = df[(~df.asset_category.str.contains('Structures')) &
(~df.asset_category.str.contains('Buildings'))]
structure_df = df[(df.asset_category.str.contains('Structures')) |
(df.asset_category.str.contains('Buildings'))]
format_fields = ['metr_c', 'metr_nc', 'metr_c_d', 'metr_nc_d',
'metr_c_e', 'metr_nc_e', 'mettr_c', 'mettr_nc',
'mettr_c_d', 'mettr_nc_d', 'mettr_c_e', 'mettr_nc_e',
'rho_c', 'rho_nc', 'rho_c_d', 'rho_nc_d', 'rho_c_e',
'rho_nc_e', 'z_c', 'z_nc', 'z_c_d', 'z_nc_d', 'z_c_e',
'z_nc_e']
# Make short category
make_short = {
'Instruments and Communications Equipment': 'Instruments and Communications',
'Office and Residential Equipment': 'Office and Residential',
'Other Equipment': 'Other',
'Transportation Equipment': 'Transportation',
'Other Industrial Equipment': 'Other Industrial',
'Nonresidential Buildings': 'Nonresidential Bldgs',
'Residential Buildings': 'Residential Bldgs',
'Mining and Drilling Structures': 'Mining and Drilling',
'Other Structures': 'Other',
'Computers and Software': 'Computers and Software',
'Industrial Machinery': 'Industrial Machinery'}
equipment_df['short_category'] = equipment_df['asset_category'].map(
make_short)
structure_df['short_category'] = structure_df['asset_category'].map(
make_short)
# Add the Reform and the Baseline to Equipment Asset
for f in format_fields:
equipment_copy = equipment_df.copy()
equipment_copy['rate'] = equipment_copy[f]
equipment_copy['hover'] = equipment_copy.apply(
lambda x: "{0:.1f}%".format(x[f] * 100), axis=1)
simple_equipment_copy = equipment_copy.filter(
items=['size',
'size_c',
'size_nc',
'rate',
'hover',
'short_category',
'Asset'])
data_sources[list_string[i] + '_equipment_' +
f] = ColumnDataSource(simple_equipment_copy)
# Add the Reform and the Baseline to Structures Asset
for f in format_fields:
structure_copy = structure_df.copy()
structure_copy['rate'] = structure_copy[f]
structure_copy['hover'] = structure_copy.apply(
lambda x: "{0:.1f}%".format(x[f] * 100), axis=1)
simple_structure_copy = structure_copy.filter(
items=['size',
'size_c',
'size_nc',
'rate',
'hover',
'short_category',
'Asset'])
data_sources[list_string[i] + '_structure_' +
f] = ColumnDataSource(simple_structure_copy)
# Create initial data sources to plot on load
if list_string[i] == 'base':
equipment_copy = equipment_df.copy()
equipment_copy['rate'] = equipment_copy['mettr_c']
equipment_copy['hover'] = equipment_copy.apply(
lambda x: "{0:.1f}%".format(x['mettr_c'] * 100), axis=1)
simple_equipment_copy = equipment_copy.filter(
items=['size',
'size_c',
'size_nc',
'rate',
'hover',
'short_category',
'Asset'])
data_sources['equip_source'] = ColumnDataSource(
simple_equipment_copy)
structure_copy = structure_df.copy()
structure_copy['rate'] = structure_copy['mettr_c']
structure_copy['hover'] = structure_copy.apply(
lambda x: "{0:.1f}%".format(x['mettr_c'] * 100), axis=1)
simple_structure_copy = structure_copy.filter(
items=['size',
'size_c',
'size_nc',
'rate',
'hover',
'short_category',
'Asset'])
data_sources['struc_source'] = ColumnDataSource(
simple_structure_copy)
# Define categories for Equipments assets
equipment_assets = ['Computers and Software',
'Instruments and Communications',
'Office and Residential',
'Transportation',
'Industrial Machinery',
'Other Industrial',
'Other']
# Define categories for Structures assets
structure_assets = ['Residential Bldgs',
'Nonresidential Bldgs',
'Mining and Drilling',
'Other']
# Equipment plot
p = figure(plot_height=540,
plot_width=990,
y_range=list(reversed(equipment_assets)),
tools='hover',
background_fill_alpha=0,
title='Marginal Effective Total Tax Rates on '
'Corporate Investments in Equipment')
p.title.align = 'center'
p.title.text_color = '#6B6B73'
hover = p.select(dict(type=HoverTool))
hover.tooltips = [('Asset', ' @Asset (@hover)')]
p.xaxis.axis_label = "Marginal effective total tax rate"
p.xaxis[0].formatter = NumeralTickFormatter(format="0.1%")
p.toolbar_location = None
p.min_border_right = 5
p.outline_line_width = 5
p.border_fill_alpha = 0
p.xaxis.major_tick_line_color = "firebrick"
p.xaxis.major_tick_line_width = 3
p.xaxis.minor_tick_line_color = "orange"
p.outline_line_width = 1
p.outline_line_alpha = 1
p.outline_line_color = "black"
p.circle(x='rate',
y='short_category',
color=BLUE,
size='size',
line_color="#333333",
fill_alpha=.4,
source=data_sources['equip_source'],
alpha=.4
)
# Style the tools
p.add_tools(WheelZoomTool(), ResetTool(), SaveTool())
p.toolbar_location = "right"
p.toolbar.logo = None
# Define and add a legend
legend_cds = ColumnDataSource({'size': SIZES,
'label': ['<$20B', '', '', '<$1T'],
'x': [0, .15, .35, .6]})
p_legend = figure(height=150, width=480, x_range=(-0.075, .75),
title='Asset Amount')
p_legend.circle(y=None, x='x', size='size', source=legend_cds, color=BLUE,
fill_alpha=.4, alpha=.4, line_color="#333333")
l = LabelSet(y=None, x='x', text='label', x_offset=-20, y_offset=-50,
source=legend_cds)
p_legend.add_layout(l)
p_legend.axis.visible = False
p_legend.grid.grid_line_color = None
p_legend.toolbar.active_drag = None
data_sources['equip_plot'] = p
# Structures plot
p2 = figure(plot_height=540,
plot_width=990,
y_range=list(reversed(structure_assets)),
tools='hover',
background_fill_alpha=0,
title='Marginal Effective Total Tax Rates on '
'Corporate Investments in Structures')
p2.title.align = 'center'
p2.title.text_color = '#6B6B73'
hover = p2.select(dict(type=HoverTool))
hover.tooltips = [('Asset', ' @Asset (@hover)')]
p2.xaxis.axis_label = "Marginal effective total tax rate"
p2.xaxis[0].formatter = NumeralTickFormatter(format="0.1%")
p2.toolbar_location = None
p2.min_border_right = 5
p2.outline_line_width = 0
p2.border_fill_alpha = 0
p2.xaxis.major_tick_line_color = "firebrick"
p2.xaxis.major_tick_line_width = 3
p2.xaxis.minor_tick_line_color = "orange"
p2.circle(x='rate',
y='short_category',
color=RED,
size='size',
line_color="#333333",
fill_alpha=.4,
source=data_sources['struc_source'],
alpha=.4)
p2.outline_line_width = 1
p2.outline_line_alpha = 1
p2.outline_line_color = "black"
# Style the tools
p2.add_tools(WheelZoomTool(), ResetTool(), SaveTool())
p2.toolbar_location = "right"
p2.toolbar.logo = None
# Define and add a legend
p2_legend = figure(height=150, width=380, x_range=(-0.075, .75),
title='Asset Amount')
p2_legend.circle(y=None, x='x', size='size', source=legend_cds, color=RED,
fill_alpha=.4, alpha=.4, line_color="#333333")
l2 = LabelSet(y=None, x='x', text='label', x_offset=-20, y_offset=-50,
source=legend_cds)
p2_legend.add_layout(l2)
p2_legend.axis.visible = False
p2_legend.grid.grid_line_color = None
p2_legend.toolbar.active_drag = None
data_sources['struc_plot'] = p2
# add buttons
controls_callback = CustomJS(args=data_sources,
code=CONTROLS_CALLBACK_SCRIPT)
c_nc_buttons = RadioButtonGroup(labels=['Corporate', 'Noncorporate'],
active=0, callback=controls_callback)
controls_callback.args['c_nc_buttons'] = c_nc_buttons
format_buttons = RadioButtonGroup(labels=['Baseline', 'Reform', 'Change'],
active=0, callback=controls_callback)
controls_callback.args['format_buttons'] = format_buttons
interest_buttons = RadioButtonGroup(
labels=['METTR',
'METR',
'Cost of Capital',
'Depreciation'],
active=0,
width=700,
callback=controls_callback)
controls_callback.args['interest_buttons'] = interest_buttons
type_buttons = RadioButtonGroup(
labels=['Typically Financed',
'Equity Financed',
'Debt Financed'],
active=0,
width=700,
callback=controls_callback)
controls_callback.args['type_buttons'] = type_buttons
# Create Tabs
tab = Panel(child=column([p, p_legend]), title='Equipment')
tab2 = Panel(child=column([p2, p2_legend]), title='Structures')
tabs = Tabs(tabs=[tab, tab2])
layout = gridplot(
children=[[tabs],
[c_nc_buttons, interest_buttons],
[format_buttons, type_buttons]]
)
# Create components
js, div = components(layout)
cdn_js = CDN.js_files[0]
cdn_css = CDN.css_files[0]
widget_js = CDN.js_files[1]
widget_css = CDN.css_files[1]
return js, div, cdn_js, cdn_css, widget_js, widget_css
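# Illustrative call (a sketch; base_dict, reform_dict and changed_dict stand in
# for the per-asset output dictionaries produced upstream):
#   js, div, cdn_js, cdn_css, widget_js, widget_css = bubble_plot_tabs({
#       'base_output_by_asset': base_dict,
#       'reform_output_by_asset': reform_dict,
#       'changed_output_by_asset': changed_dict,
#   })
#   the returned script/div strings are then embedded in an HTML template
#   together with the CDN js/css assets.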
|
mit
|
trungnt13/scikit-learn
|
examples/cluster/plot_mean_shift.py
|
351
|
1793
|
"""
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets.samples_generator import make_blobs
###############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
###############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using estimate_bandwidth
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
###############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
|
bsd-3-clause
|
Rambatino/CHAID
|
CHAID/__main__.py
|
1
|
4726
|
"""
This package provides a Python implementation of the Chi-Squared Automatic
Interaction Detection (CHAID) decision tree.
"""
import argparse
from .tree import Tree
import pandas as pd
import numpy as np
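# Example invocation (hypothetical file and column names):
#   python -m CHAID data.csv outcome gender region --max-depth 3 --alpha-merge 0.05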
def main():
"""Entry point when module is run from command line"""
parser = argparse.ArgumentParser(description='Run the chaid algorithm on a'
' csv/sav file.')
parser.add_argument('file')
parser.add_argument('dependent_variable', nargs=1)
parser.add_argument('--dependent-variable-type', type=str)
var = parser.add_argument_group('Independent Variable Specification')
var.add_argument('nominal_variables', nargs='*', help='The names of '
'independent variables to use that have no intrinsic '
'order to them')
var.add_argument('--ordinal-variables', type=str, nargs='*',
help='The names of independent variables to use that '
                          'have an intrinsic order but a finite number of states')
parser.add_argument('--weights', type=str, help='Name of weight column')
parser.add_argument('--max-depth', type=int, help='Max depth of generated '
'tree')
parser.add_argument('--min-parent-node-size', type=int, help='Minimum number of '
'samples required to split the parent node')
parser.add_argument('--min-child-node-size', type=int, help='Minimum number of '
'samples required to split the child node')
parser.add_argument('--alpha-merge', type=float, help='Alpha Merge')
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--classify', action='store_true', help='Add column to'
                       ' input with the node id of the node that the '
'respondent has been placed into')
group.add_argument('--predict', action='store_true', help='Add column to '
'input with the value of the dependent variable that '
'the majority of respondents in that node selected')
group.add_argument('--rules', action='store_true')
group.add_argument('--export', action='store_true', help='Whether to export the chart to pdf/dot')
group.add_argument('--export-path', type=str, help='Path to store chart output')
    group.add_argument('--exhaustive', action='store_true', help='Whether to run exhaustive CHAID')
nspace = parser.parse_args()
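    # Load the input data: .csv files are read directly with pandas, while SPSS .sav files are read via savReaderWriter, using the first returned row as the column names.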
if nspace.file[-4:] == '.csv':
data = pd.read_csv(nspace.file)
elif nspace.file[-4:] == '.sav':
import savReaderWriter as spss
raw_data = spss.SavReader(nspace.file, returnHeader=True)
raw_data_list = list(raw_data)
data = pd.DataFrame(raw_data_list)
data = data.rename(columns=data.loc[0]).iloc[1:]
else:
print('Unknown file type')
exit(1)
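    # Forward only the explicitly supplied command-line options to Tree.from_pandas_df as keyword arguments.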
config = {}
if nspace.max_depth:
config['max_depth'] = nspace.max_depth
if nspace.alpha_merge:
config['alpha_merge'] = nspace.alpha_merge
if nspace.min_parent_node_size:
config['min_parent_node_size'] = nspace.min_parent_node_size
if nspace.min_child_node_size:
config['min_child_node_size'] = nspace.min_child_node_size
if nspace.weights:
config['weight'] = nspace.weights
if nspace.dependent_variable_type:
config['dep_variable_type'] = nspace.dependent_variable_type
if nspace.exhaustive:
config['is_exhaustive'] = nspace.exhaustive
ordinal = nspace.ordinal_variables or []
nominal = nspace.nominal_variables or []
independent_variables = nominal + ordinal
types = dict(zip(nominal + ordinal, ['nominal'] * len(nominal) + ['ordinal'] * len(ordinal)))
if len(independent_variables) == 0:
print('Need to provide at least one independent variable')
exit(1)
tree = Tree.from_pandas_df(data, types, nspace.dependent_variable[0],
**config)
if nspace.export or nspace.export_path:
tree.render(nspace.export_path, True)
if nspace.classify:
predictions = pd.Series(tree.node_predictions())
predictions.name = 'node_id'
data = pd.concat([data, predictions], axis=1)
print(data.to_csv())
elif nspace.predict:
predictions = pd.Series(tree.model_predictions())
predictions.name = 'predicted'
data = pd.concat([data, predictions], axis=1)
print(data.to_csv())
elif nspace.rules:
print('\n'.join(str(x) for x in tree.classification_rules()))
else:
tree.print_tree()
print('Accuracy: ', tree.accuracy())
if __name__ == "__main__":
main()
|
apache-2.0
|
dmitriz/zipline
|
tests/utils/test_factory.py
|
34
|
2175
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
import pandas as pd
import pytz
import numpy as np
from zipline.utils.factory import (load_from_yahoo,
load_bars_from_yahoo)
class TestFactory(TestCase):
def test_load_from_yahoo(self):
stocks = ['AAPL', 'GE']
start = pd.datetime(1993, 1, 1, 0, 0, 0, 0, pytz.utc)
end = pd.datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)
data = load_from_yahoo(stocks=stocks, start=start, end=end)
assert data.index[0] == pd.Timestamp('1993-01-04 00:00:00+0000')
assert data.index[-1] == pd.Timestamp('2001-12-31 00:00:00+0000')
for stock in stocks:
assert stock in data.columns
np.testing.assert_raises(
AssertionError, load_from_yahoo, stocks=stocks,
start=end, end=start
)
def test_load_bars_from_yahoo(self):
stocks = ['AAPL', 'GE']
start = pd.datetime(1993, 1, 1, 0, 0, 0, 0, pytz.utc)
end = pd.datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc)
data = load_bars_from_yahoo(stocks=stocks, start=start, end=end)
assert data.major_axis[0] == pd.Timestamp('1993-01-04 00:00:00+0000')
assert data.major_axis[-1] == pd.Timestamp('2001-12-31 00:00:00+0000')
for stock in stocks:
assert stock in data.items
for ohlc in ['open', 'high', 'low', 'close', 'volume', 'price']:
assert ohlc in data.minor_axis
np.testing.assert_raises(
AssertionError, load_bars_from_yahoo, stocks=stocks,
start=end, end=start
)
|
apache-2.0
|
clemkoa/scikit-learn
|
examples/calibration/plot_calibration_curve.py
|
113
|
5904
|
"""
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence in the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them are used for model fitting) and 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the Brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior to Gaussian
naive Bayes: the calibration curve has a sigmoid shape, which is typical for
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.model_selection import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
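# With test_size=0.99, only 1,000 of the 100,000 samples are used for fitting; the remaining 99,000 form the evaluation set.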
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
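        # The Brier score is the mean squared difference between the predicted probability and the actual outcome (0 or 1); smaller is better.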
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
|
bsd-3-clause
|
shakamunyi/tensorflow
|
tensorflow/examples/learn/text_classification_cnn.py
|
29
|
5677
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for CNN-based text classification with DBpedia data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
EMBEDDING_SIZE = 20
N_FILTERS = 10
WINDOW_SIZE = 20
FILTER_SHAPE1 = [WINDOW_SIZE, EMBEDDING_SIZE]
FILTER_SHAPE2 = [WINDOW_SIZE, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
n_words = 0
MAX_LABEL = 15
WORDS_FEATURE = 'words' # Name of the input words feature.
def cnn_model(features, labels, mode):
"""2 layer ConvNet to predict from sequence of words to a class."""
# Convert indexes of words into embeddings.
  # This creates an embedding matrix of [n_words, EMBEDDING_SIZE] and then
# maps word indexes of the sequence into [batch_size, sequence_length,
# EMBEDDING_SIZE].
word_vectors = tf.contrib.layers.embed_sequence(
features[WORDS_FEATURE], vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
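  # expand_dims adds a channels axis so conv2d receives a 4-D tensor of shape [batch_size, sequence_length, EMBEDDING_SIZE, 1].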
word_vectors = tf.expand_dims(word_vectors, 3)
with tf.variable_scope('CNN_Layer1'):
# Apply Convolution filtering on input sequence.
conv1 = tf.layers.conv2d(
word_vectors,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE1,
padding='VALID',
        # Add a ReLU for non-linearity.
activation=tf.nn.relu)
# Max pooling across output of Convolution+Relu.
pool1 = tf.layers.max_pooling2d(
conv1,
pool_size=POOLING_WINDOW,
strides=POOLING_STRIDE,
padding='SAME')
# Transpose matrix so that n_filters from convolution becomes width.
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# Second level of convolution filtering.
conv2 = tf.layers.conv2d(
pool1,
filters=N_FILTERS,
kernel_size=FILTER_SHAPE2,
padding='VALID')
# Max across each filter to get useful features for classification.
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
# Apply regular WX + B and classification.
logits = tf.layers.dense(pool2, MAX_LABEL, activation=None)
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
global n_words
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
MAX_DOCUMENT_LENGTH)
x_train = np.array(list(vocab_processor.fit_transform(x_train)))
x_test = np.array(list(vocab_processor.transform(x_test)))
n_words = len(vocab_processor.vocabulary_)
print('Total words: %d' % n_words)
# Build model
classifier = tf.estimator.Estimator(model_fn=cnn_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_train},
y=y_train,
batch_size=len(x_train),
num_epochs=None,
shuffle=True)
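  # batch_size=len(x_train) with num_epochs=None repeats the training set indefinitely, so each of the 100 training steps below processes a batch the size of the full training set.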
classifier.train(input_fn=train_input_fn, steps=100)
# Predict.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={WORDS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
predictions = classifier.predict(input_fn=test_input_fn)
y_predicted = np.array(list(p['class'] for p in predictions))
y_predicted = y_predicted.reshape(np.array(y_test).shape)
# Score with sklearn.
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy (sklearn): {0:f}'.format(score))
# Score with tensorflow.
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
apache-2.0
|
SheffieldML/GPy
|
GPy/plotting/gpy_plot/plot_util.py
|
2
|
15084
|
#===============================================================================
# Copyright (c) 2012-2015, GPy authors (see AUTHORS.txt).
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of GPy nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import numpy as np
from scipy import sparse
import itertools
from ...models import WarpedGP
def in_ipynb():
try:
cfg = get_ipython().config
return 'IPKernelApp' in cfg
except NameError:
return False
def find_best_layout_for_subplots(num_subplots):
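    # Grow a small near-square grid (rows x cols, with cols >= rows) until it has at least num_subplots cells.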
r, c = 1, 1
while (r*c) < num_subplots:
if (c==(r+1)) or (r==c):
c += 1
elif c==(r+2):
r += 1
c -= 1
return r, c
def helper_predict_with_model(self, Xgrid, plot_raw, apply_link, percentiles, which_data_ycols, predict_kw, samples=0):
"""
Make the right decisions for prediction with a model
based on the standard arguments of plotting.
This is quite complex and will take a while to understand,
so do not change anything in here lightly!!!
"""
# Put some standards into the predict_kw so that prediction is done automatically:
if predict_kw is None:
predict_kw = {}
if 'likelihood' not in predict_kw:
if plot_raw:
from ...likelihoods import Gaussian
from ...likelihoods.link_functions import Identity
lik = Gaussian(Identity(), 1e-9) # Make the likelihood not add any noise
else:
lik = None
predict_kw['likelihood'] = lik
if 'Y_metadata' not in predict_kw:
predict_kw['Y_metadata'] = {}
if 'output_index' not in predict_kw['Y_metadata']:
predict_kw['Y_metadata']['output_index'] = Xgrid[:,-1:].astype(np.int)
mu, _ = self.predict(Xgrid, **predict_kw)
if percentiles is not None:
percentiles = self.predict_quantiles(Xgrid, quantiles=percentiles, **predict_kw)
else: percentiles = []
if samples > 0:
fsamples = self.posterior_samples(Xgrid, size=samples, **predict_kw)
fsamples = fsamples[:, which_data_ycols, :]
else:
fsamples = None
    # Keep only the y columns that we want to plot:
retmu = mu[:, which_data_ycols]
percs = [p[:, which_data_ycols] for p in percentiles]
if plot_raw and apply_link:
for i in range(len(which_data_ycols)):
retmu[:, [i]] = self.likelihood.gp_link.transf(mu[:, [i]])
for perc in percs:
perc[:, [i]] = self.likelihood.gp_link.transf(perc[:, [i]])
if fsamples is not None:
for s in range(fsamples.shape[-1]):
fsamples[:, i, s] = self.likelihood.gp_link.transf(fsamples[:, i, s])
return retmu, percs, fsamples
def helper_for_plot_data(self, X, plot_limits, visible_dims, fixed_inputs, resolution):
"""
Figure out the data, free_dims and create an Xgrid for
the prediction.
This is only implemented for two dimensions for now!
"""
#work out what the inputs are for plotting (1D or 2D)
if fixed_inputs is None:
fixed_inputs = []
fixed_dims = get_fixed_dims(fixed_inputs)
free_dims = get_free_dims(self, visible_dims, fixed_dims)
if len(free_dims) == 1:
#define the frame on which to plot
resolution = resolution or 200
Xnew, xmin, xmax = x_frame1D(X[:,free_dims], plot_limits=plot_limits, resolution=resolution)
Xgrid = np.zeros((Xnew.shape[0],self.input_dim))
Xgrid[:,free_dims] = Xnew
for i,v in fixed_inputs:
Xgrid[:,i] = v
x = Xgrid
y = None
elif len(free_dims) == 2:
#define the frame for plotting on
resolution = resolution or 35
Xnew, x, y, xmin, xmax = x_frame2D(X[:,free_dims], plot_limits, resolution)
Xgrid = np.zeros((Xnew.shape[0], self.input_dim))
Xgrid[:,free_dims] = Xnew
#xmin = Xgrid.min(0)[free_dims]
#xmax = Xgrid.max(0)[free_dims]
for i,v in fixed_inputs:
Xgrid[:,i] = v
else:
raise TypeError("calculated free_dims {} from visible_dims {} and fixed_dims {} is neither 1D nor 2D".format(free_dims, visible_dims, fixed_dims))
return fixed_dims, free_dims, Xgrid, x, y, xmin, xmax, resolution
def scatter_label_generator(labels, X, visible_dims, marker=None):
ulabels = []
for lab in labels:
if not lab in ulabels:
ulabels.append(lab)
if marker is not None:
marker = itertools.cycle(list(marker))
else:
m = None
try:
input_1, input_2, input_3 = visible_dims
except:
try:
# tuple or int?
input_1, input_2 = visible_dims
input_3 = None
except:
input_1 = visible_dims
input_2 = input_3 = None
for ul in ulabels:
from numbers import Number
if isinstance(ul, str):
try:
this_label = unicode(ul)
except NameError:
#python3
this_label = ul
elif isinstance(ul, Number):
this_label = 'class {!s}'.format(ul)
else:
this_label = ul
if marker is not None:
m = next(marker)
index = np.nonzero(labels == ul)[0]
if input_2 is None:
x = X[index, input_1]
y = np.zeros(index.size)
z = None
elif input_3 is None:
x = X[index, input_1]
y = X[index, input_2]
z = None
else:
x = X[index, input_1]
y = X[index, input_2]
z = X[index, input_3]
yield x, y, z, this_label, index, m
def subsample_X(X, labels, num_samples=1000):
"""
Stratified subsampling if labels are given.
    Due to rounding errors you might get small differences between num_samples
    and the number of samples in the returned subsampled X.
"""
if X.shape[0] > num_samples:
        print("Warning: subsampling X, as it has more samples than {}. X.shape={!s}".format(int(num_samples), X.shape))
if labels is not None:
subsample = []
for _, _, _, _, index, _ in scatter_label_generator(labels, X, (0, None, None)):
subsample.append(np.random.choice(index, size=max(2, int(index.size*(float(num_samples)/X.shape[0]))), replace=False))
subsample = np.hstack(subsample)
else:
subsample = np.random.choice(X.shape[0], size=1000, replace=False)
X = X[subsample]
labels = labels[subsample]
#=======================================================================
# <<<WORK IN PROGRESS>>>
# <<<DO NOT DELETE>>>
# plt.close('all')
# fig, ax = plt.subplots(1,1)
# from GPy.plotting.matplot_dep.dim_reduction_plots import most_significant_input_dimensions
# import matplotlib.patches as mpatches
# i1, i2 = most_significant_input_dimensions(m, None)
# xmin, xmax = 100, -100
# ymin, ymax = 100, -100
# legend_handles = []
#
# X = m.X.mean[:, [i1, i2]]
# X = m.X.variance[:, [i1, i2]]
#
# xmin = X[:,0].min(); xmax = X[:,0].max()
# ymin = X[:,1].min(); ymax = X[:,1].max()
# range_ = [[xmin, xmax], [ymin, ymax]]
# ul = np.unique(labels)
#
# for i, l in enumerate(ul):
# #cdict = dict(red =[(0., colors[i][0], colors[i][0]), (1., colors[i][0], colors[i][0])],
# # green=[(0., colors[i][0], colors[i][1]), (1., colors[i][1], colors[i][1])],
# # blue =[(0., colors[i][0], colors[i][2]), (1., colors[i][2], colors[i][2])],
# # alpha=[(0., 0., .0), (.5, .5, .5), (1., .5, .5)])
# #cmap = LinearSegmentedColormap('{}'.format(l), cdict)
# cmap = LinearSegmentedColormap.from_list('cmap_{}'.format(str(l)), [colors[i], colors[i]], 255)
# cmap._init()
# #alphas = .5*(1+scipy.special.erf(np.linspace(-2,2, cmap.N+3)))#np.log(np.linspace(np.exp(0), np.exp(1.), cmap.N+3))
# alphas = (scipy.special.erf(np.linspace(0,2.4, cmap.N+3)))#np.log(np.linspace(np.exp(0), np.exp(1.), cmap.N+3))
# cmap._lut[:, -1] = alphas
# print l
# x, y = X[labels==l].T
#
# heatmap, xedges, yedges = np.histogram2d(x, y, bins=300, range=range_)
# #heatmap, xedges, yedges = np.histogram2d(x, y, bins=100)
#
# im = ax.imshow(heatmap, extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]], cmap=cmap, aspect='auto', interpolation='nearest', label=str(l))
# legend_handles.append(mpatches.Patch(color=colors[i], label=l))
# ax.set_xlim(xmin, xmax)
# ax.set_ylim(ymin, ymax)
# plt.legend(legend_handles, [l.get_label() for l in legend_handles])
# plt.draw()
# plt.show()
#=======================================================================
return X, labels
def update_not_existing_kwargs(to_update, update_from):
"""
    This function updates the keyword arguments in to_update with those from
    update_from, but only for keys that are not already set in to_update.
    This is used to update kwargs from the default dicts.
"""
if to_update is None:
to_update = {}
to_update.update({k:v for k,v in update_from.items() if k not in to_update})
return to_update
def get_x_y_var(model):
"""
    Get the data from a model as
    X, the inputs,
    X_variance, the variance of the inputs (default: None),
    and Y, the outputs.
    If (X, X_variance, Y) is given, this just returns it.
:returns: (X, X_variance, Y)
"""
# model given
if hasattr(model, 'has_uncertain_inputs') and model.has_uncertain_inputs():
X = model.X.mean.values
X_variance = model.X.variance.values
else:
try:
X = model.X.values
except AttributeError:
X = model.X
X_variance = None
try:
Y = model.Y.values
except AttributeError:
Y = model.Y
if isinstance(model, WarpedGP) and not model.predict_in_warped_space:
Y = model.Y_normalized
if sparse.issparse(Y): Y = Y.todense().view(np.ndarray)
return X, X_variance, Y
def get_free_dims(model, visible_dims, fixed_dims):
"""
    Work out which input dimensions to use for plotting (1D or 2D).
    The visible_dims are the dimensions selected for plotting,
    the fixed_dims are the dimensions that are held fixed,
    and the free_dims are then the visible dims without the fixed dims.
"""
if visible_dims is None:
visible_dims = np.arange(model.input_dim)
dims = np.asanyarray(visible_dims)
if fixed_dims is not None:
dims = [dim for dim in dims if dim not in fixed_dims]
return np.asanyarray([dim for dim in dims if dim is not None])
def get_fixed_dims(fixed_inputs):
"""
Work out the fixed dimensions from the fixed_inputs list of tuples.
"""
return np.array([i for i,_ in fixed_inputs])
def get_which_data_ycols(model, which_data_ycols):
"""
Helper to get the data columns to plot.
"""
if which_data_ycols == 'all' or which_data_ycols is None:
return np.arange(model.output_dim)
return which_data_ycols
def get_which_data_rows(model, which_data_rows):
"""
Helper to get the data rows to plot.
"""
if which_data_rows == 'all' or which_data_rows is None:
return slice(None)
return which_data_rows
def x_frame1D(X,plot_limits=None,resolution=None):
"""
    Internal helper function for making plots; returns a set of input values to plot, as well as lower and upper limits.
"""
assert X.shape[1] ==1, "x_frame1D is defined for one-dimensional inputs"
if plot_limits is None:
from GPy.core.parameterization.variational import VariationalPosterior
if isinstance(X, VariationalPosterior):
xmin,xmax = X.mean.min(0),X.mean.max(0)
else:
xmin,xmax = X.min(0),X.max(0)
xmin, xmax = xmin-0.25*(xmax-xmin), xmax+0.25*(xmax-xmin)
elif len(plot_limits) == 2:
xmin, xmax = map(np.atleast_1d, plot_limits)
else:
raise ValueError("Bad limits for plotting")
Xnew = np.linspace(float(xmin),float(xmax),int(resolution) or 200)[:,None]
return Xnew, xmin, xmax
def x_frame2D(X,plot_limits=None,resolution=None):
"""
    Internal helper function for making plots; returns a set of input values to plot, as well as lower and upper limits.
"""
assert X.shape[1]==2, "x_frame2D is defined for two-dimensional inputs"
if plot_limits is None:
xmin, xmax = X.min(0), X.max(0)
xmin, xmax = xmin-0.075*(xmax-xmin), xmax+0.075*(xmax-xmin)
elif len(plot_limits) == 2:
xmin, xmax = plot_limits
try:
xmin = xmin[0], xmin[1]
except:
# only one limit given, copy over to other lim
xmin = [plot_limits[0], plot_limits[0]]
xmax = [plot_limits[1], plot_limits[1]]
elif len(plot_limits) == 4:
xmin, xmax = (plot_limits[0], plot_limits[2]), (plot_limits[1], plot_limits[3])
else:
raise ValueError("Bad limits for plotting")
resolution = resolution or 50
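    # The complex step 1j*resolution makes np.mgrid interpret the value as a number of points rather than a step size.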
xx, yy = np.mgrid[xmin[0]:xmax[0]:1j*resolution,xmin[1]:xmax[1]:1j*resolution]
Xnew = np.c_[xx.flat, yy.flat]
return Xnew, xx, yy, xmin, xmax
|
bsd-3-clause
|
mjgrav2001/scikit-learn
|
sklearn/linear_model/stochastic_gradient.py
|
130
|
50966
|
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification and regression using Stochastic Gradient Descent (SGD)."""
import numpy as np
import scipy.sparse as sp
from abc import ABCMeta, abstractmethod
from ..externals.joblib import Parallel, delayed
from .base import LinearClassifierMixin, SparseCoefMixin
from ..base import BaseEstimator, RegressorMixin
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import (check_array, check_random_state, check_X_y,
deprecated)
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted
from ..externals import six
from .sgd_fast import plain_sgd, average_sgd
from ..utils.fixes import astype
from ..utils.seq_dataset import ArrayDataset, CSRDataset
from ..utils import compute_class_weight
from .sgd_fast import Hinge
from .sgd_fast import SquaredHinge
from .sgd_fast import Log
from .sgd_fast import ModifiedHuber
from .sgd_fast import SquaredLoss
from .sgd_fast import Huber
from .sgd_fast import EpsilonInsensitive
from .sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {"constant": 1, "optimal": 2, "invscaling": 3,
"pa1": 4, "pa2": 5}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
SPARSE_INTERCEPT_DECAY = 0.01
"""For sparse data intercept updates are scaled by this decay factor to avoid
intercept oscillation."""
DEFAULT_EPSILON = 0.1
"""Default value of ``epsilon`` parameter. """
class BaseSGD(six.with_metaclass(ABCMeta, BaseEstimator, SparseCoefMixin)):
"""Base class for SGD classification and regression."""
def __init__(self, loss, penalty='l2', alpha=0.0001, C=1.0,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=0.1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
warm_start=False, average=False):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.n_iter = n_iter
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.warm_start = warm_start
self.average = average
self._validate_params()
self.coef_ = None
if self.average > 0:
self.standard_coef_ = None
self.average_coef_ = None
# iteration count for learning rate schedule
# must not be int (e.g. if ``learning_rate=='optimal'``)
self.t_ = None
def set_params(self, *args, **kwargs):
super(BaseSGD, self).set_params(*args, **kwargs)
self._validate_params()
return self
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self):
"""Validate input params. """
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if self.n_iter <= 0:
raise ValueError("n_iter must be > zero")
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.learning_rate in ("constant", "invscaling"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``. """
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ('huber', 'epsilon_insensitive',
'squared_epsilon_insensitive'):
args = (self.epsilon, )
return loss_class(*args)
except KeyError:
raise ValueError("The loss %s is not supported. " % loss)
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError:
raise ValueError("learning rate %s "
"is not supported. " % learning_rate)
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError:
raise ValueError("Penalty %s is not supported. " % penalty)
def _validate_sample_weight(self, sample_weight, n_samples):
"""Set the sample weight array."""
if sample_weight is None:
# uniform sample weights
sample_weight = np.ones(n_samples, dtype=np.float64, order='C')
else:
# user-provided array
sample_weight = np.asarray(sample_weight, dtype=np.float64,
order="C")
if sample_weight.shape[0] != n_samples:
raise ValueError("Shapes of X and sample_weight do not match.")
return sample_weight
def _allocate_parameter_mem(self, n_classes, n_features, coef_init=None,
intercept_init=None):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros((n_classes, n_features),
dtype=np.float64, order="C")
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes, ):
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64,
order="C")
else:
# allocate coef_ for binary problem
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64,
order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not "
"match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
# allocate intercept_ for binary problem
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init "
"does not match dataset.")
self.intercept_ = intercept_init.reshape(1,)
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = np.zeros(self.coef_.shape,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(self.standard_intercept_.shape,
dtype=np.float64,
order="C")
def _make_dataset(X, y_i, sample_weight):
"""Create ``Dataset`` abstraction for sparse and dense inputs.
This also returns the ``intercept_decay`` which is different
for sparse datasets.
"""
if sp.issparse(X):
dataset = CSRDataset(X.data, X.indptr, X.indices, y_i, sample_weight)
intercept_decay = SPARSE_INTERCEPT_DECAY
else:
dataset = ArrayDataset(X, y_i, sample_weight)
intercept_decay = 1.0
return dataset, intercept_decay
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est.standard_coef_.ravel()
intercept = est.standard_intercept_[0]
average_coef = est.average_coef_.ravel()
average_intercept = est.average_intercept_[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est.standard_coef_[i]
intercept = est.standard_intercept_[i]
average_coef = est.average_coef_[i]
average_intercept = est.average_intercept_[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(est, i, X, y, alpha, C, learning_rate, n_iter,
pos_weight, neg_weight, sample_weight):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
"""
    # if average is not true, average_coef and average_intercept will be
# unused
y_i, coef, intercept, average_coef, average_intercept = \
_prepare_fit_binary(est, y, i)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
dataset, intercept_decay = _make_dataset(X, y_i, sample_weight)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
# XXX should have random_state_!
random_state = check_random_state(est.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if not est.average:
return plain_sgd(coef, intercept, est.loss_function,
penalty_type, alpha, C, est.l1_ratio,
dataset, n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle), seed,
pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_, intercept_decay)
else:
standard_coef, standard_intercept, average_coef, \
average_intercept = average_sgd(coef, intercept, average_coef,
average_intercept,
est.loss_function, penalty_type,
alpha, C, est.l1_ratio, dataset,
n_iter, int(est.fit_intercept),
int(est.verbose), int(est.shuffle),
seed, pos_weight, neg_weight,
learning_rate_type, est.eta0,
est.power_t, est.t_,
intercept_decay,
est.average)
if len(est.classes_) == 2:
est.average_intercept_[0] = average_intercept
else:
est.average_intercept_[i] = average_intercept
return standard_coef, standard_intercept
class BaseSGDClassifier(six.with_metaclass(ABCMeta, BaseSGD,
LinearClassifierMixin)):
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log, ),
"modified_huber": (ModifiedHuber, ),
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(BaseSGDClassifier, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
self.class_weight = class_weight
self.classes_ = None
self.n_jobs = int(n_jobs)
def _partial_fit(self, X, y, alpha, C,
loss, learning_rate, n_iter,
classes, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
self._validate_params()
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(self.class_weight,
self.classes_, y)
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None or coef_init is not None:
self._allocate_parameter_mem(n_classes, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
self.loss_function = self._get_loss_function(loss)
if self.t_ is None:
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
elif n_classes == 2:
self._fit_binary(X, y, alpha=alpha, C=C,
learning_rate=learning_rate,
sample_weight=sample_weight, n_iter=n_iter)
else:
raise ValueError("The number of class labels must be "
"greater than one.")
return self
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if hasattr(self, "classes_"):
self.classes_ = None
X, y = check_X_y(X, y, 'csr', dtype=np.float64, order="C")
n_samples, n_features = X.shape
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
classes = np.unique(y)
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_coef_ = self.coef_
self.standard_intercept_ = self.intercept_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
self._partial_fit(X, y, alpha, C, loss, learning_rate, self.n_iter,
classes, sample_weight, coef_init, intercept_init)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight,
learning_rate, n_iter):
"""Fit a binary classifier on X and y. """
coef, intercept = fit_binary(self, 1, X, y, alpha, C,
learning_rate, n_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight)
self.t_ += n_iter * X.shape[0]
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self.average_coef_.reshape(1, -1)
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_.reshape(1, -1)
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate,
sample_weight, n_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OVA: One Versus All.
"""
# Use joblib to fit OvA in parallel.
result = Parallel(n_jobs=self.n_jobs, backend="threading",
verbose=self.verbose)(
delayed(fit_binary)(self, i, X, y, alpha, C, learning_rate,
n_iter, self._expanded_class_weight[i], 1.,
sample_weight)
for i in range(len(self.classes_)))
for i, (_, intercept) in enumerate(result):
self.intercept_[i] = intercept
self.t_ += n_iter * X.shape[0]
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.standard_intercept_ = np.atleast_1d(intercept)
self.intercept_ = self.standard_intercept_
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data
y : numpy array, shape (n_samples,)
Subset of the target values
classes : array, shape (n_classes,)
Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
"""
if self.class_weight in ['balanced', 'auto']:
            raise ValueError("class_weight '{0}' is not supported for "
                             "partial_fit. In order to use 'balanced' weights, "
                             "use compute_class_weight('{0}', classes, y). "
                             "In place of y you can use a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight))
return self._partial_fit(X, y, alpha=self.alpha, C=1.0, loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
classes=classes, sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
            constructor) if class_weight is specified.
Returns
-------
self : returns an instance of self.
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init, intercept_init=intercept_init,
sample_weight=sample_weight)
class SGDClassifier(BaseSGDClassifier, _LearntSelectorMixin):
"""Linear classifiers (SVM, logistic regression, a.o.) with SGD training.
This estimator implements regularized linear models with stochastic
gradient descent (SGD) learning: the gradient of the loss is estimated
    one sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning, see the partial_fit method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'hinge', 'log', 'modified_huber', 'squared_hinge',\
'perceptron', or a regression loss: 'squared_loss', 'huber',\
'epsilon_insensitive', or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see SGDRegressor for a description.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
n_jobs : integer, optional
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation. -1 means 'all CPUs'. Defaults
to 1.
learning_rate : string, optional
The learning rate schedule:
constant: eta = eta0
optimal: eta = 1.0 / (t + t0) [default]
invscaling: eta = eta0 / pow(t, power_t)
where t0 is chosen by a heuristic proposed by Leon Bottou.
eta0 : double
The initial learning rate for the 'constant' or 'invscaling'
schedules. The default value is 0.0 as eta0 is not used by the
default schedule 'optimal'.
power_t : double
The exponent for inverse scaling learning rate [default 0.5].
class_weight : dict, {class_label: weight} or "balanced" or None, optional
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So average=10 will begin averaging after seeing 10 samples.
Attributes
----------
coef_ : array, shape (1, n_features) if n_classes == 2 else (n_classes,\
n_features)
Weights assigned to the features.
intercept_ : array, shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> clf = linear_model.SGDClassifier()
>>> clf.fit(X, Y)
... #doctest: +NORMALIZE_WHITESPACE
SGDClassifier(alpha=0.0001, average=False, class_weight=None, epsilon=0.1,
eta0=0.0, fit_intercept=True, l1_ratio=0.15,
learning_rate='optimal', loss='hinge', n_iter=5, n_jobs=1,
penalty='l2', power_t=0.5, random_state=None, shuffle=True,
verbose=0, warm_start=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
LinearSVC, LogisticRegression, Perceptron
"""
def __init__(self, loss="hinge", penalty='l2', alpha=0.0001, l1_ratio=0.15,
fit_intercept=True, n_iter=5, shuffle=True, verbose=0,
epsilon=DEFAULT_EPSILON, n_jobs=1, random_state=None,
learning_rate="optimal", eta0=0.0, power_t=0.5,
class_weight=None, warm_start=False, average=False):
super(SGDClassifier, self).__init__(
loss=loss, penalty=penalty, alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept, n_iter=n_iter, shuffle=shuffle,
verbose=verbose, epsilon=epsilon, n_jobs=n_jobs,
random_state=random_state, learning_rate=learning_rate, eta0=eta0,
power_t=power_t, class_weight=class_weight, warm_start=warm_start,
average=average)
def _check_proba(self):
check_is_fitted(self, "t_")
if self.loss not in ("log", "modified_huber"):
raise AttributeError("probability estimates are not available for"
" loss=%r" % self.loss)
@property
def predict_proba(self):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
http://www.research.ibm.com/people/z/zadrozny/kdd2002-Transf.pdf
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = (len(self.classes_) == 2)
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
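            # In the binary case prob is a view into the second column of prob2, so the in-place clip and scaling below write directly into prob2.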
np.clip(scores, -1, 1, prob)
prob += 1.
prob /= 2.
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = (prob_sum == 0)
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError("predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)" % self.loss)
@property
def predict_log_proba(self):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
class BaseSGDRegressor(BaseSGD, RegressorMixin):
loss_functions = {
"squared_loss": (SquaredLoss, ),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive,
DEFAULT_EPSILON),
}
@abstractmethod
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(BaseSGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
def _partial_fit(self, X, y, alpha, C, loss, learning_rate,
n_iter, sample_weight,
coef_init, intercept_init):
X, y = check_X_y(X, y, "csr", copy=False, order='C', dtype=np.float64)
y = astype(y, np.float64, copy=False)
n_samples, n_features = X.shape
self._validate_params()
# Allocate datastructures from input arguments
sample_weight = self._validate_sample_weight(sample_weight, n_samples)
if self.coef_ is None:
self._allocate_parameter_mem(1, n_features,
coef_init, intercept_init)
elif n_features != self.coef_.shape[-1]:
raise ValueError("Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1]))
if self.average > 0 and self.average_coef_ is None:
self.average_coef_ = np.zeros(n_features,
dtype=np.float64,
order="C")
self.average_intercept_ = np.zeros(1,
dtype=np.float64,
order="C")
self._fit_regressor(X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data
y : numpy array of shape (n_samples,)
Subset of target values
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : returns an instance of self.
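Examples
--------
A minimal incremental-fitting sketch (hypothetical mini-batches drawn at
random; any streaming data source would do):
>>> import numpy as np
>>> from sklearn.linear_model import SGDRegressor
>>> reg = SGDRegressor()
>>> for _ in range(3):
...     X_batch = np.random.randn(20, 4)
...     y_batch = np.random.randn(20)
...     reg = reg.partial_fit(X_batch, y_batch)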
"""
return self._partial_fit(X, y, self.alpha, C=1.0,
loss=self.loss,
learning_rate=self.learning_rate, n_iter=1,
sample_weight=sample_weight,
coef_init=None, intercept_init=None)
def _fit(self, X, y, alpha, C, loss, learning_rate, coef_init=None,
intercept_init=None, sample_weight=None):
if self.warm_start and self.coef_ is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self.standard_intercept_ = self.intercept_
self.standard_coef_ = self.coef_
self.average_coef_ = None
self.average_intercept_ = None
# Clear iteration count for multiple call to fit.
self.t_ = None
return self._partial_fit(X, y, alpha, C, loss, learning_rate,
self.n_iter, sample_weight,
coef_init, intercept_init)
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_features,)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (1,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : returns an instance of self.
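Examples
--------
A minimal sketch (hypothetical data) of warm-starting a second fit from
previously learned parameters via coef_init/intercept_init:
>>> import numpy as np
>>> from sklearn.linear_model import SGDRegressor
>>> X = np.random.randn(50, 3)
>>> y = np.random.randn(50)
>>> reg = SGDRegressor().fit(X, y)
>>> reg2 = SGDRegressor().fit(X, y, coef_init=reg.coef_,
...                           intercept_init=reg.intercept_)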
"""
return self._fit(X, y, alpha=self.alpha, C=1.0,
loss=self.loss, learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight)
@deprecated(" and will be removed in 0.19.")
def decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self, ["t_", "coef_", "intercept_"], all_or_any=all)
X = check_array(X, accept_sparse='csr')
scores = safe_sparse_dot(X, self.coef_.T,
dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
array, shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(self, X, y, alpha, C, loss, learning_rate,
sample_weight, n_iter):
dataset, intercept_decay = _make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if self.t_ is None:
self.t_ = 1.0
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
if self.average > 0:
self.standard_coef_, self.standard_intercept_, \
self.average_coef_, self.average_intercept_ =\
average_sgd(self.standard_coef_,
self.standard_intercept_[0],
self.average_coef_,
self.average_intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay, self.average)
self.average_intercept_ = np.atleast_1d(self.average_intercept_)
self.standard_intercept_ = np.atleast_1d(self.standard_intercept_)
self.t_ += n_iter * X.shape[0]
if self.average <= self.t_ - 1.0:
self.coef_ = self.average_coef_
self.intercept_ = self.average_intercept_
else:
self.coef_ = self.standard_coef_
self.intercept_ = self.standard_intercept_
else:
self.coef_, self.intercept_ = \
plain_sgd(self.coef_,
self.intercept_[0],
loss_function,
penalty_type,
alpha, C,
self.l1_ratio,
dataset,
n_iter,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0, 1.0,
learning_rate_type,
self.eta0, self.power_t, self.t_,
intercept_decay)
self.t_ += n_iter * X.shape[0]
self.intercept_ = np.atleast_1d(self.intercept_)
class SGDRegressor(BaseSGDRegressor, _LearntSelectorMixin):
"""Linear model fitted by minimizing a regularized empirical loss with SGD
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, 'squared_loss', 'huber', 'epsilon_insensitive', \
or 'squared_epsilon_insensitive'
The loss function to be used. Defaults to 'squared_loss' which refers
to the ordinary least squares fit. 'huber' modifies 'squared_loss' to
focus less on getting outliers correct by switching from squared to
linear loss past a distance of epsilon. 'epsilon_insensitive' ignores
errors less than epsilon and is linear past that; this is the loss
function used in SVR. 'squared_epsilon_insensitive' is the same but
becomes squared loss past a tolerance of epsilon.
penalty : str, 'none', 'l2', 'l1', or 'elasticnet'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float
Constant that multiplies the regularization term. Defaults to 0.0001
l1_ratio : float
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Defaults to 0.15.
fit_intercept : bool
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered. Defaults to True.
n_iter : int, optional
The number of passes over the training data (aka epochs). The number
of iterations is set to 1 if using partial_fit.
Defaults to 5.
shuffle : bool, optional
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
verbose : integer, optional
The verbosity level.
epsilon : float
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
learning_rate : string, optional
The learning rate:
constant: eta = eta0
optimal: eta = 1.0/(alpha * t)
invscaling: eta = eta0 / pow(t, power_t) [default]
eta0 : double, optional
The initial learning rate [default 0.01].
power_t : double, optional
The exponent for inverse scaling learning rate [default 0.25].
warm_start : bool, optional
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
average : bool or int, optional
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10 samples.
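As an illustrative note (hypothetical numbers, not part of the formal
parameter descriptions): with eta0=0.01 and power_t=0.25 the default
'invscaling' schedule gives eta = 0.01 / 16**0.25 = 0.005 after t=16
updates, and with ``average=10`` the plain SGD weights are reported in
``coef_`` until 10 samples have been seen, after which the averaged
weights are used.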
Attributes
----------
coef_ : array, shape (n_features,)
Weights assigned to the features.
intercept_ : array, shape (1,)
The intercept term.
average_coef_ : array, shape (n_features,)
Averaged weights assigned to the features.
average_intercept_ : array, shape (1,)
The averaged intercept term.
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = linear_model.SGDRegressor()
>>> clf.fit(X, y)
... #doctest: +NORMALIZE_WHITESPACE
SGDRegressor(alpha=0.0001, average=False, epsilon=0.1, eta0=0.01,
fit_intercept=True, l1_ratio=0.15, learning_rate='invscaling',
loss='squared_loss', n_iter=5, penalty='l2', power_t=0.25,
random_state=None, shuffle=True, verbose=0, warm_start=False)
See also
--------
Ridge, ElasticNet, Lasso, SVR
"""
def __init__(self, loss="squared_loss", penalty="l2", alpha=0.0001,
l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True,
verbose=0, epsilon=DEFAULT_EPSILON, random_state=None,
learning_rate="invscaling", eta0=0.01, power_t=0.25,
warm_start=False, average=False):
super(SGDRegressor, self).__init__(loss=loss, penalty=penalty,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
n_iter=n_iter, shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0, power_t=power_t,
warm_start=warm_start,
average=average)
|
bsd-3-clause
|
mgr0dzicki/python-neo
|
neo/io/neuralynxio.py
|
2
|
106780
|
# -*- coding: utf-8 -*-
"""
Class for reading data from Neuralynx files.
This IO supports NCS, NEV, NSE and NTT file formats.
Depends on: numpy
Supported: Read
Author: Julia Sprenger, Carlos Canova
Adapted from the exampleIO of python-neo
"""
# needed for python 3 compatibility
from __future__ import absolute_import, division
import sys
import os
import warnings
import codecs
import copy
import re
import datetime
import pkg_resources
if hasattr(pkg_resources, 'pkg_resources'):
parse_version = pkg_resources.pkg_resources.parse_version
else:
parse_version = pkg_resources.parse_version
import numpy as np
import quantities as pq
from neo.io.baseio import BaseIO
from neo.core import (Block, Segment, ChannelIndex, AnalogSignal, SpikeTrain,
Event, Unit)
from os import listdir, sep
from os.path import isfile, getsize
import hashlib
import pickle
class NeuralynxIO(BaseIO):
"""
Class for reading Neuralynx files.
It enables reading:
- :class:`Block`
- :class:`Segment`
- :class:`AnalogSignal`
- :class:`SpikeTrain`
Usage:
from neo import io
import quantities as pq
import matplotlib.pyplot as plt
session_folder = '../Data/2014-07-24_10-31-02'
NIO = io.NeuralynxIO(session_folder,print_diagnostic = True)
block = NIO.read_block(t_starts = 0.1*pq.s, t_stops = 0.2*pq.s,
events=True)
seg = block.segments[0]
analogsignal = seg.analogsignals[0]
plt.plot(analogsignal.times.rescale(pq.ms), analogsignal.magnitude)
plt.show()
"""
is_readable = True # This class can only read data
is_writable = False # write is not supported
# This class is able to directly or indirectly handle the following objects
# You can notice that this greatly simplifies the full Neo object hierarchy
supported_objects = [Segment, AnalogSignal, SpikeTrain, Event]
# This class can return either a Block or a Segment
# The first one is the default ( self.read )
# These lists should go from highest object to lowest object because
# common_io_test assumes it.
readable_objects = [Segment, AnalogSignal, SpikeTrain]
# This class is not able to write objects
writeable_objects = []
has_header = False
is_streameable = False
# This is for GUI stuff : a definition for parameters when reading.
# This dict should be keyed by object (`Block`). Each entry is a list
# of tuple. The first entry in each tuple is the parameter name. The
# second entry is a dict with keys 'value' (for default value),
# and 'label' (for a descriptive name).
# Note that if the highest-level object requires parameters,
# common_io_test will be skipped.
read_params = {
Segment: [('waveforms', {'value': True})],
Block: [('waveforms', {'value': False})]
}
# write is not supported, so no GUI stuff
write_params = None
name = 'Neuralynx'
description = 'This IO reads .nse/.ncs/.nev files of the Neuralynx (' \
'Cheetah) recordings system (tetrodes).'
extensions = ['nse', 'ncs', 'nev', 'ntt']
# mode can be 'file' or 'dir' or 'fake' or 'database'
# the main case is 'file' but some readers are based on a directory or
# a database; this info is also for GUI stuff
mode = 'dir'
# hardcoded parameters from manual, which are not present in Neuralynx
# data files
# unit of timestamps in different files
nev_time_unit = pq.microsecond
ncs_time_unit = pq.microsecond
nse_time_unit = pq.microsecond
ntt_time_unit = pq.microsecond
# unit of sampling rate in different files
ncs_sr_unit = pq.Hz
nse_sr_unit = pq.Hz
ntt_sr_unit = pq.Hz
def __init__(self, sessiondir=None, cachedir=None, use_cache='hash',
print_diagnostic=False, filename=None):
"""
Arguments:
sessiondir: the directory in which the files of the recording session
are collected. Default 'None'.
print_diagnostic: indicates whether information about the loading of
data is printed to the terminal. Default 'False'.
cachedir: the directory where metadata about the recording
session is
read from and written to.
use_cache: method used for cache identification. Possible values:
'hash'/
'always'/'datesize'/'never'. Default 'hash'
filename: this argument is handled the same as sessiondir and is only
added for external IO interfaces. The value of sessiondir
has priority over filename.
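Usage sketch (hypothetical paths):
NIO = NeuralynxIO(sessiondir='../Data/2014-07-24_10-31-02',
                  cachedir='../cache', use_cache='datesize')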
"""
BaseIO.__init__(self)
# possibility to provide filename instead of sessiondir for IO
# compatibility
if filename is not None and sessiondir is None:
sessiondir = filename
if sessiondir is None:
raise ValueError('Must provide a directory containing data files of'
' one recording session.')
# remove filename if specific file was passed
if any([sessiondir.endswith('.%s' % ext) for ext in self.extensions]):
sessiondir = sessiondir[:sessiondir.rfind(sep)]
# remove / for consistent directory handling
if sessiondir.endswith(sep):
sessiondir = sessiondir.rstrip(sep)
# set general parameters of this IO
self.sessiondir = sessiondir
self.filename = sessiondir.split(sep)[-1]
self._print_diagnostic = print_diagnostic
self.associated = False
self._associate(cachedir=cachedir, usecache=use_cache)
self._diagnostic_print(
'Initialized IO for session %s' % self.sessiondir)
def read_block(self, lazy=False, cascade=True, t_starts=None,
t_stops=None,
electrode_list=None, unit_list=None, analogsignals=True,
events=False,
waveforms=False):
"""
Reads data in a requested time window and returns a block with as many
segments as necessary to contain these data.
Arguments:
lazy : Postpone actual reading of the data files. Default 'False'.
cascade : Do not postpone reading subsequent neo types (segments).
Default 'True'.
t_starts : list of quantities or quantity describing the start of
the requested time window to load. If None or [None]
the complete session is loaded. Default 'None'.
t_stops : list of quantities or quantity describing the end of the
requested time window to load. Has to contain the
same number of values as t_starts. If None or [None]
the complete session is loaded. Default 'None'.
electrode_list : list of integers containing the IDs of the
requested channels to load. If [] or None all available
channels will be loaded.
Default: None.
unit_list : list of integers containing the IDs of the requested
units to load. If [] or None all available units
will be loaded.
Default: None.
analogsignals : boolean, indicating whether analogsignals should be
read. Default: True.
events : Loading events. If True all available events in the given
time window will be read. Default: False.
waveforms : Load waveform for spikes in the requested time
window. Default: False.
Returns: Block object containing the requested data in neo structures.
Usage:
from neo import io
import quantities as pq
import matplotlib.pyplot as plt
session_folder = '../Data/2014-07-24_10-31-02'
NIO = io.NeuralynxIO(session_folder,print_diagnostic = True)
block = NIO.read_block(lazy = False, cascade = True,
t_starts = 0.1*pq.s, t_stops = 0.2*pq.s,
electrode_list = [1,5,10],
unit_list = [1,2,3],
events = True, waveforms = True)
plt.plot(block.segments[0].analogsignals[0])
plt.show()
"""
# Create block
bl = Block(file_origin=self.sessiondir)
bl.name = self.filename
if not cascade:
return bl
# Checking input of t_start and t_stop
# For lazy users that specify x,x instead of [x],[x] for t_starts,
# t_stops
if t_starts is None:
t_starts = [None]
elif type(t_starts) == pq.Quantity:
t_starts = [t_starts]
elif type(t_starts) != list or any(
[(type(i) != pq.Quantity and i is not None) for i in t_starts]):
raise ValueError('Invalid specification of t_starts.')
if t_stops is None:
t_stops = [None]
elif type(t_stops) == pq.Quantity:
t_stops = [t_stops]
elif type(t_stops) != list or any(
[(type(i) != pq.Quantity and i is not None) for i in t_stops]):
raise ValueError('Invalid specification of t_stops.')
# adapting t_starts and t_stops to known gap times (extracted in
# association process / initialization)
for gap in self.parameters_global['gaps']:
# gap=gap_list[0]
for e in range(len(t_starts)):
t1, t2 = t_starts[e], t_stops[e]
gap_start = gap[1] * self.ncs_time_unit - \
self.parameters_global['t_start']
gap_stop = gap[2] * self.ncs_time_unit - self.parameters_global[
't_start']
if ((t1 is None and t2 is None)
or (t1 is None and t2 is not None and t2.rescale(
self.ncs_time_unit) > gap_stop)
or (t2 is None and t1 is not None and t1.rescale(
self.ncs_time_unit) < gap_stop)
or (t1 is not None and t2 is not None and t1.rescale(
self.ncs_time_unit) < gap_start
and t2.rescale(self.ncs_time_unit) > gap_stop)):
# adapting first time segment
t_stops[e] = gap_start
# inserting second time segment
t_starts.insert(e + 1, gap_stop)
t_stops.insert(e + 1, t2)
warnings.warn(
'Substituted t_starts and t_stops in order to skip '
'gap in recording session.')
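# Illustrative example with hypothetical numbers: for a single
# recording gap from 2 s to 3 s, a request of t_starts=[0 s],
# t_stops=[5 s] is rewritten above to t_starts=[0 s, 3 s],
# t_stops=[2 s, 5 s], so that one segment is created before the gap
# and one after it.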
# loading all channels if empty electrode_list
if electrode_list == [] or electrode_list is None:
electrode_list = self.parameters_ncs.keys()
# adding a segment for each t_start, t_stop pair
for t_start, t_stop in zip(t_starts, t_stops):
seg = self.read_segment(lazy=lazy, cascade=cascade,
t_start=t_start, t_stop=t_stop,
electrode_list=electrode_list,
unit_list=unit_list,
analogsignals=analogsignals, events=events,
waveforms=waveforms)
bl.segments.append(seg)
# generate units
units = []
channel_unit_collection = {}
for st in [s for seg in bl.segments for s in seg.spiketrains]:
# collecting spiketrains of same channel and unit id to generate
# common unit
chuid = (st.annotations['channel_index'], st.annotations['unit_id'])
if chuid in channel_unit_collection:
channel_unit_collection[chuid].append(st)
else:
channel_unit_collection[chuid] = [st]
for chuid in channel_unit_collection:
sts = channel_unit_collection[chuid]
unit = Unit(name='Channel %i, Unit %i' % chuid)
unit.spiketrains.extend(sts)
units.append(unit)
# generate one ChannelIndex for each analogsignal
for anasig in [a for seg in bl.segments for a in seg.analogsignals]:
channelids = anasig.annotations['channel_index']
channel_names = ['channel %i' % i for i in channelids]
channelidx = ChannelIndex(index=range(len(channelids)),
channel_names=channel_names,
name='channel ids for all analogsignal '
'"%s"' % anasig.name,
channel_ids=channelids)
channelidx.analogsignals.append(anasig)
bl.channel_indexes.append(channelidx)
# generate channel indexes for units
channelids = [unit.spiketrains[0].annotations['channel_index']
for unit in units]
channel_names = ['channel %i' % i for i in channelids]
channelidx = ChannelIndex(index=range(len(channelids)),
channel_names=channel_names,
name='channel ids for all spiketrains',
channel_ids=channelids)
channelidx.units.extend(units)
bl.channel_indexes.append(channelidx)
bl.create_many_to_one_relationship()
# Adding global parameters to block annotation
bl.annotations.update(self.parameters_global)
return bl
def read_segment(self, lazy=False, cascade=True, t_start=None, t_stop=None,
electrode_list=None, unit_list=None, analogsignals=True,
events=False, waveforms=False):
"""Reads one Segment.
The Segment will contain one AnalogSignal for each channel
and will go from t_start to t_stop.
Arguments:
lazy : Postpone actual reading of the data files. Default 'False'.
cascade : Do not postpone reading subsequent neo types (SpikeTrains,
AnalogSignals, Events).
Default 'True'.
t_start : time (quantity) that the Segment begins. Default None.
t_stop : time (quantity) that the Segment ends. Default None.
electrode_list : list of integers containing the IDs of the
requested channels to load. If [] or None all available
channels will be loaded.
Default: None.
unit_list : list of integers containing the IDs of the requested
units to load. If [] or None all available units
will be loaded. If False, no unit will be loaded.
Default: None.
analogsignals : boolean, indicating whether analogsignals should be
read. Default: True.
events : Loading events. If True all available events in the given
time window will be read. Default: False.
waveforms : Load waveform for spikes in the requested time
window. Default: False.
Returns:
Segment object containing neo objects, which contain the data.
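Usage sketch (hypothetical session folder and time window):
NIO = NeuralynxIO('../Data/2014-07-24_10-31-02', use_cache='never')
seg = NIO.read_segment(t_start=0.1 * pq.s, t_stop=0.2 * pq.s,
                       electrode_list=[1, 5], waveforms=True)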
"""
# input check
# loading all channels if empty electrode_list
if electrode_list == [] or electrode_list is None:
electrode_list = self.parameters_ncs.keys()
elif electrode_list is None:
raise ValueError('Electrode_list can not be None.')
elif [v for v in electrode_list if
v in self.parameters_ncs.keys()] == []:
# warn if none of the requested channels are present in this session
warnings.warn('Requested channels %s are not present in session '
'(contains only %s)' % (
electrode_list, self.parameters_ncs.keys()))
electrode_list = []
seg = Segment(file_origin=self.filename)
if not cascade:
return seg
# generate empty segment for analogsignal collection
empty_seg = Segment(file_origin=self.filename)
# Reading NCS Files #
# selecting ncs files to load based on electrode_list requested
if analogsignals:
for chid in electrode_list:
if chid in self.parameters_ncs:
file_ncs = self.parameters_ncs[chid]['filename']
self.read_ncs(file_ncs, empty_seg, lazy, cascade,
t_start=t_start, t_stop=t_stop)
else:
self._diagnostic_print('Can not load ncs of channel %i. '
'No corresponding ncs file '
'present.' % (chid))
# supplementary merge function, should be replaced by neo utility
# function
def merge_analogsignals(anasig_list):
for aid, anasig in enumerate(anasig_list):
anasig.channel_index = None
if aid == 0:
full_analogsignal = anasig
else:
full_analogsignal = full_analogsignal.merge(anasig)
for key in anasig_list[0].annotations.keys():
listified_values = [a.annotations[key] for a in anasig_list]
full_analogsignal.annotations[key] = listified_values
return full_analogsignal
analogsignal = merge_analogsignals(empty_seg.analogsignals)
seg.analogsignals.append(analogsignal)
analogsignal.segment = seg
# Reading NEV Files (Events)#
# reading all files available
if events:
for filename_nev in self.nev_asso:
self.read_nev(filename_nev, seg, lazy, cascade, t_start=t_start,
t_stop=t_stop)
# Reading Spike Data only if requested
if unit_list is not False:
# Reading NSE Files (Spikes)#
# selecting nse files to load based on electrode_list requested
for chid in electrode_list:
if chid in self.parameters_nse:
filename_nse = self.parameters_nse[chid]['filename']
self.read_nse(filename_nse, seg, lazy, cascade,
t_start=t_start, t_stop=t_stop,
waveforms=waveforms)
else:
self._diagnostic_print('Can not load nse of channel %i. '
'No corresponding nse file '
'present.' % (chid))
# Reading ntt Files (Spikes)#
# selecting ntt files to load based on electrode_list requested
for chid in electrode_list:
if chid in self.parameters_ntt:
filename_ntt = self.parameters_ntt[chid]['filename']
self.read_ntt(filename_ntt, seg, lazy, cascade,
t_start=t_start, t_stop=t_stop,
waveforms=waveforms)
else:
self._diagnostic_print('Can not load ntt of channel %i. '
'No corresponding ntt file '
'present.' % (chid))
return seg
def read_ncs(self, filename_ncs, seg, lazy=False, cascade=True,
t_start=None, t_stop=None):
'''
Reading a single .ncs file from the associated Neuralynx recording
session.
In case of a recording gap between t_start and t_stop, data are only
loaded until gap start.
For loading data across recording gaps use read_block(...).
Arguments:
filename_ncs : Name of the .ncs file to be loaded.
seg : Neo Segment, to which the AnalogSignal containing the data
will be attached.
lazy : Postpone actual reading of the data. Instead provide a dummy
AnalogSignal. Default 'False'.
cascade : Not used in this context. Default: 'True'.
t_start : time or sample (quantity or integer) that the
AnalogSignal begins.
Default None.
t_stop : time or sample (quantity or integer) that the
AnalogSignal ends.
Default None.
Returns:
None
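Usage sketch (hypothetical file name; NIO is an already associated
NeuralynxIO instance):
seg = Segment(file_origin='example')
NIO.read_ncs('CSC1.ncs', seg, t_start=0 * pq.s, t_stop=1 * pq.s)
anasig = seg.analogsignals[0]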
'''
# checking format of filename and correcting if necessary
if filename_ncs[-4:] != '.ncs':
filename_ncs = filename_ncs + '.ncs'
if sep in filename_ncs:
filename_ncs = filename_ncs.split(sep)[-1]
# Extracting the channel id from prescan (association) of ncs files with
# this recording session
chid = self.get_channel_id_by_file_name(filename_ncs)
if chid is None:
raise ValueError('NeuralynxIO is attempting to read a file '
'not associated to this session (%s).' % (
filename_ncs))
if not cascade:
return
# read data
header_time_data = self.__mmap_ncs_packet_timestamps(filename_ncs)
data = self.__mmap_ncs_data(filename_ncs)
# ensure meaningful values for requested start and stop times
# in case time is provided in samples: transform to absolute time units
if isinstance(t_start, int):
t_start = t_start / self.parameters_ncs[chid]['sampling_rate']
if isinstance(t_stop, int):
t_stop = t_stop / self.parameters_ncs[chid]['sampling_rate']
# rescaling to global start time of recording (time of first sample
# in any file type)
if t_start is None or t_start < (
self.parameters_ncs[chid]['t_start'] -
self.parameters_global[
't_start']):
t_start = (
self.parameters_ncs[chid]['t_start'] - self.parameters_global[
't_start'])
if t_start > (
self.parameters_ncs[chid]['t_stop'] -
self.parameters_global[
't_start']):
raise ValueError(
'Requested times window (%s to %s) is later than data are '
'recorded (t_stop = %s) '
'for file %s.' % (t_start, t_stop,
(self.parameters_ncs[chid]['t_stop'] -
self.parameters_global['t_start']),
filename_ncs))
if t_stop is None or t_stop > (
self.parameters_ncs[chid]['t_stop'] -
self.parameters_global[
't_start']):
t_stop = (
self.parameters_ncs[chid]['t_stop'] - self.parameters_global[
't_start'])
if t_stop < (
self.parameters_ncs[chid]['t_start'] -
self.parameters_global['t_start']):
raise ValueError(
'Requested times window (%s to %s) is earlier than data '
'are '
'recorded (t_start = %s) '
'for file %s.' % (t_start, t_stop,
(self.parameters_ncs[chid]['t_start'] -
self.parameters_global['t_start']),
filename_ncs))
if t_start >= t_stop:
raise ValueError(
'Requested start time (%s) is later than / equal to stop '
'time '
'(%s) '
'for file %s.' % (t_start, t_stop, filename_ncs))
# Extracting data signal in requested time window
unit = pq.dimensionless # default value
if lazy:
sig = []
p_id_start = 0
else:
tstamps = header_time_data * self.ncs_time_unit - \
self.parameters_global['t_start']
# find data packet to start with signal construction
starts = np.where(tstamps <= t_start)[0]
if len(starts) == 0:
self._diagnostic_print(
'Requested AnalogSignal not present in this time '
'interval.')
return
else:
# first packet to be included into signal
p_id_start = starts[-1]
# find data packet where signal ends (due to gap or t_stop)
stops = np.where(tstamps >= t_stop)[0]
if len(stops) != 0:
first_stop = [stops[0]]
else:
first_stop = []
# last packet to be included in signal
p_id_stop = min(first_stop + [len(data)])
# search gaps in recording in time range to load
gap_packets = [gap_id[0] for gap_id in
self.parameters_ncs[chid]['gaps'] if
gap_id[0] > p_id_start]
if len(gap_packets) > 0 and min(gap_packets) < p_id_stop:
p_id_stop = min(gap_packets)
warnings.warn(
'Analogsignalarray was shortened due to gap in '
'recorded '
'data '
' of file %s at packet id %i' % (
filename_ncs, min(gap_packets)))
# search broken packets in time range to load
broken_packets = []
if 'broken_packet' in self.parameters_ncs[chid]:
broken_packets = [packet[0] for packet in
self.parameters_ncs[chid]['broken_packet']
if packet[0] > p_id_start]
if len(broken_packets) > 0 and min(broken_packets) < p_id_stop:
p_id_stop = min(broken_packets)
warnings.warn(
'Analogsignalarray was shortened due to broken data '
'packet in recorded data '
' of file %s at packet id %i' % (
filename_ncs, min(broken_packets)))
# construct signal in valid packet range
sig = np.array(data[p_id_start:p_id_stop + 1], dtype=float)
sig = sig.reshape(len(sig) * len(sig[0]))
# ADBitVolts is not guaranteed to be present in the header!
if 'ADBitVolts' in self.parameters_ncs[chid]:
sig *= self.parameters_ncs[chid]['ADBitVolts']
unit = pq.V
else:
warnings.warn(
'Could not transform data from file %s into physical '
'signal. '
'Missing "ADBitVolts" value in text header.' % filename_ncs)
# defining sampling rate for rescaling purposes
sampling_rate = self.parameters_ncs[chid]['sampling_unit'][0]
# creating neo AnalogSignal containing data
anasig = AnalogSignal(signal=pq.Quantity(sig, unit, copy=False),
sampling_rate=1 * sampling_rate,
# rescaling t_start to sampling time units
t_start=(header_time_data[
p_id_start] * self.ncs_time_unit -
self.parameters_global[
't_start']).rescale(
1 / sampling_rate),
name='channel_%i' % (chid),
channel_index=chid)
# removing protruding parts of first and last data packet
if anasig.t_start < t_start.rescale(anasig.t_start.units):
anasig = anasig.time_slice(t_start.rescale(anasig.t_start.units),
None)
if anasig.t_stop > t_stop.rescale(anasig.t_start.units):
anasig = anasig.time_slice(None,
t_stop.rescale(anasig.t_start.units))
annotations = copy.deepcopy(self.parameters_ncs[chid])
for pop_key in ['sampling_rate', 't_start']:
if pop_key in annotations:
annotations.pop(pop_key)
anasig.annotations.update(annotations)
anasig.annotations['electrode_id'] = chid
# this annotation is necessary for automatic generation of
# recordingchannels
anasig.annotations['channel_index'] = chid
anasig.segment = seg # needed for merge function of analogsignals
seg.analogsignals.append(anasig)
def read_nev(self, filename_nev, seg, lazy=False, cascade=True,
t_start=None, t_stop=None):
'''
Reads associated nev file and attaches its content as eventarray to
provided neo segment. In contrast to read_ncs, times can not be provided
in numbers of samples as a nev file has no inherent sampling rate.
Arguments:
filename_nev : Name of the .nev file to be loaded.
seg : Neo Segment, to which the Event containing the data
will be attached.
lazy : Postpone actual reading of the data. Instead provide a dummy
Event. Default 'False'.
cascade : Not used in this context. Default: 'True'.
t_start : time (quantity) that the Events begin.
Default None.
t_stop : time (quantity) that the Event end.
Default None.
Returns:
None
'''
if filename_nev[-4:] != '.nev':
filename_nev += '.nev'
if sep in filename_nev:
filename_nev = filename_nev.split(sep)[-1]
if filename_nev not in self.nev_asso:
raise ValueError('NeuralynxIO is attempting to read a file '
'not associated to this session (%s).' % (
filename_nev))
# # ensure meaningful values for requested start and stop times
# # providing time in samples for a nev file does not make sense as we
# don't know the underlying sampling rate
if isinstance(t_start, int):
raise ValueError(
'Requesting event information from nev file in samples '
'does '
'not make sense. '
'Requested t_start %s' % t_start)
if isinstance(t_stop, int):
raise ValueError(
'Requesting event information from nev file in samples '
'does '
'not make sense. '
'Requested t_stop %s' % t_stop)
# ensure meaningful values for requested start and stop times
if t_start is None or t_start < (
self.parameters_nev[filename_nev]['t_start'] -
self.parameters_global['t_start']):
t_start = (self.parameters_nev[filename_nev]['t_start'] -
self.parameters_global['t_start'])
if t_start > (self.parameters_nev[filename_nev]['t_stop'] -
self.parameters_global['t_start']):
raise ValueError(
'Requested times window (%s to %s) is later than data are '
'recorded (t_stop = %s) '
'for file %s.' % (t_start, t_stop,
(self.parameters_nev[filename_nev][
't_stop'] -
self.parameters_global['t_start']),
filename_nev))
if t_stop is None or t_stop > (
self.parameters_nev[filename_nev]['t_stop'] -
self.parameters_global['t_start']):
t_stop = (self.parameters_nev[filename_nev]['t_stop'] -
self.parameters_global['t_start'])
if t_stop < (self.parameters_nev[filename_nev]['t_start'] -
self.parameters_global['t_start']):
raise ValueError(
'Requested times window (%s to %s) is earlier than data '
'are '
'recorded (t_start = %s) '
'for file %s.' % (t_start, t_stop,
(
self.parameters_nev[filename_nev][
't_start'] -
self.parameters_global['t_start']),
filename_nev))
if t_start >= t_stop:
raise ValueError(
'Requested start time (%s) is later than / equal to stop '
'time '
'(%s) '
'for file %s.' % (t_start, t_stop, filename_nev))
data = self.__mmap_nev_file(filename_nev)
# Extracting all events for one event type and put it into an event
# array
# TODO: Check if this is the correct way of event creation.
for event_type in self.parameters_nev[filename_nev]['event_types']:
# Extract all time stamps of digital markers and rescaling time
type_mask = [i for i in range(len(data)) if
(data[i][4] == event_type['event_id']
and data[i][5] == event_type['nttl']
and data[i][10].decode('latin-1') == event_type[
'name'])]
marker_times = [t[3] for t in
data[type_mask]] * self.nev_time_unit - \
self.parameters_global['t_start']
# only consider Events in the requested time window [t_start,
# t_stop]
time_mask = [i for i in range(len(marker_times)) if (
marker_times[i] >= t_start and marker_times[i] <= t_stop)]
marker_times = marker_times[time_mask]
# Do not create an eventarray if there are no events of this type
# in the requested time range
if len(marker_times) == 0:
continue
ev = Event(times=pq.Quantity(marker_times, units=self.nev_time_unit,
dtype="int"),
labels=event_type['name'],
name="Digital Marker " + str(event_type),
file_origin=filename_nev,
marker_id=event_type['event_id'],
digital_marker=True,
analog_marker=False,
nttl=event_type['nttl'])
seg.events.append(ev)
def read_nse(self, filename_nse, seg, lazy=False, cascade=True,
t_start=None, t_stop=None, unit_list=None,
waveforms=False):
'''
Reads nse file and attaches content as spike train to provided neo
segment. Times can be provided in samples (integer values). If the
nse file does not contain a sampling rate value, the ncs sampling
rate on the same electrode is used.
Arguments:
filename_nse : Name of the .nse file to be loaded.
seg : Neo Segment, to which the Spiketrain containing the data
will be attached.
lazy : Postpone actual reading of the data. Instead provide a dummy
SpikeTrain. Default 'False'.
cascade : Not used in this context. Default: 'True'.
t_start : time or sample (quantity or integer) that the
SpikeTrain begins.
Default None.
t_stop : time or sample (quantity or integer) that the SpikeTrain
ends.
Default None.
unit_list : unit ids to be loaded. If [], all units are loaded.
Default None.
waveforms : Load the waveform (up to 32 data points) for each
spike time. Default: False
Returns:
None
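Usage sketch (hypothetical file name; NIO is an already associated
NeuralynxIO instance; loads unit 0 only):
seg = Segment(file_origin='example')
NIO.read_nse('SE1.nse', seg, unit_list=[0], waveforms=True)
st = seg.spiketrains[0]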
'''
if filename_nse[-4:] != '.nse':
filename_nse += '.nse'
if sep in filename_nse:
filename_nse = filename_nse.split(sep)[-1]
# extracting channel id of requested file
channel_id = self.get_channel_id_by_file_name(filename_nse)
if channel_id is not None:
chid = channel_id
else:
# if nse file is empty it is not listed in self.parameters_nse, but
# in self.nse_avail
if filename_nse in self.nse_avail:
warnings.warn('NeuralynxIO is attempting to read an empty '
'(not associated) nse file (%s). '
'Not loading nse file.' % (filename_nse))
return
else:
raise ValueError('NeuralynxIO is attempting to read a file '
'not associated to this session (%s).' % (
filename_nse))
# ensure meaningful values for requested start and stop times
# in case time is provided in samples: transform to absolute time units
# ncs sampling rate is best guess if there is no explicit sampling
# rate given for nse values.
if 'sampling_rate' in self.parameters_nse[chid]:
sr = self.parameters_nse[chid]['sampling_rate']
elif chid in self.parameters_ncs and 'sampling_rate' in \
self.parameters_ncs[chid]:
sr = self.parameters_ncs[chid]['sampling_rate']
else:
raise ValueError(
'No sampling rate present for channel id %i in nse file '
'%s. '
'Could also not find the sampling rate of the respective '
'ncs '
'file.' % (
chid, filename_nse))
if isinstance(t_start, int):
t_start = t_start / sr
if isinstance(t_stop, int):
t_stop = t_stop / sr
# + rescaling global recording start (first sample in any file type)
# This is not optimal, as there is no way to know how long the
# recording lasted after last spike
if t_start is None or t_start < (
self.parameters_nse[chid]['t_first'] -
self.parameters_global[
't_start']):
t_start = (
self.parameters_nse[chid]['t_first'] - self.parameters_global[
't_start'])
if t_start > (
self.parameters_nse[chid]['t_last'] -
self.parameters_global['t_start']):
raise ValueError(
'Requested times window (%s to %s) is later than data are '
'recorded (t_stop = %s) '
'for file %s.' % (t_start, t_stop,
(self.parameters_nse[chid]['t_last'] -
self.parameters_global['t_start']),
filename_nse))
if t_stop is None:
t_stop = (sys.maxsize) * self.nse_time_unit
if t_stop is None or t_stop > (
self.parameters_nse[chid]['t_last'] -
self.parameters_global[
't_start']):
t_stop = (
self.parameters_nse[chid]['t_last'] - self.parameters_global[
't_start'])
if t_stop < (
self.parameters_nse[chid]['t_first'] -
self.parameters_global[
't_start']):
raise ValueError(
'Requested times window (%s to %s) is earlier than data '
'are recorded (t_start = %s) '
'for file %s.' % (t_start, t_stop,
(self.parameters_nse[chid]['t_first'] -
self.parameters_global['t_start']),
filename_nse))
if t_start >= t_stop:
raise ValueError(
'Requested start time (%s) is later than / equal to stop '
'time '
'(%s) for file %s.' % (t_start, t_stop, filename_nse))
# reading data
[timestamps, channel_ids, cell_numbers, features,
data_points] = self.__mmap_nse_packets(filename_nse)
# load all units available if unit_list==[] or None
if unit_list == [] or unit_list is None:
unit_list = np.unique(cell_numbers)
elif not any([u in cell_numbers for u in unit_list]):
self._diagnostic_print(
'None of the requested unit ids (%s) present '
'in nse file %s (contains unit_list %s)' % (
unit_list, filename_nse, np.unique(cell_numbers)))
# extracting spikes unit-wise and generate spiketrains
for unit_i in unit_list:
if not lazy:
# Extract all time stamps of that neuron on that electrode
unit_mask = np.where(cell_numbers == unit_i)[0]
spike_times = timestamps[unit_mask] * self.nse_time_unit
spike_times = spike_times - self.parameters_global['t_start']
time_mask = np.where(np.logical_and(spike_times >= t_start,
spike_times < t_stop))
spike_times = spike_times[time_mask]
else:
spike_times = pq.Quantity([], units=self.nse_time_unit)
# Create SpikeTrain object
st = SpikeTrain(times=spike_times,
t_start=t_start,
t_stop=t_stop,
sampling_rate=self.parameters_ncs[chid][
'sampling_rate'],
name="Channel %i, Unit %i" % (chid, unit_i),
file_origin=filename_nse,
unit_id=unit_i,
channel_id=chid)
if waveforms and not lazy:
# Collect all waveforms of the specific unit
# For computational reasons: no units, no time axis
st.waveforms = data_points[unit_mask][time_mask]
# TODO: Add units to waveforms (pq.uV?) and add annotation
# left_sweep = x * pq.ms indicating when threshold crossing
# occurred in waveform
st.annotations.update(self.parameters_nse[chid])
st.annotations['electrode_id'] = chid
# This annotation is necessary for automatic generation of
# recordingchannels
st.annotations['channel_index'] = chid
seg.spiketrains.append(st)
def read_ntt(self, filename_ntt, seg, lazy=False, cascade=True,
t_start=None, t_stop=None, unit_list=None,
waveforms=False):
'''
Reads ntt file and attaches content as spike train to provided neo
segment.
Arguments:
filename_ntt : Name of the .ntt file to be loaded.
seg : Neo Segment, to which the Spiketrain containing the data
will be attached.
lazy : Postpone actual reading of the data. Instead provide a dummy
SpikeTrain. Default 'False'.
cascade : Not used in this context. Default: 'True'.
t_start : time (quantity) that the SpikeTrain begins. Default None.
t_stop : time (quantity) that the SpikeTrain ends. Default None.
unit_list : unit ids to be loaded. If [] or None all units are
loaded.
Default None.
waveforms : Load the waveform (up to 32 data points) for each
spike time. Default: False
Returns:
None
'''
if filename_ntt[-4:] != '.ntt':
filename_ntt += '.ntt'
if sep in filename_ntt:
filename_ntt = filename_ntt.split(sep)[-1]
# extracting channel id of requested file
channel_id = self.get_channel_id_by_file_name(filename_ntt)
if channel_id is not None:
chid = channel_id
else:
# if ntt file is empty it is not listed in self.parameters_ntt, but
# in self.ntt_avail
if filename_ntt in self.ntt_avail:
warnings.warn('NeuralynxIO is attempting to read an empty '
'(not associated) ntt file (%s). '
'Not loading ntt file.' % (filename_ntt))
return
else:
raise ValueError('NeuralynxIO is attempting to read a file '
'not associated to this session (%s).' % (
filename_ntt))
# ensure meaningful values for requested start and stop times
# in case time is provided in samples: transform to absolute time units
# ncs sampling rate is best guess if there is no explicit sampling
# rate given for ntt values.
if 'sampling_rate' in self.parameters_ntt[chid]:
sr = self.parameters_ntt[chid]['sampling_rate']
elif chid in self.parameters_ncs and 'sampling_rate' in \
self.parameters_ncs[chid]:
sr = self.parameters_ncs[chid]['sampling_rate']
else:
raise ValueError(
'No sampling rate present for channel id %i in ntt file '
'%s. '
'Could also not find the sampling rate of the respective '
'ncs '
'file.' % (
chid, filename_ntt))
if isinstance(t_start, int):
t_start = t_start / sr
if isinstance(t_stop, int):
t_stop = t_stop / sr
# + rescaling to global recording start (first sample in any
# recording file)
if t_start is None or t_start < (
self.parameters_ntt[chid]['t_first'] -
self.parameters_global[
't_start']):
t_start = (
self.parameters_ntt[chid]['t_first'] - self.parameters_global[
't_start'])
if t_start > (
self.parameters_ntt[chid]['t_last'] -
self.parameters_global[
't_start']):
raise ValueError(
'Requested times window (%s to %s) is later than data are '
'recorded (t_stop = %s) '
'for file %s.' % (t_start, t_stop,
(self.parameters_ntt[chid]['t_last'] -
self.parameters_global['t_start']),
filename_ntt))
if t_stop is None:
t_stop = (sys.maxsize) * self.ntt_time_unit
if t_stop is None or t_stop > (
self.parameters_ntt[chid]['t_last'] -
self.parameters_global[
't_start']):
t_stop = (
self.parameters_ntt[chid]['t_last'] - self.parameters_global[
't_start'])
if t_stop < (
self.parameters_ntt[chid]['t_first'] -
self.parameters_global[
't_start']):
raise ValueError(
'Requested times window (%s to %s) is earlier than data '
'are '
'recorded (t_start = %s) '
'for file %s.' % (t_start, t_stop,
(self.parameters_ntt[chid]['t_first'] -
self.parameters_global['t_start']),
filename_ntt))
if t_start >= t_stop:
raise ValueError(
'Requested start time (%s) is later than / equal to stop '
'time '
'(%s) '
'for file %s.' % (t_start, t_stop, filename_ntt))
# reading data
[timestamps, channel_ids, cell_numbers, features,
data_points] = self.__mmap_ntt_packets(filename_ntt)
# TODO: When ntt available: Implement 1 RecordingChannelGroup per
# Tetrode, such that each electrode gets its own recording channel
# load all units available if units==[]
if unit_list == [] or unit_list is None:
unit_list = np.unique(cell_numbers)
elif not any([u in cell_numbers for u in unit_list]):
self._diagnostic_print(
'None of the requested unit ids (%s) present '
'in ntt file %s (contains units %s)' % (
unit_list, filename_ntt, np.unique(cell_numbers)))
# loading data for each unit and generating spiketrain
for unit_i in unit_list:
if not lazy:
# Extract all time stamps of that neuron on that electrode
mask = np.where(cell_numbers == unit_i)[0]
spike_times = timestamps[mask] * self.ntt_time_unit
spike_times = spike_times - self.parameters_global['t_start']
spike_times = spike_times[np.where(
np.logical_and(spike_times >= t_start,
spike_times < t_stop))]
else:
spike_times = pq.Quantity([], units=self.ntt_time_unit)
# Create SpikeTrain object
st = SpikeTrain(times=spike_times,
t_start=t_start,
t_stop=t_stop,
sampling_rate=self.parameters_ncs[chid][
'sampling_rate'],
name="Channel %i, Unit %i" % (chid, unit_i),
file_origin=filename_ntt,
unit_id=unit_i,
channel_id=chid)
# Collect all waveforms of the specific unit
if waveforms and not lazy:
# For computational reasons: no units, no time axis
# transposing to adhere to the neo guideline, which states that
# time should be in the first axis, even though this layout is not
# intuitive for waveform data.
st.waveforms = np.array(
[data_points[t, :, :] for t in range(len(timestamps))
if cell_numbers[t] == unit_i]).transpose()
# TODO: Add units to waveforms (pq.uV?) and add annotation
# left_sweep = x * pq.ms indicating when threshold crossing
# occurred in waveform
st.annotations.update(self.parameters_ntt[chid])
st.annotations['electrode_id'] = chid
# This annotation is necessary for automatic generation of
# recordingchannels
st.annotations['channel_index'] = chid
seg.spiketrains.append(st)
############# private routines
# #################################################
def _associate(self, cachedir=None, usecache='hash'):
"""
Associates the object with a specified Neuralynx session, i.e., a
combination of .nse, .nev and .ncs files. The metadata is read
into the object for future reference.
Arguments:
cachedir : Directory for loading and saving hashes of recording
sessions
and pickled meta information about files
extracted during
association process
use_cache: method used for cache identification. Possible values:
'hash'/
'always'/'datesize'/'never'. Default 'hash'
Returns:
-
"""
# If already associated, disassociate first
if self.associated:
raise IOError(
"Trying to associate an already associated NeuralynxIO "
"object.")
# Create parameter containers
# Dictionary that holds different parameters read from the .nev file
self.parameters_nse = {}
# List of parameter dictionaries for all potential file types
self.parameters_ncs = {}
self.parameters_nev = {}
self.parameters_ntt = {}
# combined global parameters
self.parameters_global = {}
# Scanning session directory for recorded files
self.sessionfiles = [f for f in listdir(self.sessiondir) if
isfile(os.path.join(self.sessiondir, f))]
# Listing available files
self.ncs_avail = []
self.nse_avail = []
self.nev_avail = []
self.ntt_avail = []
# Listing associated (=non corrupted, non empty files)
self.ncs_asso = []
self.nse_asso = []
self.nev_asso = []
self.ntt_asso = []
if usecache not in ['hash', 'always', 'datesize', 'never']:
raise ValueError(
"Argument value of usecache '%s' is not valid. Accepted "
"values are 'hash','always','datesize','never'" % usecache)
if cachedir is None and usecache != 'never':
raise ValueError('No cache directory provided.')
# check if there are any changes of the data files -> new data check run
check_files = (usecache != 'always')  # never check the files again
# if usecache == 'always'
if cachedir is not None and usecache != 'never':
self._diagnostic_print(
'Calculating %s of session files to check for cached '
'parameter files.' % usecache)
cachefile = cachedir + sep + self.sessiondir.split(sep)[
-1] + '/hashkeys'
if not os.path.exists(cachedir + sep + self.sessiondir.split(sep)[
-1]):
os.makedirs(cachedir + sep + self.sessiondir.split(sep)[-1])
if usecache == 'hash':
hashes_calc = {}
# calculates hash of all available files
for f in self.sessionfiles:
file_hash = self.hashfile(open(self.sessiondir + sep + f,
'rb'), hashlib.sha256())
hashes_calc[f] = file_hash
elif usecache == 'datesize':
hashes_calc = {}
for f in self.sessionfiles:
hashes_calc[f] = self.datesizefile(
self.sessiondir + sep + f)
# load hashes saved for this session in an earlier loading run
if os.path.exists(cachefile):
hashes_read = pickle.load(open(cachefile, 'rb'))
else:
hashes_read = {}
# compare hashes to previously saved metadata and load metadata
# if no changes occurred
if usecache == 'always' or all([f in hashes_calc and
f in hashes_read and
hashes_calc[f] ==
hashes_read[f]
for f in self.sessionfiles]):
check_files = False
self._diagnostic_print(
'Using cached metadata from earlier analysis run in '
'file '
'%s. Skipping file checks.' % cachefile)
# loading saved parameters
parameterfile = cachedir + sep + self.sessiondir.split(sep)[
-1] + '/parameters.cache'
if os.path.exists(parameterfile):
parameters_read = pickle.load(open(parameterfile, 'rb'))
else:
raise IOError('Inconsistent cache files.')
for IOdict, dictname in [(self.parameters_global, 'global'),
(self.parameters_ncs, 'ncs'),
(self.parameters_nse, 'nse'),
(self.parameters_nev, 'nev'),
(self.parameters_ntt, 'ntt')]:
IOdict.update(parameters_read[dictname])
self.nev_asso = self.parameters_nev.keys()
self.ncs_asso = [val['filename'] for val in
self.parameters_ncs.values()]
self.nse_asso = [val['filename'] for val in
self.parameters_nse.values()]
self.ntt_asso = [val['filename'] for val in
self.parameters_ntt.values()]
for filename in self.sessionfiles:
# Extracting only continuous signal files (.ncs)
if filename[-4:] == '.ncs':
self.ncs_avail.append(filename)
elif filename[-4:] == '.nse':
self.nse_avail.append(filename)
elif filename[-4:] == '.nev':
self.nev_avail.append(filename)
elif filename[-4:] == '.ntt':
self.ntt_avail.append(filename)
else:
self._diagnostic_print(
'Ignoring file of unknown data type %s' % filename)
if check_files:
self._diagnostic_print('Starting individual file checks.')
# =======================================================================
# # Scan NCS files
# =======================================================================
self._diagnostic_print(
'\nDetected %i .ncs file(s).' % (len(self.ncs_avail)))
for ncs_file in self.ncs_avail:
# Loading individual NCS file and extracting parameters
self._diagnostic_print("Scanning " + ncs_file + ".")
# Reading file packet headers
filehandle = self.__mmap_ncs_packet_headers(ncs_file)
if filehandle is None:
continue
try:
# Checking consistency of ncs file
self.__ncs_packet_check(filehandle)
except AssertionError:
warnings.warn(
'Session file %s did not pass data packet check. '
'This file can not be loaded.' % ncs_file)
continue
# Reading data packet header information and store them in
# parameters_ncs
self.__read_ncs_data_headers(filehandle, ncs_file)
# Reading txt file header
channel_id = self.get_channel_id_by_file_name(ncs_file)
self.__read_text_header(ncs_file,
self.parameters_ncs[channel_id])
# Check for invalid starting times of data packets in ncs file
self.__ncs_invalid_first_sample_check(filehandle)
# Check ncs file for gaps
self.__ncs_gap_check(filehandle)
self.ncs_asso.append(ncs_file)
# =======================================================================
# # Scan NSE files
# =======================================================================
# Loading individual NSE file and extracting parameters
self._diagnostic_print(
'\nDetected %i .nse file(s).' % (len(self.nse_avail)))
for nse_file in self.nse_avail:
# Loading individual NSE file and extracting parameters
self._diagnostic_print('Scanning ' + nse_file + '.')
# Reading file
filehandle = self.__mmap_nse_packets(nse_file)
if filehandle is None:
continue
try:
# Checking consistency of nse file
self.__nse_check(filehandle)
except AssertionError:
warnings.warn(
'Session file %s did not pass data packet check. '
'This file can not be loaded.' % nse_file)
continue
# Reading header information and store them in parameters_nse
self.__read_nse_data_header(filehandle, nse_file)
# Reading txt file header
channel_id = self.get_channel_id_by_file_name(nse_file)
self.__read_text_header(nse_file,
self.parameters_nse[channel_id])
# using sampling rate from txt header, as this is not saved
# in data packets
if 'SamplingFrequency' in self.parameters_nse[channel_id]:
self.parameters_nse[channel_id]['sampling_rate'] = \
(self.parameters_nse[channel_id][
'SamplingFrequency'] * self.nse_sr_unit)
self.nse_asso.append(nse_file)
# =======================================================================
# # Scan NEV files
# =======================================================================
self._diagnostic_print(
'\nDetected %i .nev file(s).' % (len(self.nev_avail)))
for nev_file in self.nev_avail:
# Loading individual NEV file and extracting parameters
self._diagnostic_print('Scanning ' + nev_file + '.')
# Reading file
filehandle = self.__mmap_nev_file(nev_file)
if filehandle is None:
continue
try:
# Checking consistency of nev file
self.__nev_check(filehandle)
except AssertionError:
warnings.warn(
'Session file %s did not pass data packet check. '
'This file can not be loaded.' % nev_file)
continue
# Reading header information and store them in parameters_nev
self.__read_nev_data_header(filehandle, nev_file)
# Reading txt file header
self.__read_text_header(nev_file, self.parameters_nev[nev_file])
self.nev_asso.append(nev_file)
# =======================================================================
# # Scan NTT files
# =======================================================================
self._diagnostic_print(
'\nDetected %i .ntt file(s).' % (len(self.ntt_avail)))
for ntt_file in self.ntt_avail:
# Loading individual NTT file and extracting parameters
self._diagnostic_print('Scanning ' + ntt_file + '.')
# Reading file
filehandle = self.__mmap_ntt_file(ntt_file)
if filehandle is None:
continue
try:
# Checking consistency of nev file
self.__ntt_check(filehandle)
except AssertionError:
warnings.warn(
'Session file %s did not pass data packet check. '
'This file can not be loaded.' % ntt_file)
continue
# Reading header information and store them in parameters_nev
self.__read_ntt_data_header(filehandle, ntt_file)
# Reading txt file header
self.__read_ntt_text_header(ntt_file)
# using sampling rate from txt header, as this is not saved
# in data packets
channel_id = self.get_channel_id_by_file_name(ntt_file)
if 'SamplingFrequency' in self.parameters_ntt[channel_id]:
self.parameters_ntt[channel_id]['sampling_rate'] = \
(self.parameters_ntt[channel_id][
'SamplingFrequency'] * self.ntt_sr_unit)
self.ntt_asso.append(ntt_file)
# =======================================================================
# # Check consistency across files
# =======================================================================
# check RECORDING_OPENED / CLOSED times (from txt header) for
# different files
for parameter_collection in [self.parameters_ncs,
self.parameters_nse,
self.parameters_nev,
self.parameters_ntt]:
            # check recording_opened times for each file type
            if any(np.abs(np.diff([i['recording_opened'] for i in
                                   parameter_collection.values()]))
                   > datetime.timedelta(seconds=1)):
                raise ValueError(
                    'Data files were opened for recording with a delay '
                    'greater than 1 second.')
            # check recording_closed times for each file type
            closed_times = [i['recording_closed'] for i in
                            parameter_collection.values()
                            if i['recording_closed'] is not None]
            if any(np.diff(closed_times) > datetime.timedelta(seconds=0.1)):
                raise ValueError(
                    'Data files were closed after recording with a delay '
                    'greater than 0.1 second.')
        # determine global recording start and stop from all file headers
parameter_collection = list(self.parameters_ncs.values()) + \
list(self.parameters_nse.values()) + \
list(self.parameters_ntt.values()) + \
list(self.parameters_nev.values())
self.parameters_global['recording_opened'] = min(
[i['recording_opened'] for i in parameter_collection])
self.parameters_global['recording_closed'] = max(
[i['recording_closed'] for i in parameter_collection])
        ############ Set up GLOBAL TIMING SCHEME #############################
for file_type, parameter_collection in [
('ncs', self.parameters_ncs), ('nse', self.parameters_nse),
('nev', self.parameters_nev), ('ntt', self.parameters_ntt)]:
# check starting times
name_t1, name_t2 = ['t_start', 't_stop'] if (
file_type != 'nse' and file_type != 'ntt') \
else ['t_first', 't_last']
# checking if files of same type start at same time point
if file_type != 'nse' and file_type != 'ntt' \
and len(np.unique(np.array(
[i[name_t1].magnitude for i in
parameter_collection.values()]))) > 1:
raise ValueError(
'%s files do not start at same time point.' %
file_type)
# saving t_start and t_stop for each file type available
if len([i[name_t1] for i in parameter_collection.values()]):
self.parameters_global['%s_t_start' % file_type] = min(
[i[name_t1]
for i in parameter_collection.values()])
self.parameters_global['%s_t_stop' % file_type] = min(
[i[name_t2]
for i in parameter_collection.values()])
        # extracting minimal t_start and maximal t_stop value for this
# recording session
self.parameters_global['t_start'] = min(
[self.parameters_global['%s_t_start' % t]
for t in ['ncs', 'nev', 'nse', 'ntt']
if '%s_t_start' % t in self.parameters_global])
self.parameters_global['t_stop'] = max(
[self.parameters_global['%s_t_stop' % t]
for t in ['ncs', 'nev', 'nse', 'ntt']
             if '%s_t_stop' % t in self.parameters_global])
# checking gap consistency across ncs files
# check number of gaps detected
if len(np.unique([len(i['gaps']) for i in
self.parameters_ncs.values()])) != 1:
raise ValueError('NCS files contain different numbers of gaps!')
# check consistency of gaps across files and create global gap
# collection
self.parameters_global['gaps'] = []
for g in range(len(list(self.parameters_ncs.values())[0]['gaps'])):
integrated = False
gap_stats = np.unique(
[i['gaps'][g] for i in self.parameters_ncs.values()],
return_counts=True)
if len(gap_stats[0]) != 3 or len(np.unique(gap_stats[1])) != 1:
raise ValueError(
'Gap number %i is not consistent across NCS '
'files.' % (
g))
else:
# check if this is second part of already existing gap
for gg in range(len(self.parameters_global['gaps'])):
globalgap = self.parameters_global['gaps'][gg]
# check if stop time of first is start time of second
# -> continuous gap
if globalgap[2] == \
list(self.parameters_ncs.values())[0]['gaps'][
g][1]:
self.parameters_global['gaps'][gg] = \
self.parameters_global['gaps'][gg][:2] + (
list(self.parameters_ncs.values())[0][
'gaps'][g][
2],)
integrated = True
break
if not integrated:
# add as new gap if this is not a continuation of
# existing global gap
self.parameters_global['gaps'].append(
list(self.parameters_ncs.values())[0][
'gaps'][g])
# save results of association for future analysis together with hash
# values for change tracking
if cachedir is not None and usecache != 'never':
pickle.dump({'global': self.parameters_global,
'ncs': self.parameters_ncs,
'nev': self.parameters_nev,
'nse': self.parameters_nse,
'ntt': self.parameters_ntt},
open(cachedir + sep + self.sessiondir.split(sep)[
-1] + '/parameters.cache', 'wb'))
if usecache != 'always':
pickle.dump(hashes_calc, open(
cachedir + sep + self.sessiondir.split(sep)[
-1] + '/hashkeys', 'wb'))
self.associated = True
    #################### private routines ####################################
    ################# Memory Mapping Methods #################################
def __mmap_nse_packets(self, filename):
"""
        Memory map of the Neuralynx .nse file optimized for extraction of
data packet headers
Reading standard dtype improves speed, but timestamps need to be
reconstructed
"""
filesize = getsize(self.sessiondir + sep + filename) # in byte
if filesize > 16384:
data = np.memmap(self.sessiondir + sep + filename,
dtype='<u2',
                             shape=(int((filesize - 16384) / 2 / 56), 56),
mode='r', offset=16384)
# reconstructing original data
# first 4 ints -> timestamp in microsec
            timestamps = data[:, 0] + data[:, 1] * 2 ** 16 + \
                data[:, 2] * 2 ** 32 + data[:, 3] * 2 ** 48
channel_id = data[:, 4] + data[:, 5] * 2 ** 16
cell_number = data[:, 6] + data[:, 7] * 2 ** 16
features = [data[:, p] + data[:, p + 1] * 2 ** 16 for p in
range(8, 23, 2)]
features = np.array(features, dtype='i4')
data_points = data[:, 24:56].astype('i2')
del data
return timestamps, channel_id, cell_number, features, data_points
else:
return None
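    # Illustrative note (added for clarity, not part of the original reader):
    # the four little-endian uint16 words of each packet are recombined into a
    # single 64-bit microsecond timestamp, e.g. for words (w0, w1, w2, w3):
    #     ts = w0 + w1 * 2**16 + w2 * 2**32 + w3 * 2**48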
def __mmap_ncs_data(self, filename):
""" Memory map of the Neuralynx .ncs file optimized for data
extraction"""
if getsize(self.sessiondir + sep + filename) > 16384:
data = np.memmap(self.sessiondir + sep + filename,
dtype=np.dtype(('i2', (522))), mode='r',
offset=16384)
# removing data packet headers and flattening data
return data[:, 10:]
else:
return None
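    # Note added for clarity (not in the original reader): each ncs record is
    # 1044 bytes = 522 int16 words; the first 10 words (the 8-byte timestamp
    # plus three uint32 header fields) are stripped by data[:, 10:], leaving
    # the 512 samples of each packet.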
def __mmap_ncs_packet_headers(self, filename):
"""
Memory map of the Neuralynx .ncs file optimized for extraction of
data packet headers
Reading standard dtype improves speed, but timestamps need to be
reconstructed
"""
filesize = getsize(self.sessiondir + sep + filename) # in byte
if filesize > 16384:
data = np.memmap(self.sessiondir + sep + filename,
dtype='<u4',
                             shape=(int((filesize - 16384) / 4 / 261), 261),
mode='r', offset=16384)
ts = data[:, 0:2]
multi = np.repeat(np.array([1, 2 ** 32], ndmin=2), len(data),
axis=0)
timestamps = np.sum(ts * multi, axis=1)
# timestamps = data[:,0] + (data[:,1] *2**32)
header_u4 = data[:, 2:5]
return timestamps, header_u4
else:
return None
def __mmap_ncs_packet_timestamps(self, filename):
"""
Memory map of the Neuralynx .ncs file optimized for extraction of
data packet headers
Reading standard dtype improves speed, but timestamps need to be
reconstructed
"""
filesize = getsize(self.sessiondir + sep + filename) # in byte
if filesize > 16384:
data = np.memmap(self.sessiondir + sep + filename,
dtype='<u4',
shape=(int((filesize - 16384) / 4 / 261), 261),
mode='r', offset=16384)
ts = data[:, 0:2]
multi = np.repeat(np.array([1, 2 ** 32], ndmin=2), len(data),
axis=0)
timestamps = np.sum(ts * multi, axis=1)
# timestamps = data[:,0] + data[:,1]*2**32
return timestamps
else:
return None
def __mmap_nev_file(self, filename):
""" Memory map the Neuralynx .nev file """
nev_dtype = np.dtype([
('reserved', '<i2'),
('system_id', '<i2'),
('data_size', '<i2'),
('timestamp', '<u8'),
('event_id', '<i2'),
('ttl_input', '<i2'),
('crc_check', '<i2'),
('dummy1', '<i2'),
('dummy2', '<i2'),
('extra', '<i4', (8,)),
('event_string', 'a128'),
])
if getsize(self.sessiondir + sep + filename) > 16384:
return np.memmap(self.sessiondir + sep + filename,
dtype=nev_dtype, mode='r', offset=16384)
else:
return None
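    # Field order of nev_dtype above, for reference when records are accessed
    # by position elsewhere in this reader (added comment, not original):
    #     0 reserved, 1 system_id, 2 data_size, 3 timestamp, 4 event_id,
    #     5 ttl_input, 6 crc_check, 7 dummy1, 8 dummy2, 9 extra,
    #     10 event_string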
def __mmap_ntt_file(self, filename):
""" Memory map the Neuralynx .nse file """
nse_dtype = np.dtype([
('timestamp', '<u8'),
('sc_number', '<u4'),
('cell_number', '<u4'),
('params', '<u4', (8,)),
('data', '<i2', (32, 4)),
])
if getsize(self.sessiondir + sep + filename) > 16384:
return np.memmap(self.sessiondir + sep + filename,
dtype=nse_dtype, mode='r', offset=16384)
else:
return None
def __mmap_ntt_packets(self, filename):
"""
        Memory map of the Neuralynx .ntt file optimized for extraction of
data packet headers
Reading standard dtype improves speed, but timestamps need to be
reconstructed
"""
filesize = getsize(self.sessiondir + sep + filename) # in byte
if filesize > 16384:
data = np.memmap(self.sessiondir + sep + filename,
dtype='<u2',
                             shape=(int((filesize - 16384) / 2 / 152), 152),
mode='r', offset=16384)
# reconstructing original data
# first 4 ints -> timestamp in microsec
timestamps = data[:, 0] + data[:, 1] * 2 ** 16 + \
data[:, 2] * 2 ** 32 + data[:, 3] * 2 ** 48
channel_id = data[:, 4] + data[:, 5] * 2 ** 16
cell_number = data[:, 6] + data[:, 7] * 2 ** 16
features = [data[:, p] + data[:, p + 1] * 2 ** 16 for p in
range(8, 23, 2)]
features = np.array(features, dtype='i4')
            data_points = data[:, 24:152].astype('i2').reshape((-1, 32, 4))
del data
return timestamps, channel_id, cell_number, features, data_points
else:
return None
# ___________________________ header extraction __________________________
def __read_text_header(self, filename, parameter_dict):
# Reading main file header (plain text, 16kB)
text_header = codecs.open(self.sessiondir + sep + filename, 'r',
'latin-1').read(16384)
# necessary text encoding depends on Python version
if sys.version_info.major < 3:
text_header = text_header.encode('latin-1')
parameter_dict['cheetah_version'] = \
self.__get_cheetah_version_from_txt_header(text_header, filename)
parameter_dict.update(self.__get_filename_and_times_from_txt_header(
text_header, parameter_dict['cheetah_version']))
# separating lines of header and ignoring last line (fill), check if
# Linux or Windows OS
if sep == '/':
text_header = text_header.split('\r\n')[:-1]
if sep == '\\':
text_header = text_header.split('\n')[:-1]
# minor parameters possibly saved in header (for any file type)
minor_keys = ['AcqEntName',
'FileType',
'FileVersion',
'RecordSize',
'HardwareSubSystemName',
'HardwareSubSystemType',
'SamplingFrequency',
'ADMaxValue',
'ADBitVolts',
'NumADChannels',
'ADChannel',
'InputRange',
'InputInverted',
'DSPLowCutFilterEnabled',
'DspLowCutFrequency',
'DspLowCutNumTaps',
'DspLowCutFilterType',
'DSPHighCutFilterEnabled',
'DspHighCutFrequency',
'DspHighCutNumTaps',
'DspHighCutFilterType',
'DspDelayCompensation',
'DspFilterDelay_\xb5s',
'DisabledSubChannels',
'WaveformLength',
'AlignmentPt',
'ThreshVal',
'MinRetriggerSamples',
'SpikeRetriggerTime',
'DualThresholding',
'Feature Peak 0',
'Feature Valley 1',
'Feature Energy 2',
'Feature Height 3',
'Feature NthSample 4',
'Feature NthSample 5',
'Feature NthSample 6',
'Feature NthSample 7',
'SessionUUID',
'FileUUID',
'CheetahRev',
'ProbeName',
'OriginalFileName',
'TimeCreated',
'TimeClosed',
'ApplicationName',
'AcquisitionSystem',
'ReferenceChannel']
# extracting minor key values of header (only taking into account
# non-empty lines)
for i, minor_entry in enumerate(text_header):
if minor_entry == '' or minor_entry[0] == '#':
continue
matching_key = [key for key in minor_keys if
minor_entry.strip('-').startswith(key)]
if len(matching_key) == 1:
matching_key = matching_key[0]
minor_value = minor_entry.split(matching_key)[1].strip(
' ').rstrip(' ')
# determine data type of entry
if minor_value.isdigit():
# converting to int if possible
minor_value = int(minor_value)
else:
# converting to float if possible
try:
minor_value = float(minor_value)
                    except ValueError:
pass
if matching_key in parameter_dict:
warnings.warn(
'Multiple entries for %s in text header of %s' % (
matching_key, filename))
else:
parameter_dict[matching_key] = minor_value
elif len(matching_key) > 1:
raise ValueError(
'Inconsistent minor key list for text header '
'interpretation.')
else:
warnings.warn(
'Skipping text header entry %s, because it is not in '
'minor key list' % minor_entry)
self._diagnostic_print(
'Successfully decoded text header of file (%s).' % filename)
def __get_cheetah_version_from_txt_header(self, text_header, filename):
version_regex = re.compile('((-CheetahRev )|'
'(ApplicationName Cheetah "))'
'(?P<version>\d{1,3}\.\d{1,3}\.\d{1,3})')
match = version_regex.search(text_header)
if match:
return match.groupdict()['version']
else:
raise ValueError('Can not extract Cheetah version from file '
'header of file %s' % filename)
def __get_filename_and_times_from_txt_header(self, text_header, version):
if parse_version(version) <= parse_version('5.6.4'):
datetime1_regex = re.compile('## Time Opened \(m/d/y\): '
'(?P<date>\S+)'
' \(h:m:s\.ms\) '
'(?P<time>\S+)')
datetime2_regex = re.compile('## Time Closed \(m/d/y\): '
'(?P<date>\S+)'
' \(h:m:s\.ms\) '
'(?P<time>\S+)')
filename_regex = re.compile('## File Name (?P<filename>\S+)')
datetimeformat = '%m/%d/%Y %H:%M:%S.%f'
else:
datetime1_regex = re.compile('-TimeCreated '
'(?P<date>\S+) '
'(?P<time>\S+)')
datetime2_regex = re.compile('-TimeClosed '
'(?P<date>\S+) '
'(?P<time>\S+)')
filename_regex = re.compile('-OriginalFileName '
'"?(?P<filename>\S+)"?')
datetimeformat = '%Y/%m/%d %H:%M:%S'
matchtime1 = datetime1_regex.search(text_header).groupdict()
matchtime2 = datetime2_regex.search(text_header).groupdict()
matchfilename = filename_regex.search(text_header)
filename = matchfilename.groupdict()['filename']
if '## Time Closed File was not closed properly' in text_header:
warnings.warn('Text header of file %s does not contain recording '
'closed time. File was not closed properly.'
'' % filename)
datetime1 = datetime.datetime.strptime(matchtime1['date'] + ' ' +
matchtime1['time'],
datetimeformat)
datetime2 = datetime.datetime.strptime(matchtime2['date'] + ' ' +
matchtime2['time'],
datetimeformat)
output = {'recording_opened': datetime1,
'recording_closed': datetime2,
'file_created': datetime1,
'file_closed': datetime2,
'recording_file_name': filename}
return output
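    # Illustrative header lines matched by the regexes above (example values
    # only, not taken from a real session; added comment):
    #     ## Time Opened (m/d/y): 11/9/2016 (h:m:s.ms) 17:12:33.0   (<= 5.6.4)
    #     -TimeCreated 2016/11/09 17:12:33                          (newer)
    #     -OriginalFileName "CSC1.ncs"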
def __read_ncs_data_headers(self, filehandle, filename):
'''
Reads the .ncs data block headers and stores the information in the
object's parameters_ncs dictionary.
Args:
filehandle (file object):
Handle to the already opened .ncs file.
filename (string):
Name of the ncs file.
Returns:
dict of extracted data
'''
timestamps = filehandle[0]
header_u4 = filehandle[1]
channel_id = header_u4[0][0]
sr = header_u4[0][1] # in Hz
t_start = timestamps[0] # in microseconds
# calculating corresponding time stamp of first sample, that was not
# recorded any more
# t_stop= time of first sample in last packet +(#samples per packet *
# conversion factor / sampling rate)
        # conversion factor is needed as times are recorded in microseconds
t_stop = timestamps[-1] + (
(header_u4[-1][2]) * (
1 / self.ncs_time_unit.rescale(pq.s)).magnitude /
header_u4[-1][1])
if channel_id in self.parameters_ncs:
raise ValueError(
'Detected multiple ncs files for channel_id %i.'
% channel_id)
else:
sampling_unit = [pq.CompoundUnit('%f*%s'
'' % (sr,
self.ncs_sr_unit.symbol))]
sampling_rate = sr * self.ncs_sr_unit
self.parameters_ncs[channel_id] = {'filename': filename,
't_start': t_start *
self.ncs_time_unit,
't_stop': t_stop *
self.ncs_time_unit,
'sampling_rate': sampling_rate,
'sampling_unit': sampling_unit,
'gaps': []}
return {channel_id: self.parameters_ncs[channel_id]}
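    # Worked example for the t_stop computation in __read_ncs_data_headers
    # (added comment, assuming the usual 512-sample packets at 32 kHz): the
    # last packet spans 512 * 1e6 / 32000 = 16000 microseconds beyond its
    # timestamp, so t_stop = timestamps[-1] + 16000 microseconds.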
def __read_nse_data_header(self, filehandle, filename):
'''
        Reads the .nse data block headers and stores the information in the
        object's parameters_nse dictionary.
Args:
filehandle (file object):
Handle to the already opened .nse file.
filename (string):
Name of the nse file.
Returns:
-
'''
[timestamps, channel_ids, cell_numbers, features,
data_points] = filehandle
if filehandle is not None:
t_first = timestamps[0] # in microseconds
t_last = timestamps[-1] # in microseconds
channel_id = channel_ids[0]
cell_count = cell_numbers[0] # number of cells identified
self.parameters_nse[channel_id] = {'filename': filename,
't_first': t_first *
self.nse_time_unit,
't_last': t_last *
self.nse_time_unit,
'cell_count': cell_count}
def __read_ntt_data_header(self, filehandle, filename):
'''
        Reads the .ntt data block headers and stores the information in the
        object's parameters_ntt dictionary.
        Args:
            filehandle (file object):
                Handle to the already opened .ntt file.
            filename (string):
                Name of the ntt file.
Returns:
-
'''
[timestamps, channel_ids, cell_numbers, features,
data_points] = filehandle
if filehandle is not None:
t_first = timestamps[0] # in microseconds
t_last = timestamps[-1] # in microseconds
channel_id = channel_ids[0]
cell_count = cell_numbers[0] # number of cells identified
# spike_parameters = filehandle[0][3]
# else:
# t_first = None
# channel_id = None
# cell_count = 0
# # spike_parameters = None
#
# self._diagnostic_print('Empty file: No information
# contained in %s'%filename)
self.parameters_ntt[channel_id] = {'filename': filename,
't_first': t_first *
self.ntt_time_unit,
                                               't_last': t_last *
                                                         self.ntt_time_unit,
'cell_count': cell_count}
def __read_nev_data_header(self, filehandle, filename):
'''
Reads the .nev data block headers and stores the relevant information
in the
object's parameters_nev dictionary.
Args:
filehandle (file object):
Handle to the already opened .nev file.
filename (string):
Name of the nev file.
Returns:
-
'''
# Extracting basic recording events to be able to check recording
# consistency
if filename in self.parameters_nev:
raise ValueError(
'Detected multiple nev files of name %s.' % (filename))
else:
self.parameters_nev[filename] = {}
if 'Starting_Recording' in self.parameters_nev[filename]:
raise ValueError('Trying to read second nev file of name %s. '
' Only one can be handled.' % filename)
self.parameters_nev[filename]['Starting_Recording'] = []
self.parameters_nev[filename]['events'] = []
for event in filehandle:
# separately extracting 'Starting Recording'
if ((event[4] in [11, 19]) and
(event[10].decode('latin-1') == 'Starting Recording')):
self.parameters_nev[filename]['Starting_Recording'].append(
event[3] * self.nev_time_unit)
# adding all events to parameter collection
self.parameters_nev[filename]['events'].append(
{'timestamp': event[3] * self.nev_time_unit,
'event_id': event[4],
'nttl': event[5],
'name': event[10].decode('latin-1')})
if len(self.parameters_nev[filename]['Starting_Recording']) < 1:
raise ValueError(
'No Event "Starting_Recording" detected in %s' % (
filename))
self.parameters_nev[filename]['t_start'] = min(
self.parameters_nev[filename]['Starting_Recording'])
# t_stop = time stamp of last event in file
self.parameters_nev[filename]['t_stop'] = max(
[e['timestamp'] for e in
self.parameters_nev[filename]['events']])
# extract all occurring event types (= combination of nttl,
# event_id and name/string)
event_types = copy.deepcopy(self.parameters_nev[filename]['events'])
for d in event_types:
d.pop('timestamp')
self.parameters_nev[filename]['event_types'] = [dict(y) for y in
set(tuple(
x.items())
for x in
event_types)]
# ________________ File Checks __________________________________
def __ncs_packet_check(self, filehandle):
'''
        Checks consistency of data in ncs file and raises assertion error if a
        check fails. Recording gaps themselves are detected in __ncs_gap_check.
Args:
filehandle (file object):
Handle to the already opened .ncs file.
'''
timestamps = filehandle[0]
header_u4 = filehandle[1]
# checking sampling rate of data packets
sr0 = header_u4[0, 1]
assert all(header_u4[:, 1] == sr0)
# checking channel id of data packets
channel_id = header_u4[0, 0]
assert all(header_u4[:, 0] == channel_id)
# time offset of data packets
# TODO: Check if there is a safer way to do the delta_t check for ncs
# data packets
        # this is not a safe assumption: it presumes that the first two data
        # packets have correct time stamps
delta_t = timestamps[1] - timestamps[0]
# valid samples of first data packet
temp_valid_samples = header_u4[0, 2]
# unit test
# time difference between packets corresponds to number of recorded
# samples
assert delta_t == (
temp_valid_samples / (
self.ncs_time_unit.rescale(pq.s).magnitude * sr0))
self._diagnostic_print('NCS packet check successful.')
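    # Worked example for the delta_t assertion in __ncs_packet_check (added
    # comment, assuming 512 valid samples per packet at 32 kHz): consecutive
    # packets should start 512 / (1e-6 * 32000) = 16000 microseconds apart.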
def __nse_check(self, filehandle):
'''
        Checks consistency of data in nse file and raises assertion error if a
check fails.
Args:
filehandle (file object):
Handle to the already opened .nse file.
'''
[timestamps, channel_ids, cell_numbers, features,
data_points] = filehandle
assert all(channel_ids == channel_ids[0])
assert all([len(dp) == len(data_points[0]) for dp in data_points])
self._diagnostic_print('NSE file check successful.')
def __nev_check(self, filehandle):
'''
Checks consistency of data in nev file and raises assertion error if a
check fails.
Args:
filehandle (file object):
Handle to the already opened .nev file.
'''
        # this entry should always equal 2 (see Neuralynx File Description),
        # but values of 0 have also been observed in practice.
assert all([f[2] == 2 or f[2] == 0 for f in filehandle])
# TODO: check with more nev files, if index 0,1,2,6,7,8 and 9 can be
# non-zero. Interpretation? Include in event extraction.
# only observed 0 for index 0,1,2,6,7,8,9 in nev files.
# If they are non-zero, this needs to be included in event extraction
assert all([f[0] == 0 for f in filehandle])
assert all([f[1] == 0 for f in filehandle])
assert all([f[2] in [0, 2] for f in filehandle])
assert all([f[6] == 0 for f in filehandle])
assert all([f[7] == 0 for f in filehandle])
assert all([f[8] == 0 for f in filehandle])
assert all([all(f[9] == 0) for f in filehandle])
self._diagnostic_print('NEV file check successful.')
def __ntt_check(self, filehandle):
'''
        Checks consistency of data in ntt file and raises assertion error if a
        check fails.
        Args:
            filehandle (file object):
                Handle to the already opened .ntt file.
'''
# TODO: check this when first .ntt files are available
[timestamps, channel_ids, cell_numbers, features,
data_points] = filehandle
assert all(channel_ids == channel_ids[0])
assert all([len(dp) == len(data_points[0]) for dp in data_points])
self._diagnostic_print('NTT file check successful.')
def __ncs_gap_check(self, filehandle):
'''
Checks individual data blocks of ncs files for consistent starting
times with respect to sample count.
        This covers intended recording gaps as well as shortened data packets,
        which are incomplete.
'''
timestamps = filehandle[0]
header_u4 = filehandle[1]
channel_id = header_u4[0, 0]
if channel_id not in self.parameters_ncs:
self.parameters_ncs[channel_id] = {}
# time stamps of data packets
delta_t = timestamps[1] - timestamps[0] # in microsec
data_packet_offsets = np.diff(timestamps) # in microsec
# check if delta_t corresponds to number of valid samples present in
# data packets
# NOTE: This also detects recording gaps!
valid_samples = header_u4[:-1, 2]
sampling_rate = header_u4[0, 1]
packet_checks = (valid_samples / (self.ncs_time_unit.rescale(
pq.s).magnitude * sampling_rate)) == data_packet_offsets
if not all(packet_checks):
if 'broken_packets' not in self.parameters_ncs[channel_id]:
self.parameters_ncs[channel_id]['broken_packets'] = []
broken_packets = np.where(np.array(packet_checks) == False)[0]
for broken_packet in broken_packets:
self.parameters_ncs[channel_id]['broken_packets'].append(
(broken_packet,
valid_samples[broken_packet],
data_packet_offsets[broken_packet]))
self._diagnostic_print('Detected broken packet in NCS file at '
'packet id %i (sample number %i '
'time offset id %i)'
'' % (broken_packet,
valid_samples[broken_packet],
data_packet_offsets[broken_packet])
) # in microsec
# checking for irregular data packet durations -> gaps / shortened
# data packets
if not all(data_packet_offsets == delta_t):
if 'gaps' not in self.parameters_ncs[channel_id]:
self.parameters_ncs[channel_id]['gaps'] = []
# gap identification by (sample of gap start, duration)
# gap packets
gap_packet_ids = np.where(data_packet_offsets != delta_t)[0]
for gap_packet_id in gap_packet_ids:
# skip if this packet starting time is known to be corrupted
# hoping no corruption and gap occurs simultaneously
# corrupted time stamp affects two delta_t comparisons:
if gap_packet_id in self.parameters_ncs[channel_id][
'invalid_first_samples'] \
or gap_packet_id + 1 in self.parameters_ncs[channel_id][
'invalid_first_samples']:
continue
gap_start = timestamps[
gap_packet_id] # t_start of last packet [microsec]
gap_stop = timestamps[
gap_packet_id + 1] # t_stop of first packet [microsec]
self.parameters_ncs[channel_id]['gaps'].append((gap_packet_id,
gap_start,
gap_stop)) #
# [,microsec,microsec]
                self._diagnostic_print('Detected gap in NCS file between '
                                       'sample times %i and %i (last correct '
'packet id %i)' % (gap_start, gap_stop,
gap_packet_id))
def __ncs_invalid_first_sample_check(self, filehandle):
'''
Checks data blocks of ncs files for corrupted starting times indicating
        a missing first sample in the data packet. Such packets are excluded
        from the gap check and ignored in further analysis.
'''
timestamps = filehandle[0]
header_u4 = filehandle[1]
channel_id = header_u4[0, 0]
self.parameters_ncs[channel_id]['invalid_first_samples'] = []
# checking if first bit of timestamp is 1, which indicates error
invalid_packet_ids = np.where(timestamps >= 2 ** 55)[0]
if len(invalid_packet_ids) > 0:
            warnings.warn('Invalid first sample(s) detected in ncs file '
                          '(packet id(s) %s)! This error is ignored in '
                          'subsequent routines.' % (invalid_packet_ids,))
self.parameters_ncs[channel_id][
'invalid_first_samples'] = invalid_packet_ids
# checking consistency of data around corrupted packet time
for invalid_packet_id in invalid_packet_ids:
if invalid_packet_id < 2 or invalid_packet_id > len(
filehandle) - 2:
raise ValueError(
                        'Corrupted ncs data packet at the beginning '
                        'or end of file.')
elif (timestamps[invalid_packet_id + 1] - timestamps[
invalid_packet_id - 1]
!= 2 * (
timestamps[invalid_packet_id - 1] - timestamps[
invalid_packet_id - 2])):
                    raise ValueError('Starting times of ncs data packets around '
'corrupted data packet are not '
'consistent!')
    ############ Supplementary Functions ###########################
def get_channel_id_by_file_name(self, filename):
"""
        Checks the parameters of the NCS, NSE and NTT files for the given
        filename and returns the channel_id if the result is consistent.
:param filename:
:return:
"""
channel_ids = []
channel_ids += [k for k in self.parameters_ncs if
self.parameters_ncs[k]['filename'] == filename]
channel_ids += [k for k in self.parameters_nse if
self.parameters_nse[k]['filename'] == filename]
channel_ids += [k for k in self.parameters_ntt if
self.parameters_ntt[k]['filename'] == filename]
if len(np.unique(np.asarray(channel_ids))) == 1:
return channel_ids[0]
elif len(channel_ids) > 1:
raise ValueError(
                'Ambiguous channel ids detected. Filename %s is associated'
                ' with different channels across NCS, NSE and NTT files (%s)'
'' % (filename, channel_ids))
else: # if filename was not detected
return None
def hashfile(self, afile, hasher, blocksize=65536):
buf = afile.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(blocksize)
return hasher.digest()
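    # Illustrative usage (hypothetical call, not part of the original code):
    #     import hashlib
    #     digest = self.hashfile(open(self.sessiondir + sep + filename, 'rb'),
    #                            hashlib.sha256())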
def datesizefile(self, filename):
return str(os.path.getmtime(filename)) + '_' + str(
os.path.getsize(filename))
def _diagnostic_print(self, text):
'''
Print a diagnostic message.
Args:
text (string):
Diagnostic text to print.
Returns:
-
'''
if self._print_diagnostic:
print('NeuralynxIO: ' + text)
|
bsd-3-clause
|
martinbuc/missionplanner
|
Lib/site-packages/scipy/misc/common.py
|
53
|
10116
|
"""
Functions which are common and require SciPy Base and Level 1 SciPy
(special, linalg)
"""
from numpy import exp, asarray, arange, newaxis, hstack, product, array, \
where, zeros, extract, place, pi, sqrt, eye, poly1d, dot, r_
__all__ = ['factorial','factorial2','factorialk','comb',
'central_diff_weights', 'derivative', 'pade', 'lena']
# XXX: the factorial functions could move to scipy.special, and the others
# to numpy perhaps?
def factorial(n,exact=0):
"""
The factorial function, n! = special.gamma(n+1).
If exact is 0, then floating point precision is used, otherwise
exact long integer is computed.
- Array argument accepted only for exact=0 case.
- If n<0, the return value is 0.
Parameters
----------
n : int or array_like of ints
Calculate ``n!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above. If `exact` is set to True, calculate the
answer exactly using integer arithmetic. Default is False.
Returns
-------
nf : float or int
Factorial of `n`, as an integer or a float depending on `exact`.
Examples
--------
>>> arr = np.array([3,4,5])
>>> sc.factorial(arr, exact=False)
array([ 6., 24., 120.])
>>> sc.factorial(5, exact=True)
120L
"""
if exact:
if n < 0:
return 0L
val = 1L
for k in xrange(1,n+1):
val *= k
return val
else:
from scipy import special
n = asarray(n)
sv = special.errprint(0)
vals = special.gamma(n+1)
sv = special.errprint(sv)
return where(n>=0,vals,0)
def factorial2(n, exact=False):
"""
Double factorial.
This is the factorial with every second value skipped, i.e.,
``7!! = 7 * 5 * 3 * 1``. It can be approximated numerically as::
      n!! = special.gamma(n/2+1)*2**((n+1)/2)/sqrt(pi)  n odd
          = 2**(n/2) * (n/2)!                           n even
Parameters
----------
n : int or array_like
Calculate ``n!!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above (default). If `exact` is set to True, calculate the
answer exactly using integer arithmetic.
Returns
-------
nff : float or int
Double factorial of `n`, as an int or a float depending on
`exact`.
Examples
--------
>>> factorial2(7, exact=False)
array(105.00000000000001)
>>> factorial2(7, exact=True)
105L
"""
if exact:
if n < -1:
return 0L
if n <= 0:
return 1L
val = 1L
for k in xrange(n,0,-2):
val *= k
return val
else:
from scipy import special
n = asarray(n)
vals = zeros(n.shape,'d')
cond1 = (n % 2) & (n >= -1)
cond2 = (1-(n % 2)) & (n >= -1)
oddn = extract(cond1,n)
evenn = extract(cond2,n)
nd2o = oddn / 2.0
nd2e = evenn / 2.0
place(vals,cond1,special.gamma(nd2o+1)/sqrt(pi)*pow(2.0,nd2o+0.5))
place(vals,cond2,special.gamma(nd2e+1) * pow(2.0,nd2e))
return vals
def factorialk(n,k,exact=1):
"""
    Multifactorial of order k:  ``n(!!...!)`` with k exclamation marks.
Parameters
----------
n : int, array_like
Calculate multifactorial. Arrays are only supported with exact
set to False. If n < 0, the return value is 0.
exact : bool, optional
If exact is set to True, calculate the answer exactly using
integer arithmetic.
Returns
-------
val : int
Multi factorial of n.
Raises
------
NotImplementedError
        Raised when `exact` is set to False.
Examples
--------
>>> sc.factorialk(5, 1, exact=True)
120L
>>> sc.factorialk(5, 3, exact=True)
10L
"""
if exact:
if n < 1-k:
return 0L
if n<=0:
return 1L
val = 1L
for j in xrange(n,0,-k):
val = val*j
return val
else:
raise NotImplementedError
def comb(N,k,exact=0):
"""
The number of combinations of N things taken k at a time.
This is often expressed as "N choose k".
Parameters
----------
N : int, array
Number of things.
k : int, array
Number of elements taken.
exact : int, optional
If exact is 0, then floating point precision is used, otherwise
exact long integer is computed.
Returns
-------
val : int, array
The total number of combinations.
Notes
-----
- Array arguments accepted only for exact=0 case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> sc.comb(n, k, exact=False)
array([ 120., 210.])
>>> sc.comb(10, 3, exact=True)
120L
"""
if exact:
if (k > N) or (N < 0) or (k < 0):
return 0L
val = 1L
for j in xrange(min(k, N-k)):
val = (val*(N-j))//(j+1)
return val
else:
from scipy import special
k,N = asarray(k), asarray(N)
lgam = special.gammaln
cond = (k <= N) & (N >= 0) & (k >= 0)
sv = special.errprint(0)
vals = exp(lgam(N+1) - lgam(N-k+1) - lgam(k+1))
sv = special.errprint(sv)
return where(cond, vals, 0.0)
def central_diff_weights(Np, ndiv=1):
"""
Return weights for an Np-point central derivative of order ndiv
assuming equally-spaced function points.
If weights are in the vector w, then
    derivative is w[0] * f(x-ho*dx) + ... + w[-1] * f(x+ho*dx)
Notes
-----
Can be inaccurate for large number of points.
"""
if Np < ndiv + 1:
raise ValueError("Number of points must be at least the derivative order + 1.")
if Np % 2 == 0:
raise ValueError("The number of points must be odd.")
from scipy import linalg
ho = Np >> 1
x = arange(-ho,ho+1.0)
x = x[:,newaxis]
X = x**0.0
for k in range(1,Np):
X = hstack([X,x**k])
w = product(arange(1,ndiv+1),axis=0)*linalg.inv(X)[ndiv]
return w
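# Illustrative examples (added; the values follow from the Vandermonde solve
# above):
#     central_diff_weights(3, 1)   # -> array([-0.5,  0. ,  0.5])
#     central_diff_weights(3, 2)   # -> array([ 1., -2.,  1.])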
def derivative(func, x0, dx=1.0, n=1, args=(), order=3):
"""
Find the n-th derivative of a function at point x0.
Given a function, use a central difference formula with spacing `dx` to
compute the n-th derivative at `x0`.
Parameters
----------
func : function
Input function.
x0 : float
The point at which nth derivative is found.
    dx : float, optional
Spacing.
n : int, optional
Order of the derivative. Default is 1.
args : tuple, optional
Arguments
order : int, optional
Number of points to use, must be odd.
Notes
-----
    Decreasing the step size too much can result in round-off error.
Examples
--------
>>> def x2(x):
... return x*x
...
>>> derivative(x2, 2)
4.0
"""
if order < n + 1:
raise ValueError("'order' (the number of points used to compute the derivative), "
"must be at least the derivative order 'n' + 1.")
if order % 2 == 0:
raise ValueError("'order' (the number of points used to compute the derivative) "
"must be odd.")
# pre-computed for n=1 and 2 and low-order for speed.
if n==1:
if order == 3:
weights = array([-1,0,1])/2.0
elif order == 5:
weights = array([1,-8,0,8,-1])/12.0
elif order == 7:
weights = array([-1,9,-45,0,45,-9,1])/60.0
elif order == 9:
weights = array([3,-32,168,-672,0,672,-168,32,-3])/840.0
else:
weights = central_diff_weights(order,1)
elif n==2:
if order == 3:
weights = array([1,-2.0,1])
elif order == 5:
weights = array([-1,16,-30,16,-1])/12.0
elif order == 7:
weights = array([2,-27,270,-490,270,-27,2])/180.0
elif order == 9:
weights = array([-9,128,-1008,8064,-14350,8064,-1008,128,-9])/5040.0
else:
weights = central_diff_weights(order,2)
else:
weights = central_diff_weights(order, n)
val = 0.0
ho = order >> 1
for k in range(order):
val += weights[k]*func(x0+(k-ho)*dx,*args)
return val / product((dx,)*n,axis=0)
def pade(an, m):
"""Given Taylor series coefficients in an, return a Pade approximation to
the function as the ratio of two polynomials p / q where the order of q is m.
"""
from scipy import linalg
an = asarray(an)
N = len(an) - 1
n = N-m
if (n < 0):
raise ValueError("Order of q <m> must be smaller than len(an)-1.")
Akj = eye(N+1,n+1)
Bkj = zeros((N+1,m),'d')
for row in range(1,m+1):
Bkj[row,:row] = -(an[:row])[::-1]
for row in range(m+1,N+1):
Bkj[row,:] = -(an[row-m:row])[::-1]
C = hstack((Akj,Bkj))
pq = dot(linalg.inv(C),an)
p = pq[:n+1]
q = r_[1.0,pq[n+1:]]
return poly1d(p[::-1]), poly1d(q[::-1])
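# Illustrative example (added, not part of the original source): the (2, 2)
# Pade approximant of exp(x) from its Taylor coefficients,
#     p, q = pade([1., 1., 0.5, 1/6., 1/24.], 2)
# gives p(x)/q(x) = (1 + x/2 + x**2/12) / (1 - x/2 + x**2/12), which agrees
# with exp(x) to O(x**5) near x = 0.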
def lena():
"""
Get classic image processing example image, Lena, at 8-bit grayscale
bit-depth, 512 x 512 size.
Parameters
----------
None
Returns
-------
lena : ndarray
Lena image
Examples
--------
>>> import scipy.misc
>>> lena = scipy.misc.lena()
>>> lena.shape
(512, 512)
>>> lena.max()
245
>>> lena.dtype
dtype('int32')
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(lena)
>>> plt.show()
"""
import cPickle, os
fname = os.path.join(os.path.dirname(__file__),'lena.dat')
f = open(fname,'rb')
lena = array(cPickle.load(f))
f.close()
return lena
|
gpl-3.0
|
boomsbloom/dtm-fmri
|
DTM/for_gensim/lib/python2.7/site-packages/matplotlib/backends/backend_qt4.py
|
8
|
2867
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
from matplotlib.externals.six import unichr
import os
import re
import signal
import sys
import matplotlib
from matplotlib.cbook import is_string_like
from matplotlib.backend_bases import FigureManagerBase
from matplotlib.backend_bases import FigureCanvasBase
from matplotlib.backend_bases import NavigationToolbar2
from matplotlib.backend_bases import cursors
from matplotlib.backend_bases import TimerBase
from matplotlib.backend_bases import ShowBase
from matplotlib._pylab_helpers import Gcf
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
try:
import matplotlib.backends.qt_editor.figureoptions as figureoptions
except ImportError:
figureoptions = None
from .qt_compat import QtCore, QtWidgets, _getSaveFileName, __version__
from matplotlib.backends.qt_editor.formsubplottool import UiSubplotTool
from .backend_qt5 import (backend_version, SPECIAL_KEYS, SUPER, ALT, CTRL,
SHIFT, MODIFIER_KEYS, fn_name, cursord,
draw_if_interactive, _create_qApp, show, TimerQT,
MainWindow, FigureManagerQT, NavigationToolbar2QT,
SubplotToolQt, error_msg_qt, exception_handler)
from .backend_qt5 import FigureCanvasQT as FigureCanvasQT5
DEBUG = False
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
thisFig = Figure(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasQT(figure)
manager = FigureManagerQT(canvas, num)
return manager
class FigureCanvasQT(FigureCanvasQT5):
def __init__(self, figure):
if DEBUG:
print('FigureCanvasQt qt4: ', figure)
_create_qApp()
# Note different super-calling style to backend_qt5
QtWidgets.QWidget.__init__(self)
FigureCanvasBase.__init__(self, figure)
self.figure = figure
self.setMouseTracking(True)
self._idle = True
w, h = self.get_width_height()
self.resize(w, h)
def wheelEvent(self, event):
x = event.x()
# flipy so y=0 is bottom of canvas
y = self.figure.bbox.height - event.y()
# from QWheelEvent::delta doc
steps = event.delta()/120
if (event.orientation() == QtCore.Qt.Vertical):
FigureCanvasBase.scroll_event(self, x, y, steps)
if DEBUG:
print('scroll event: delta = %i, '
'steps = %i ' % (event.delta(), steps))
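        # Note added for clarity (not in the original backend): Qt4's
        # QWheelEvent.delta() reports eighths of a degree, and a standard
        # wheel notch is 15 degrees (delta == 120), so delta / 120 converts
        # the event into the number of scroll steps passed on to Matplotlib.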
FigureCanvas = FigureCanvasQT
FigureManager = FigureManagerQT
|
mit
|
wy2136/climate_index
|
nao.py
|
1
|
1919
|
#
# NAO daily index from http://www.esrl.noaa.gov/psd/data/timeseries/daily/NAO/.
#
# Written by Wenchang Yang (yang.wenchang@uci.edu)
#
import numpy as np
import pandas as pd
import xarray as xr
import datetime
import os
def get_daily_nao(months=None, years=None, reload_data=False):
''' NAO daily index from:
http://www.esrl.noaa.gov/psd/data/timeseries/daily/NAO/.
'''
module_dir, fname = os.path.split(__file__)
data_file = os.path.join(module_dir, 'data',
'nao.reanalysis.t10trunc.1948-present.txt')
data_file_exists = os.path.exists(data_file)
col_names = ['year', 'month', 'day', 'NAO']
# read data file
if data_file_exists and not reload_data:
# load data from local drive
df = pd.read_csv(data_file,
skiprows=0, names=col_names, sep=r'\s+')
else:
# download data from the internet
df = pd.read_csv(
'ftp://ftp.cdc.noaa.gov/Public/gbates/teleconn/nao.reanalysis.t10trunc.1948-present.txt',
skiprows=0, names=col_names, sep=r'\s+')
years_, months_, days_ = (df.loc[:, 'year'], df.loc[:, 'month'],
df.loc[:,'day'])
time = [datetime.datetime(year, month, day, 12, 0, 0)
for year, month, day in zip(years_, months_, days_)]
df.index = time
# select columns
df = df.loc[:, ['NAO']]
# mask invalid Value
# df[df>=999] = np.nan
# convert to xarray Dataset with dimension name "time"
ds = xr.Dataset.from_dataframe(df)
ds = ds.rename({'index': 'time'})
# select months
if months is not None:
L = False
for month in months:
L = L | (ds['time.month'] == month)
ds = ds.sel(time=ds['time'][L])
# select years
if years is not None:
L = False
for year in years:
L = L | (ds['time.year'] == year)
ds = ds.sel(time=ds['time'][L])
return ds['NAO']
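# Example usage (illustrative only; the month/year selections are hypothetical):
#     winter_nao = get_daily_nao(months=[12, 1, 2], years=range(1980, 2011))
#     winter_nao.mean(dim='time')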
|
bsd-3-clause
|
rubikloud/scikit-learn
|
examples/manifold/plot_lle_digits.py
|
138
|
8594
|
"""
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learns a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble,
discriminant_analysis, random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
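# Note added for clarity (not part of the original example): plot_embedding
# rescales each embedding to the unit square before drawing, so the different
# projections computed below are directly comparable, e.g. (hypothetical call):
#     plot_embedding(X_iso, "Isomap projection of the digits")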
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a random unitary matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing Linear Discriminant Analysis projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = discriminant_analysis.LinearDiscriminantAnalysis(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
|
bsd-3-clause
|
ericmckean/syzygy
|
third_party/numpy/files/numpy/lib/function_base.py
|
16
|
109648
|
__docformat__ = "restructuredtext en"
__all__ = ['select', 'piecewise', 'trim_zeros', 'copy', 'iterable',
'percentile', 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex',
'disp', 'extract', 'place', 'nansum', 'nanmax', 'nanargmax',
'nanargmin', 'nanmin', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov', 'corrcoef',
'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett',
'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring',
'meshgrid', 'delete', 'insert', 'append', 'interp']
import warnings
import types
import sys
import numpy.core.numeric as _nx
from numpy.core import linspace
from numpy.core.numeric import ones, zeros, arange, concatenate, array, \
asarray, asanyarray, empty, empty_like, ndarray, around
from numpy.core.numeric import ScalarType, dot, where, newaxis, intp, \
integer, isscalar
from numpy.core.umath import pi, multiply, add, arctan2, \
frompyfunc, isnan, cos, less_equal, sqrt, sin, mod, exp, log10
from numpy.core.fromnumeric import ravel, nonzero, choose, sort, mean
from numpy.core.numerictypes import typecodes, number
from numpy.core import atleast_1d, atleast_2d
from numpy.lib.twodim_base import diag
from _compiled_base import _insert, add_docstring
from _compiled_base import digitize, bincount, interp as compiled_interp
from arraysetops import setdiff1d
from utils import deprecate
import numpy as np
def iterable(y):
"""
Check whether or not an object can be iterated over.
Parameters
----------
y : object
Input object.
Returns
-------
b : {0, 1}
Return 1 if the object has an iterator method or is a sequence,
and 0 otherwise.
Examples
--------
>>> np.iterable([1, 2, 3])
1
>>> np.iterable(2)
0
"""
try: iter(y)
except: return 0
return 1
def histogram(a, bins=10, range=None, normed=False, weights=None, density=None):
"""
Compute the histogram of a set of data.
Parameters
----------
a : array_like
Input data. The histogram is computed over the flattened array.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a sequence,
it defines the bin edges, including the rightmost edge, allowing
for non-uniform bin widths.
range : (float, float), optional
The lower and upper range of the bins. If not provided, range
is simply ``(a.min(), a.max())``. Values outside the range are
ignored.
normed : bool, optional
This keyword is deprecated in Numpy 1.6 due to confusing/buggy
behavior. It will be removed in Numpy 2.0. Use the density keyword
instead.
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that this latter behavior is
known to be buggy with unequal bin widths; use `density` instead.
weights : array_like, optional
An array of weights, of the same shape as `a`. Each value in `a`
only contributes its associated weight towards the bin count
(instead of 1). If `normed` is True, the weights are normalized,
so that the integral of the density over the range remains 1
density : bool, optional
If False, the result will contain the number of samples
in each bin. If True, the result is the value of the
probability *density* function at the bin, normalized such that
the *integral* over the range is 1. Note that the sum of the
histogram values will not be equal to 1 unless bins of unity
width are chosen; it is not a probability *mass* function.
Overrides the `normed` keyword if given.
Returns
-------
hist : array
The values of the histogram. See `normed` and `weights` for a
description of the possible semantics.
bin_edges : array of dtype float
Return the bin edges ``(length(hist)+1)``.
See Also
--------
histogramdd, bincount, searchsorted, digitize
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is::
[1, 2, 3, 4]
then the first bin is ``[1, 2)`` (including 1, but excluding 2) and the
second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which *includes*
4.
Examples
--------
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
(array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist*np.diff(bin_edges))
1.0
"""
a = asarray(a)
if weights is not None:
weights = asarray(weights)
if np.any(weights.shape != a.shape):
raise ValueError(
'weights should have the same shape as a.')
weights = weights.ravel()
a = a.ravel()
if (range is not None):
mn, mx = range
if (mn > mx):
raise AttributeError(
'max must be larger than min in range parameter.')
if not iterable(bins):
if np.isscalar(bins) and bins < 1:
raise ValueError("`bins` should be a positive integer.")
if range is None:
if a.size == 0:
# handle empty arrays. Can't determine range, so use 0-1.
range = (0, 1)
else:
range = (a.min(), a.max())
mn, mx = [mi+0.0 for mi in range]
if mn == mx:
mn -= 0.5
mx += 0.5
bins = linspace(mn, mx, bins+1, endpoint=True)
else:
bins = asarray(bins)
if (np.diff(bins) < 0).any():
raise AttributeError(
'bins must increase monotonically.')
# Histogram is an integer or a float array depending on the weights.
if weights is None:
ntype = int
else:
ntype = weights.dtype
n = np.zeros(bins.shape, ntype)
block = 65536
if weights is None:
for i in arange(0, len(a), block):
sa = sort(a[i:i+block])
n += np.r_[sa.searchsorted(bins[:-1], 'left'), \
sa.searchsorted(bins[-1], 'right')]
else:
zero = array(0, dtype=ntype)
for i in arange(0, len(a), block):
tmp_a = a[i:i+block]
tmp_w = weights[i:i+block]
sorting_index = np.argsort(tmp_a)
sa = tmp_a[sorting_index]
sw = tmp_w[sorting_index]
cw = np.concatenate(([zero,], sw.cumsum()))
bin_index = np.r_[sa.searchsorted(bins[:-1], 'left'), \
sa.searchsorted(bins[-1], 'right')]
n += cw[bin_index]
n = np.diff(n)
if density is not None:
if density:
db = array(np.diff(bins), float)
return n/db/n.sum(), bins
else:
return n, bins
else:
# deprecated, buggy behavior. Remove for Numpy 2.0
if normed:
db = array(np.diff(bins), float)
return n/(n*db).sum(), bins
else:
return n, bins
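# Illustrative note (added, not part of the original source): the blocked
# counting above relies on searchsorted over sorted data. For example, with
# a = [0.5, 1.5, 1.7, 2.5] and bins = [0, 1, 2, 3]:
#     sa = np.sort(a)
#     np.r_[sa.searchsorted(bins[:-1], 'left'),
#           sa.searchsorted(bins[-1], 'right')]   # -> [0, 1, 3, 4]
#     np.diff([0, 1, 3, 4])                       # -> [1, 2, 1], counts per bin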
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
"""
Compute the multidimensional histogram of some data.
Parameters
----------
sample : array_like
The data to be histogrammed. It must be an (N,D) array or data
that can be converted to such. The rows of the resulting array
are the coordinates of points in a D dimensional polytope.
bins : sequence or int, optional
The bin specification:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... =bins)
* The number of bins for all dimensions (nx=ny=...=bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
        not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
normed : bool, optional
If False, returns the number of samples in each bin. If True, returns
        the bin density, i.e., the bin count divided by the bin hypervolume.
weights : array_like (N,), optional
An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
Weights are normalized to 1 if normed is True. If normed is False, the
values of the returned histogram are equal to the sum of the weights
belonging to the samples falling into each bin.
Returns
-------
H : ndarray
The multidimensional histogram of sample x. See normed and weights for
the different possible semantics.
edges : list
A list of D arrays describing the bin edges for each dimension.
See Also
--------
histogram: 1-D histogram
histogram2d: 2-D histogram
Examples
--------
>>> r = np.random.randn(100,3)
>>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
>>> H.shape, edges[0].size, edges[1].size, edges[2].size
((5, 8, 4), 6, 9, 5)
"""
try:
# Sample is an ND-array.
N, D = sample.shape
except (AttributeError, ValueError):
# Sample is a sequence of 1D arrays.
sample = atleast_2d(sample).T
N, D = sample.shape
nbin = empty(D, int)
edges = D*[None]
dedges = D*[None]
if weights is not None:
weights = asarray(weights)
try:
M = len(bins)
if M != D:
raise AttributeError(
'The dimension of bins must be equal'\
' to the dimension of the sample x.')
except TypeError:
# bins is an integer
bins = D*[bins]
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
# Handle empty input. Range can't be determined in that case, use 0-1.
if N == 0:
smin = zeros(D)
smax = ones(D)
else:
smin = atleast_1d(array(sample.min(0), float))
smax = atleast_1d(array(sample.max(0), float))
else:
smin = zeros(D)
smax = zeros(D)
for i in arange(D):
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in arange(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Create edge arrays
for i in arange(D):
if isscalar(bins[i]):
if bins[i] < 1:
raise ValueError("Element at index %s in `bins` should be "
"a positive integer." % i)
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = linspace(smin[i], smax[i], nbin[i]-1)
else:
edges[i] = asarray(bins[i], float)
nbin[i] = len(edges[i])+1 # +1 for outlier bins
dedges[i] = diff(edges[i])
if np.any(np.asarray(dedges[i]) <= 0):
raise ValueError("""
Found bin edge of size <= 0. Did you specify `bins` with
non-monotonic sequence?""")
# Handle empty input.
if N == 0:
return np.zeros(D), edges
nbin = asarray(nbin)
# Compute the bin number each sample falls into.
Ncount = {}
for i in arange(D):
Ncount[i] = digitize(sample[:,i], edges[i])
# Using digitize, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
outliers = zeros(N, int)
for i in arange(D):
# Rounding precision
mindiff = dedges[i].min()
if not np.isinf(mindiff):
decimal = int(-log10(mindiff)) + 6
# Find which points are on the rightmost edge.
on_edge = where(around(sample[:,i], decimal) == around(edges[i][-1],
decimal))[0]
# Shift these points one bin to the left.
Ncount[i][on_edge] -= 1
# Flattened histogram matrix (1D)
# Reshape is used so that overlarge arrays
# will raise an error.
hist = zeros(nbin, float).reshape(-1)
# Compute the sample indices in the flattened histogram matrix.
ni = nbin.argsort()
shape = []
xy = zeros(N, int)
for i in arange(0, D-1):
xy += Ncount[ni[i]] * nbin[ni[i+1:]].prod()
xy += Ncount[ni[-1]]
# Compute the number of repetitions in xy and assign it to the
# flattened histmat.
if len(xy) == 0:
return zeros(nbin-2, int), edges
flatcount = bincount(xy, weights)
a = arange(len(flatcount))
hist[a] = flatcount
# Shape into a proper matrix
hist = hist.reshape(sort(nbin))
for i in arange(nbin.size):
j = ni.argsort()[i]
hist = hist.swapaxes(i,j)
ni[i],ni[j] = ni[j],ni[i]
# Remove outliers (indices 0 and -1 for each dimension).
core = D*[slice(1,-1)]
hist = hist[core]
# Normalize if normed is True
if normed:
s = hist.sum()
for i in arange(D):
shape = ones(D, int)
shape[i] = nbin[i] - 2
hist = hist / dedges[i].reshape(shape)
hist /= s
if (hist.shape != nbin - 2).any():
raise RuntimeError(
"Internal Shape Error")
return hist, edges
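# A minimal usage sketch: ``_demo_histogramdd`` is a hypothetical helper (not
# part of the public API) showing ``histogramdd`` with explicit bin edges and
# per-sample weights; the weighted counts land in the bins those edges delimit.
def _demo_histogramdd():
    pts = np.array([[0.1, 0.2], [0.4, 0.8], [0.9, 0.9]])
    bin_edges = [np.linspace(0.0, 1.0, 3), np.linspace(0.0, 1.0, 3)]
    H, returned_edges = histogramdd(pts, bins=bin_edges,
                                    weights=[1.0, 2.0, 3.0])
    # Two bins per dimension -> a 2x2 matrix of weighted counts summing to 6.
    assert H.shape == (2, 2) and H.sum() == 6.0
    return H, returned_edges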
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
Parameters
----------
a : array_like
Array containing data to be averaged. If `a` is not an array, a
conversion is attempted.
axis : int, optional
Axis along which to average `a`. If `None`, averaging is done over
the flattened array.
weights : array_like, optional
An array of weights associated with the values in `a`. Each value in
`a` contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
weight equal to one.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
Returns
-------
average, [sum_of_weights] : {array_type, double}
Return the average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `Float`
if `a` is of integer type, otherwise it is of the same type as `a`.
`sum_of_weights` is of the same type as `average`.
Raises
------
ZeroDivisionError
When all weights along axis are zero. See `numpy.ma.average` for a
version robust to this type of error.
TypeError
When the length of 1D `weights` is not the same as the shape of `a`
along axis.
See Also
--------
mean
ma.average : average for masked arrays
Examples
--------
>>> data = range(1,5)
>>> data
[1, 2, 3, 4]
>>> np.average(data)
2.5
>>> np.average(range(1,11), weights=range(10,0,-1))
4.0
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0, 1],
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
array([ 0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
Traceback (most recent call last):
...
TypeError: Axis must be specified when shapes of a and weights differ.
"""
if not isinstance(a, np.matrix) :
a = np.asarray(a)
if weights is None :
avg = a.mean(axis)
scl = avg.dtype.type(a.size/avg.size)
else :
a = a + 0.0
wgt = np.array(weights, dtype=a.dtype, copy=0)
# Sanity checks
if a.shape != wgt.shape :
if axis is None :
raise TypeError(
"Axis must be specified when shapes of a "\
"and weights differ.")
if wgt.ndim != 1 :
raise TypeError(
"1D weights expected when shapes of a and "\
"weights differ.")
if wgt.shape[0] != a.shape[axis] :
raise ValueError(
"Length of weights not compatible with "\
"specified axis.")
# setup wgt to broadcast along axis
wgt = np.array(wgt, copy=0, ndmin=a.ndim).swapaxes(-1, axis)
scl = wgt.sum(axis=axis)
if (scl == 0.0).any():
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
avg = np.multiply(a, wgt).sum(axis)/scl
if returned:
scl = np.multiply(avg, 0) + scl
return avg, scl
else:
return avg
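# A minimal usage sketch: ``_demo_average`` is a hypothetical helper (not part
# of the public API) showing a weighted average along an axis and the sum of
# weights that ``returned=True`` hands back alongside it.
def _demo_average():
    data = np.arange(6).reshape((3, 2)).astype(float)
    avg, wsum = average(data, axis=1, weights=[0.25, 0.75], returned=True)
    # Each row is averaged with weights 1/4 and 3/4; the weights sum to 1.
    assert np.allclose(avg, [0.75, 2.75, 4.75])
    assert np.allclose(wsum, 1.0)
    return avg, wsum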
def asarray_chkfinite(a):
"""
Convert the input to an array, checking for NaNs or Infs.
Parameters
----------
a : array_like
Input data, in any form that can be converted to an array. This
includes lists, lists of tuples, tuples, tuples of tuples, tuples
of lists and ndarrays. Success requires no NaNs or Infs.
dtype : data-type, optional
By default, the data-type is inferred from the input data.
order : {'C', 'F'}, optional
Whether to use row-major ('C') or column-major ('FORTRAN') memory
representation. Defaults to 'C'.
Returns
-------
out : ndarray
Array interpretation of `a`. No copy is performed if the input
is already an ndarray. If `a` is a subclass of ndarray, a base
class ndarray is returned.
Raises
------
ValueError
Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity).
See Also
--------
    asarray : Create an array.
asanyarray : Similar function which passes through subclasses.
ascontiguousarray : Convert input to a contiguous array.
asfarray : Convert input to a floating point ndarray.
asfortranarray : Convert input to an ndarray with column-major
memory order.
fromiter : Create an array from an iterator.
fromfunction : Construct an array by executing a function on grid
positions.
Examples
--------
Convert a list into an array. If all elements are finite
``asarray_chkfinite`` is identical to ``asarray``.
>>> a = [1, 2]
>>> np.asarray_chkfinite(a)
array([1, 2])
Raises ValueError if array_like contains Nans or Infs.
>>> a = [1, 2, np.inf]
>>> try:
... np.asarray_chkfinite(a)
... except ValueError:
... print 'ValueError'
...
ValueError
"""
a = asarray(a)
if (a.dtype.char in typecodes['AllFloat']) \
and (_nx.isnan(a).any() or _nx.isinf(a).any()):
raise ValueError(
"array must not contain infs or NaNs")
return a
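# A minimal usage sketch: ``_demo_asarray_chkfinite`` is a hypothetical helper
# (not part of the public API) showing that finite input passes through
# unchanged while NaN or Inf raises ``ValueError``.
def _demo_asarray_chkfinite():
    clean = asarray_chkfinite([1.0, 2.0, 3.0])
    try:
        asarray_chkfinite([1.0, np.inf])
    except ValueError:
        return clean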
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
Given a set of conditions and corresponding functions, evaluate each
function on the input data wherever its condition is true.
Parameters
----------
x : ndarray
The input domain.
condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
Each boolean array in `condlist` selects a piece of `x`,
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
If one extra function is given, i.e. if
``len(funclist) - len(condlist) == 1``, then that extra function
is the default value, used wherever all conditions are false.
funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
each function is called as ``f(x, 1, 'a')``.
kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
``piecewise(..., ..., lambda=1)``, then each function is called as
``f(x, lambda=1)``.
Returns
-------
out : ndarray
The output is the same shape and type as x and is found by
calling the functions in `funclist` on the appropriate portions of `x`,
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have undefined values.
See Also
--------
choose, select, where
Notes
-----
This is similar to choose or select, except that functions are
evaluated on elements of `x` that satisfy the corresponding condition from
`condlist`.
The result is::
|--
|funclist[0](x[condlist[0]])
out = |funclist[1](x[condlist[1]])
|...
|funclist[n2](x[condlist[n2]])
|--
Examples
--------
Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
>>> x = np.arange(6) - 2.5
>>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
"""
x = asanyarray(x)
n2 = len(funclist)
if isscalar(condlist) or \
not (isinstance(condlist[0], list) or
isinstance(condlist[0], ndarray)):
condlist = [condlist]
condlist = [asarray(c, dtype=bool) for c in condlist]
n = len(condlist)
if n == n2-1: # compute the "otherwise" condition.
totlist = condlist[0]
for k in range(1, n):
totlist |= condlist[k]
condlist.append(~totlist)
n += 1
if (n != n2):
raise ValueError(
"function list and condition list must be the same")
zerod = False
# This is a hack to work around problems with NumPy's
# handling of 0-d arrays and boolean indexing with
# numpy.bool_ scalars
if x.ndim == 0:
x = x[None]
zerod = True
newcondlist = []
for k in range(n):
if condlist[k].ndim == 0:
condition = condlist[k][None]
else:
condition = condlist[k]
newcondlist.append(condition)
condlist = newcondlist
y = zeros(x.shape, x.dtype)
for k in range(n):
item = funclist[k]
if not callable(item):
y[condlist[k]] = item
else:
vals = x[condlist[k]]
if vals.size > 0:
y[condlist[k]] = item(vals, *args, **kw)
if zerod:
y = y.squeeze()
return y
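# A minimal usage sketch: ``_demo_piecewise`` is a hypothetical helper (not
# part of the public API) showing how extra positional arguments are forwarded
# to every callable in ``funclist``.
def _demo_piecewise():
    x = np.linspace(-2.0, 2.0, 5)
    # Both branches receive ``factor`` (here 2.0) as an extra argument.
    y = piecewise(x, [x < 0, x >= 0],
                  [lambda v, factor: -v * factor,
                   lambda v, factor: v * factor], 2.0)
    assert np.allclose(y, np.abs(x) * 2.0)
    return y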
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
Parameters
----------
condlist : list of bool ndarrays
The list of conditions which determine from which array in `choicelist`
the output elements are taken. When multiple conditions are satisfied,
the first one encountered in `condlist` is used.
choicelist : list of ndarrays
The list of arrays from which the output elements are taken. It has
to be of the same length as `condlist`.
default : scalar, optional
The element inserted in `output` when all conditions evaluate to False.
Returns
-------
output : ndarray
The output at position m is the m-th element of the array in
`choicelist` where the m-th element of the corresponding array in
`condlist` is True.
See Also
--------
where : Return elements from one of two arrays depending on condition.
take, choose, compress, diag, diagonal
Examples
--------
>>> x = np.arange(10)
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
n = len(condlist)
n2 = len(choicelist)
if n2 != n:
raise ValueError(
"list of cases must be same length as list of conditions")
choicelist = [default] + choicelist
S = 0
pfac = 1
for k in range(1, n+1):
S += k * pfac * asarray(condlist[k-1])
if k < n:
pfac *= (1-asarray(condlist[k-1]))
# handle special case of a 1-element condition but
# a multi-element choice
if type(S) in ScalarType or max(asarray(S).shape)==1:
pfac = asarray(1)
for k in range(n2+1):
pfac = pfac + asarray(choicelist[k])
if type(S) in ScalarType:
S = S*ones(asarray(pfac).shape, type(S))
else:
S = S*ones(asarray(pfac).shape, S.dtype)
return choose(S, tuple(choicelist))
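# A minimal usage sketch: ``_demo_select`` is a hypothetical helper (not part
# of the public API) showing how a non-zero ``default`` fills the positions
# where no condition holds.
def _demo_select():
    x = np.arange(10)
    out = select([x < 3, x > 5], [x, x ** 2], default=-1)
    # Positions 3, 4 and 5 satisfy neither condition and receive -1.
    assert list(out[3:6]) == [-1, -1, -1]
    return out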
def copy(a):
"""
Return an array copy of the given object.
Parameters
----------
a : array_like
Input data.
Returns
-------
arr : ndarray
Array interpretation of `a`.
Notes
-----
This is equivalent to
>>> np.array(a, copy=True) #doctest: +SKIP
Examples
--------
Create an array x, with a reference y and a copy z:
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
Note that, when we modify x, y changes, but not z:
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return array(a, copy=True)
# Basic operations
def gradient(f, *varargs):
"""
Return the gradient of an N-dimensional array.
The gradient is computed using central differences in the interior
and first differences at the boundaries. The returned gradient hence has
the same shape as the input array.
Parameters
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
`*varargs` : scalars
0, 1, or N scalars specifying the sample distances in each direction,
that is: `dx`, `dy`, `dz`, ... The default distance is 1.
Returns
-------
g : ndarray
N arrays of the same shape as `f` giving the derivative of `f` with
respect to each dimension.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
>>> np.gradient(x)
array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]),
array([[ 1. , 2.5, 4. ],
[ 1. , 1. , 1. ]])]
"""
N = len(f.shape) # number of dimensions
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
elif n == N:
dx = list(varargs)
else:
raise SyntaxError(
"invalid number of arguments")
# use central differences on interior and first differences on endpoints
outvals = []
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)]*N
slice2 = [slice(None)]*N
slice3 = [slice(None)]*N
otype = f.dtype.char
if otype not in ['f', 'd', 'F', 'D']:
otype = 'd'
for axis in range(N):
# select out appropriate parts for this dimension
out = np.zeros_like(f).astype(otype)
slice1[axis] = slice(1, -1)
slice2[axis] = slice(2, None)
slice3[axis] = slice(None, -2)
# 1D equivalent -- out[1:-1] = (f[2:] - f[:-2])/2.0
out[slice1] = (f[slice2] - f[slice3])/2.0
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (f[1] - f[0])
out[slice1] = (f[slice2] - f[slice3])
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (f[-1] - f[-2])
out[slice1] = (f[slice2] - f[slice3])
# divide by step size
outvals.append(out / dx[axis])
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
if N == 1:
return outvals[0]
else:
return outvals
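# A minimal usage sketch: ``_demo_gradient`` is a hypothetical helper (not
# part of the public API) showing that a scalar sample distance simply divides
# the central/one-sided differences by that step.
def _demo_gradient():
    f = np.array([0.0, 1.0, 4.0, 9.0, 16.0])   # f(x) = x**2 sampled at step 1
    g_unit = gradient(f)
    g_half = gradient(f, 0.5)                  # same samples, spacing of 0.5
    assert np.allclose(g_half, 2.0 * g_unit)
    return g_unit, g_half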
def diff(a, n=1, axis=-1):
"""
Calculate the n-th order discrete difference along given axis.
The first order difference is given by ``out[n] = a[n+1] - a[n]`` along
the given axis, higher order differences are calculated by using `diff`
recursively.
Parameters
----------
a : array_like
Input array
n : int, optional
The number of times values are differenced.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
Returns
-------
out : ndarray
The `n` order differences. The shape of the output is the same as `a`
except along `axis` where the dimension is smaller by `n`.
See Also
--------
gradient, ediff1d
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
a = asanyarray(a)
nd = len(a.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
if n > 1:
return diff(a[slice1]-a[slice2], n-1, axis=axis)
else:
return a[slice1]-a[slice2]
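# A minimal usage sketch: ``_demo_diff`` is a hypothetical helper (not part of
# the public API) showing that the second-order difference of a quadratic
# sequence is constant.
def _demo_diff():
    x = np.arange(6) ** 2          # 0, 1, 4, 9, 16, 25
    d2 = diff(x, n=2)
    assert list(d2) == [2, 2, 2, 2]
    return d2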
def interp(x, xp, fp, left=None, right=None):
"""
One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : array_like
The x-coordinates of the interpolated values.
xp : 1-D sequence of floats
The x-coordinates of the data points, must be increasing.
fp : 1-D sequence of floats
The y-coordinates of the data points, same length as `xp`.
left : float, optional
Value to return for `x < xp[0]`, default is `fp[0]`.
right : float, optional
Value to return for `x > xp[-1]`, defaults is `fp[-1]`.
Returns
-------
y : {float, ndarray}
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
Notes
-----
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasingness is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
"""
if isinstance(x, (float, int, number)):
return compiled_interp([x], xp, fp, left, right).item()
elif isinstance(x, np.ndarray) and x.ndim == 0:
return compiled_interp([x], xp, fp, left, right).item()
else:
return compiled_interp(x, xp, fp, left, right)
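# A minimal usage sketch: ``_demo_interp`` is a hypothetical helper (not part
# of the public API) showing interpolation inside the data range and the
# ``left``/``right`` fill values outside of it.
def _demo_interp():
    xp = [0.0, 1.0, 2.0]
    fp = [0.0, 10.0, 20.0]
    y = interp([-1.0, 0.5, 1.5, 3.0], xp, fp, left=-99.0, right=99.0)
    assert np.allclose(y, [-99.0, 5.0, 15.0, 99.0])
    return y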
def angle(z, deg=0):
"""
Return the angle of the complex argument.
Parameters
----------
z : array_like
A complex number or sequence of complex numbers.
deg : bool, optional
Return angle in degrees if True, radians if False (default).
Returns
-------
angle : {ndarray, scalar}
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
See Also
--------
arctan2
absolute
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
array([ 0. , 1.57079633, 0.78539816])
>>> np.angle(1+1j, deg=True) # in degrees
45.0
"""
if deg:
fact = 180/pi
else:
fact = 1.0
z = asarray(z)
if (issubclass(z.dtype.type, _nx.complexfloating)):
zimag = z.imag
zreal = z.real
else:
zimag = 0
zreal = z
return arctan2(zimag, zreal) * fact
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
Unwrap radian phase `p` by changing absolute jumps greater than
`discont` to their 2*pi complement along the given axis.
Parameters
----------
p : array_like
Input array.
discont : float, optional
Maximum discontinuity between values, default is ``pi``.
axis : int, optional
Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
Output array.
See Also
--------
rad2deg, deg2rad
Notes
-----
If the discontinuity in `p` is smaller than ``pi``, but larger than
`discont`, no unwrapping is done because taking the 2*pi complement
would only make the discontinuity larger.
Examples
--------
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
>>> np.unwrap(phase)
array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
nd = len(p.shape)
dd = diff(p, axis=axis)
slice1 = [slice(None, None)]*nd # full slices
slice1[axis] = slice(1, None)
ddmod = mod(dd+pi, 2*pi)-pi
_nx.putmask(ddmod, (ddmod==-pi) & (dd > 0), pi)
ph_correct = ddmod - dd;
_nx.putmask(ph_correct, abs(dd)<discont, 0)
up = array(p, copy=True, dtype='d')
up[slice1] = p[slice1] + ph_correct.cumsum(axis)
return up
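# A minimal usage sketch: ``_demo_unwrap`` is a hypothetical helper (not part
# of the public API) showing that a jump larger than ``discont`` is replaced
# by its 2*pi complement, recovering a smooth phase ramp.
def _demo_unwrap():
    phase = np.linspace(0, np.pi, num=5)
    phase[3:] += 2 * np.pi          # introduce an artificial 2*pi jump
    unwrapped = unwrap(phase)
    assert np.allclose(unwrapped, np.linspace(0, np.pi, num=5))
    return unwrapped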
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
Parameters
----------
a : array_like
Input array
Returns
-------
out : complex ndarray
Always returns a sorted complex array.
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a,copy=True)
b.sort()
if not issubclass(b.dtype.type, _nx.complexfloating):
if b.dtype.char in 'bhBH':
return b.astype('F')
elif b.dtype.char == 'g':
return b.astype('G')
else:
return b.astype('D')
else:
return b
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
Parameters
----------
filt : 1-D array or sequence
Input array.
trim : str, optional
A string with 'f' representing trim from front and 'b' to trim from
back. Default is 'fb', trim zeros from both front and back of the
array.
Returns
-------
trimmed : 1-D array or sequence
The result of trimming the input. The input data type is preserved.
Examples
--------
>>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
>>> np.trim_zeros(a)
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
array([0, 0, 0, 1, 2, 3, 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
>>> np.trim_zeros([0, 1, 2, 0])
[1, 2]
"""
first = 0
trim = trim.upper()
if 'F' in trim:
for i in filt:
if i != 0.: break
else: first = first + 1
last = len(filt)
if 'B' in trim:
for i in filt[::-1]:
if i != 0.: break
else: last = last - 1
return filt[first:last]
import sys
if sys.hexversion < 0x2040000:
from sets import Set as set
@deprecate
def unique(x):
"""
This function is deprecated. Use numpy.lib.arraysetops.unique()
instead.
"""
try:
tmp = x.flatten()
if tmp.size == 0:
return tmp
tmp.sort()
idx = concatenate(([True],tmp[1:]!=tmp[:-1]))
return tmp[idx]
except AttributeError:
items = list(set(x))
items.sort()
return asarray(items)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If
`condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``.
Parameters
----------
condition : array_like
An array whose nonzero or True entries indicate the elements of `arr`
to extract.
arr : array_like
Input array of the same size as `condition`.
See Also
--------
take, put, putmask, compress
Examples
--------
>>> arr = np.arange(12).reshape((3, 4))
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
array([[ True, False, False, True],
[False, False, True, False],
[False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
array([0, 3, 6, 9])
If `condition` is boolean:
>>> arr[condition]
array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
Similar to ``np.putmask(arr, mask, vals)``, the difference is that `place`
uses the first N elements of `vals`, where N is the number of True values
in `mask`, while `putmask` uses the elements where `mask` is True.
Note that `extract` does the exact opposite of `place`.
Parameters
----------
arr : array_like
Array to put data into.
mask : array_like
        Boolean mask array. Must have the same size as `arr`.
vals : 1-D sequence
        Values to put into `arr`. Only the first N elements are used, where
        N is the number of True values in `mask`. If `vals` is smaller
        than N, it will be repeated.
See Also
--------
putmask, put, take, extract
Examples
--------
>>> arr = np.arange(6).reshape(2, 3)
>>> np.place(arr, arr>2, [44, 55])
>>> arr
array([[ 0, 1, 2],
[44, 55, 44]])
"""
return _insert(arr, mask, vals)
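# A minimal usage sketch: ``_demo_place`` is a hypothetical helper (not part
# of the public API) showing ``place`` cycling through ``vals`` and
# ``extract`` acting as its inverse.
def _demo_place():
    arr = np.arange(6).reshape(2, 3)
    place(arr, arr > 2, [44, 55])
    assert list(arr.ravel()) == [0, 1, 2, 44, 55, 44]
    assert list(extract(arr > 40, arr)) == [44, 55, 44]
    return arr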
def _nanop(op, fill, a, axis=None):
"""
General operation on arrays with not-a-number values.
Parameters
----------
op : callable
Operation to perform.
fill : float
NaN values are set to fill before doing the operation.
a : array-like
Input array.
axis : {int, None}, optional
Axis along which the operation is computed.
By default the input is flattened.
Returns
-------
y : {ndarray, scalar}
Processed data.
"""
y = array(a, subok=True)
# We only need to take care of NaN's in floating point arrays
if np.issubdtype(y.dtype, np.integer):
return op(y, axis=axis)
mask = isnan(a)
# y[mask] = fill
# We can't use fancy indexing here as it'll mess w/ MaskedArrays
# Instead, let's fill the array directly...
np.putmask(y, mask, fill)
res = op(y, axis=axis)
mask_all_along_axis = mask.all(axis=axis)
# Along some axes, only nan's were encountered. As such, any values
# calculated along that axis should be set to nan.
if mask_all_along_axis.any():
if np.isscalar(res):
res = np.nan
else:
res[mask_all_along_axis] = np.nan
return res
def nansum(a, axis=None):
"""
Return the sum of array elements over a given axis treating
Not a Numbers (NaNs) as zero.
Parameters
----------
a : array_like
Array containing numbers whose sum is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the sum is computed. The default is to compute
the sum of the flattened array.
Returns
-------
y : ndarray
An array with the same shape as a, with the specified axis removed.
If a is a 0-d array, or if axis is None, a scalar is returned with
the same dtype as `a`.
See Also
--------
numpy.sum : Sum across array including Not a Numbers.
isnan : Shows which elements are Not a Number (NaN).
isfinite: Shows which elements are not: Not a Number, positive and
negative infinity
Notes
-----
    Numpy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
If positive or negative infinity are present the result is positive or
negative infinity. But if both positive and negative infinity are present,
the result is Not A Number (NaN).
    Arithmetic is modular when using integer types, and no error is raised
    on overflow. (An integer array cannot contain NaNs or infinities, since
    those exist only for floating point types.)
Examples
--------
>>> np.nansum(1)
1
>>> np.nansum([1])
1
>>> np.nansum([1, np.nan])
1.0
>>> a = np.array([[1, 1], [1, np.nan]])
>>> np.nansum(a)
3.0
>>> np.nansum(a, axis=0)
array([ 2., 1.])
When positive infinity and negative infinity are present
>>> np.nansum([1, np.nan, np.inf])
inf
>>> np.nansum([1, np.nan, np.NINF])
-inf
>>> np.nansum([1, np.nan, np.inf, np.NINF])
nan
"""
return _nanop(np.sum, 0, a, axis)
def nanmin(a, axis=None):
"""
Return the minimum of an array or minimum along an axis ignoring any NaNs.
Parameters
----------
a : array_like
Array containing numbers whose minimum is desired.
axis : int, optional
        Axis along which the minimum is computed. The default is to compute
the minimum of the flattened array.
Returns
-------
nanmin : ndarray
A new array or a scalar array with the result.
See Also
--------
numpy.amin : Minimum across array including any Not a Numbers.
numpy.nanmax : Maximum across array ignoring any Not a Numbers.
isnan : Shows which elements are Not a Number (NaN).
isfinite: Shows which elements are not: Not a Number, positive and
negative infinity
Notes
-----
    Numpy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative infinity
is treated as a very small (i.e. negative) number.
    If the input has an integer type the function is equivalent to np.min.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmin(a)
1.0
>>> np.nanmin(a, axis=0)
array([ 1., 2.])
>>> np.nanmin(a, axis=1)
array([ 1., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmin([1, 2, np.nan, np.inf])
1.0
>>> np.nanmin([1, 2, np.nan, np.NINF])
-inf
"""
a = np.asanyarray(a)
if axis is not None:
return np.fmin.reduce(a, axis)
else:
return np.fmin.reduce(a.flat)
def nanargmin(a, axis=None):
"""
Return indices of the minimum values over an axis, ignoring NaNs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmin, nanargmax
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmin(a)
0
>>> np.nanargmin(a)
2
>>> np.nanargmin(a, axis=0)
array([1, 1])
>>> np.nanargmin(a, axis=1)
array([1, 0])
"""
return _nanop(np.argmin, np.inf, a, axis)
def nanmax(a, axis=None):
"""
Return the maximum of an array or maximum along an axis ignoring any NaNs.
Parameters
----------
a : array_like
Array containing numbers whose maximum is desired. If `a` is not
an array, a conversion is attempted.
axis : int, optional
Axis along which the maximum is computed. The default is to compute
the maximum of the flattened array.
Returns
-------
nanmax : ndarray
An array with the same shape as `a`, with the specified axis removed.
        If `a` is a 0-d array, or if axis is None, an ndarray scalar is
        returned with the same dtype as `a`.
See Also
--------
numpy.amax : Maximum across array including any Not a Numbers.
numpy.nanmin : Minimum across array ignoring any Not a Numbers.
isnan : Shows which elements are Not a Number (NaN).
isfinite: Shows which elements are not: Not a Number, positive and
negative infinity
Notes
-----
    Numpy uses the IEEE Standard for Binary Floating-Point Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Positive infinity is treated as a very large number and negative infinity
is treated as a very small (i.e. negative) number.
    If the input has an integer type the function is equivalent to np.max.
Examples
--------
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nanmax(a)
3.0
>>> np.nanmax(a, axis=0)
array([ 3., 2.])
>>> np.nanmax(a, axis=1)
array([ 2., 3.])
When positive infinity and negative infinity are present:
>>> np.nanmax([1, 2, np.nan, np.NINF])
2.0
>>> np.nanmax([1, 2, np.nan, np.inf])
inf
"""
a = np.asanyarray(a)
if axis is not None:
return np.fmax.reduce(a, axis)
else:
return np.fmax.reduce(a.flat)
def nanargmax(a, axis=None):
"""
Return indices of the maximum values over an axis, ignoring NaNs.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
Returns
-------
index_array : ndarray
An array of indices or a single index value.
See Also
--------
argmax, nanargmin
Examples
--------
>>> a = np.array([[np.nan, 4], [2, 3]])
>>> np.argmax(a)
0
>>> np.nanargmax(a)
1
>>> np.nanargmax(a, axis=0)
array([1, 0])
>>> np.nanargmax(a, axis=1)
array([1, 1])
"""
return _nanop(np.argmax, -np.inf, a, axis)
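# A minimal usage sketch: ``_demo_nan_reductions`` is a hypothetical helper
# (not part of the public API) contrasting the NaN-ignoring reductions above
# on a small array containing a single NaN.
def _demo_nan_reductions():
    a = np.array([[1.0, np.nan], [3.0, 4.0]])
    assert nansum(a) == 8.0                  # the NaN is treated as zero
    assert nanmin(a) == 1.0 and nanmax(a) == 4.0
    assert nanargmax(a) == 3                 # flat index of the value 4.0
    return nansum(a, axis=0)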
def disp(mesg, device=None, linefeed=True):
"""
Display a message on a device.
Parameters
----------
mesg : str
Message to display.
device : object
Device to write message. If None, defaults to ``sys.stdout`` which is
very similar to ``print``. `device` needs to have ``write()`` and
``flush()`` methods.
linefeed : bool, optional
Option whether to print a line feed or not. Defaults to True.
Raises
------
AttributeError
If `device` does not have a ``write()`` or ``flush()`` method.
Examples
--------
Besides ``sys.stdout``, a file-like object can also be used as it has
both required methods:
>>> from StringIO import StringIO
>>> buf = StringIO()
>>> np.disp('"Display" in a file', device=buf)
>>> buf.getvalue()
'"Display" in a file\\n'
"""
if device is None:
import sys
device = sys.stdout
if linefeed:
device.write('%s\n' % mesg)
else:
device.write('%s' % mesg)
device.flush()
return
# return number of input arguments and
# number of default arguments
def _get_nargs(obj):
import re
terr = re.compile(r'.*? takes (exactly|at least) (?P<exargs>(\d+)|(\w+))' +
r' argument(s|) \((?P<gargs>(\d+)|(\w+)) given\)')
def _convert_to_int(strval):
try:
result = int(strval)
except ValueError:
if strval=='zero':
result = 0
elif strval=='one':
result = 1
elif strval=='two':
result = 2
# How high to go? English only?
else:
raise
return result
if not callable(obj):
raise TypeError(
"Object is not callable.")
if sys.version_info[0] >= 3:
# inspect currently fails for binary extensions
# like math.cos. So fall back to other methods if
# it fails.
import inspect
try:
spec = inspect.getargspec(obj)
nargs = len(spec.args)
if spec.defaults:
ndefaults = len(spec.defaults)
else:
ndefaults = 0
if inspect.ismethod(obj):
nargs -= 1
return nargs, ndefaults
except:
pass
if hasattr(obj,'func_code'):
fcode = obj.func_code
nargs = fcode.co_argcount
if obj.func_defaults is not None:
ndefaults = len(obj.func_defaults)
else:
ndefaults = 0
if isinstance(obj, types.MethodType):
nargs -= 1
return nargs, ndefaults
try:
obj()
return 0, 0
    except TypeError as msg:
m = terr.match(str(msg))
if m:
nargs = _convert_to_int(m.group('exargs'))
ndefaults = _convert_to_int(m.group('gargs'))
if isinstance(obj, types.MethodType):
nargs -= 1
return nargs, ndefaults
raise ValueError(
"failed to determine the number of arguments for %s" % (obj))
class vectorize(object):
"""
vectorize(pyfunc, otypes='', doc=None)
Generalized function class.
Define a vectorized function which takes a nested sequence
of objects or numpy arrays as inputs and returns a
numpy array as output. The vectorized function evaluates `pyfunc` over
successive tuples of the input arrays like the python map function,
except it uses the broadcasting rules of numpy.
The data type of the output of `vectorized` is determined by calling
the function with the first element of the input. This can be avoided
by specifying the `otypes` argument.
Parameters
----------
pyfunc : callable
A python function or method.
otypes : str or list of dtypes, optional
The output data type. It must be specified as either a string of
typecode characters or a list of data type specifiers. There should
be one data type specifier for each output.
doc : str, optional
The docstring for the function. If None, the docstring will be the
`pyfunc` one.
Examples
--------
>>> def myfunc(a, b):
... \"\"\"Return a-b if a>b, otherwise return a+b\"\"\"
... if a > b:
... return a - b
... else:
... return a + b
>>> vfunc = np.vectorize(myfunc)
>>> vfunc([1, 2, 3, 4], 2)
array([3, 4, 1, 2])
The docstring is taken from the input function to `vectorize` unless it
is specified
>>> vfunc.__doc__
'Return a-b if a>b, otherwise return a+b'
>>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
>>> vfunc.__doc__
'Vectorized `myfunc`'
The output type is determined by evaluating the first element of the input,
unless it is specified
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.int32'>
>>> vfunc = np.vectorize(myfunc, otypes=[np.float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
<type 'numpy.float64'>
"""
def __init__(self, pyfunc, otypes='', doc=None):
self.thefunc = pyfunc
self.ufunc = None
nin, ndefault = _get_nargs(pyfunc)
if nin == 0 and ndefault == 0:
self.nin = None
self.nin_wo_defaults = None
else:
self.nin = nin
self.nin_wo_defaults = nin - ndefault
self.nout = None
if doc is None:
self.__doc__ = pyfunc.__doc__
else:
self.__doc__ = doc
if isinstance(otypes, str):
self.otypes = otypes
for char in self.otypes:
if char not in typecodes['All']:
raise ValueError(
"invalid otype specified")
elif iterable(otypes):
self.otypes = ''.join([_nx.dtype(x).char for x in otypes])
else:
raise ValueError(
"Invalid otype specification")
self.lastcallargs = 0
def __call__(self, *args):
# get number of outputs and output types by calling
# the function on the first entries of args
nargs = len(args)
if self.nin:
if (nargs > self.nin) or (nargs < self.nin_wo_defaults):
raise ValueError(
"Invalid number of arguments")
# we need a new ufunc if this is being called with more arguments.
if (self.lastcallargs != nargs):
self.lastcallargs = nargs
self.ufunc = None
self.nout = None
if self.nout is None or self.otypes == '':
newargs = []
for arg in args:
newargs.append(asarray(arg).flat[0])
theout = self.thefunc(*newargs)
if isinstance(theout, tuple):
self.nout = len(theout)
else:
self.nout = 1
theout = (theout,)
if self.otypes == '':
otypes = []
for k in range(self.nout):
otypes.append(asarray(theout[k]).dtype.char)
self.otypes = ''.join(otypes)
# Create ufunc if not already created
if (self.ufunc is None):
self.ufunc = frompyfunc(self.thefunc, nargs, self.nout)
# Convert to object arrays first
newargs = [array(arg,copy=False,subok=True,dtype=object) for arg in args]
if self.nout == 1:
_res = array(self.ufunc(*newargs),copy=False,
subok=True,dtype=self.otypes[0])
else:
_res = tuple([array(x,copy=False,subok=True,dtype=c) \
for x, c in zip(self.ufunc(*newargs), self.otypes)])
return _res
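# A minimal usage sketch: ``_demo_vectorize`` is a hypothetical helper (not
# part of the public API) showing how ``otypes`` fixes the output dtype
# instead of letting it be inferred from the first evaluated element.
def _demo_vectorize():
    def clip_diff(a, b):
        return a - b if a > b else 0
    vfunc = vectorize(clip_diff, otypes=[np.float64])
    out = vfunc([1, 2, 3, 4], 2)
    assert out.dtype == np.float64
    assert list(out) == [0.0, 0.0, 1.0, 2.0]
    return out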
def cov(m, y=None, rowvar=1, bias=0, ddof=None):
"""
Estimate a covariance matrix, given data.
Covariance indicates the level to which two variables vary together.
If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
then the covariance matrix element :math:`C_{ij}` is the covariance of
:math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
of :math:`x_i`.
Parameters
----------
m : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
form as that of `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations given (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : int, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The covariance matrix of the variables.
See Also
--------
corrcoef : Normalized covariance matrix
Examples
--------
Consider two variables, :math:`x_0` and :math:`x_1`, which
correlate perfectly, but in opposite directions:
>>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
>>> x
array([[0, 1, 2],
[2, 1, 0]])
Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
matrix shows this clearly:
>>> np.cov(x)
array([[ 1., -1.],
[-1., 1.]])
Note that element :math:`C_{0,1}`, which shows the correlation between
:math:`x_0` and :math:`x_1`, is negative.
Further, note how `x` and `y` are combined:
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.vstack((x,y))
>>> print np.cov(X)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x, y)
[[ 11.71 -4.286 ]
[ -4.286 2.14413333]]
>>> print np.cov(x)
11.71
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError("ddof must be integer")
X = array(m, ndmin=2, dtype=float)
if X.size == 0:
# handle empty arrays
return np.array(m)
if X.shape[0] == 1:
rowvar = 1
if rowvar:
axis = 0
tup = (slice(None),newaxis)
else:
axis = 1
tup = (newaxis, slice(None))
if y is not None:
y = array(y, copy=False, ndmin=2, dtype=float)
X = concatenate((X,y), axis)
X -= X.mean(axis=1-axis)[tup]
if rowvar:
N = X.shape[1]
else:
N = X.shape[0]
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
fact = float(N - ddof)
if not rowvar:
return (dot(X.T, X.conj()) / fact).squeeze()
else:
return (dot(X, X.T.conj()) / fact).squeeze()
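# A minimal usage sketch: ``_demo_cov`` is a hypothetical helper (not part of
# the public API) comparing the default unbiased estimate (N - 1) with the
# ``ddof=0`` estimate (N).
def _demo_cov():
    x = np.array([[0.0, 1.0, 2.0], [2.0, 1.0, 0.0]])
    c_unbiased = cov(x)                # normalized by N - 1 = 2
    c_biased = cov(x, ddof=0)          # normalized by N = 3
    assert np.allclose(c_unbiased, [[1.0, -1.0], [-1.0, 1.0]])
    assert np.allclose(c_biased, c_unbiased * 2.0 / 3.0)
    return c_unbiased, c_biased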
def corrcoef(x, y=None, rowvar=1, bias=0, ddof=None):
"""
Return correlation coefficients.
Please refer to the documentation for `cov` for more detail. The
relationship between the correlation coefficient matrix, `P`, and the
covariance matrix, `C`, is
.. math:: P_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} * C_{jj} } }
The values of `P` are between -1 and 1, inclusive.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `m` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `m`.
rowvar : int, optional
If `rowvar` is non-zero (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : int, optional
Default normalization is by ``(N - 1)``, where ``N`` is the number of
observations (unbiased estimate). If `bias` is 1, then
normalization is by ``N``. These values can be overridden by using
the keyword ``ddof`` in numpy versions >= 1.5.
ddof : {None, int}, optional
.. versionadded:: 1.5
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
Returns
-------
out : ndarray
The correlation coefficient matrix of the variables.
See Also
--------
cov : Covariance matrix
"""
c = cov(x, y, rowvar, bias, ddof)
if c.size == 0:
# handle empty arrays
return c
try:
d = diag(c)
except ValueError: # scalar covariance
return 1
return c/sqrt(multiply.outer(d,d))
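# A minimal usage sketch: ``_demo_corrcoef`` is a hypothetical helper (not
# part of the public API) showing that perfectly anti-correlated rows give -1
# in the off-diagonal entries.
def _demo_corrcoef():
    x = np.array([[0.0, 1.0, 2.0], [2.0, 1.0, 0.0]])
    p = corrcoef(x)
    assert np.allclose(p, [[1.0, -1.0], [-1.0, 1.0]])
    return p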
def blackman(M):
"""
Return the Blackman window.
    The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an empty
array is returned.
Returns
-------
out : ndarray
The window, normalized to one (the value one appears only if the
number of samples is odd).
See Also
--------
bartlett, hamming, hanning, kaiser
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M)
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> from numpy import blackman
>>> blackman(12)
array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
>>> from numpy import clip, log10, array, blackman, linspace
>>> from numpy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = blackman(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = abs(fftshift(A))
>>> freq = linspace(-0.5,0.5,len(A))
>>> response = 20*log10(mag)
>>> response = clip(response,-100,100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return 0.42-0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
def bartlett(M):
"""
Return the Bartlett window.
The Bartlett window is very similar to a triangular window, except
that the end points are at zero. It is often used in signal
processing for tapering a signal, without generating too much
ripple in the frequency domain.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : array
The triangular window, normalized to one (the value one
appears only if the number of samples is odd), with the first
and last samples equal to zero.
See Also
--------
blackman, hamming, hanning, kaiser
Notes
-----
The Bartlett window is defined as
.. math:: w(n) = \\frac{2}{M-1} \\left(
\\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right|
\\right)
Most references to the Bartlett window come from the signal
processing literature, where it is used as one of many windowing
functions for smoothing values. Note that convolution with this
window produces linear interpolation. It is also known as an
    apodization (which means "removing the foot", i.e. smoothing
    discontinuities at the beginning and end of the sampled signal) or
    tapering function. The Fourier transform of the Bartlett window is
    the product of two sinc functions.
Note the excellent discussion in Kanasewich.
References
----------
.. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika 37, 1-16, 1950.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 109-110.
.. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal
Processing", Prentice-Hall, 1999, pp. 468-471.
.. [4] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 429.
Examples
--------
>>> np.bartlett(12)
array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
Plot the window and its frequency response (requires SciPy and matplotlib):
>>> from numpy import clip, log10, array, bartlett, linspace
>>> from numpy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = bartlett(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = abs(fftshift(A))
>>> freq = linspace(-0.5,0.5,len(A))
>>> response = 20*log10(mag)
>>> response = clip(response,-100,100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return where(less_equal(n,(M-1)/2.0),2.0*n/(M-1),2.0-2.0*n/(M-1))
def hanning(M):
"""
Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray, shape(M,)
The window, normalized to one (the value one
appears only if `M` is odd).
See Also
--------
bartlett, blackman, hamming, kaiser
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
    The Hanning window was named for Julius von Hann, an Austrian
    meteorologist. It is also known as the Cosine Bell. Some authors prefer
    that it be called a
Hann window, to help avoid confusion with the very similar Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> from numpy import hanning
>>> hanning(12)
array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
0.07937323, 0. ])
Plot the window and its frequency response:
>>> from numpy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = abs(fftshift(A))
>>> freq = np.linspace(-0.5,0.5,len(A))
>>> response = 20*np.log10(mag)
>>> response = np.clip(response,-100,100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
# XXX: this docstring is inconsistent with other filter windows, e.g.
# Blackman and Bartlett - they should all follow the same convention for
# clarity. Either use np. for all numpy members (as above), or import all
# numpy members (as in Blackman and Bartlett examples)
if M < 1:
return array([])
if M == 1:
return ones(1, float)
n = arange(0,M)
return 0.5-0.5*cos(2.0*pi*n/(M-1))
def hamming(M):
"""
Return the Hamming window.
The Hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
Returns
-------
out : ndarray
The window, normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hanning, kaiser
Notes
-----
The Hamming window is defined as
    .. math::  w(n) = 0.54 - 0.46cos\\left(\\frac{2\\pi{n}}{M-1}\\right)
\\qquad 0 \\leq n \\leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and
is described in Blackman and Tukey. It was recommended for smoothing the
truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
>>> from numpy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
>>> response = 20 * np.log10(mag)
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
if M < 1:
return array([])
if M == 1:
return ones(1,float)
n = arange(0,M)
return 0.54-0.46*cos(2.0*pi*n/(M-1))
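# A minimal usage sketch: ``_demo_windows`` is a hypothetical helper (not part
# of the public API) checking the shared convention of the cosine windows
# above: an odd-length window peaks at 1.0 at its midpoint.
def _demo_windows():
    for win in (blackman, bartlett, hanning, hamming):
        w = win(11)
        assert w.shape == (11,)
        assert abs(w[5] - 1.0) < 1e-12     # midpoint of an odd-length window
    return hamming(11)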
## Code from cephes for i0
_i0A = [
-4.41534164647933937950E-18,
3.33079451882223809783E-17,
-2.43127984654795469359E-16,
1.71539128555513303061E-15,
-1.16853328779934516808E-14,
7.67618549860493561688E-14,
-4.85644678311192946090E-13,
2.95505266312963983461E-12,
-1.72682629144155570723E-11,
9.67580903537323691224E-11,
-5.18979560163526290666E-10,
2.65982372468238665035E-9,
-1.30002500998624804212E-8,
6.04699502254191894932E-8,
-2.67079385394061173391E-7,
1.11738753912010371815E-6,
-4.41673835845875056359E-6,
1.64484480707288970893E-5,
-5.75419501008210370398E-5,
1.88502885095841655729E-4,
-5.76375574538582365885E-4,
1.63947561694133579842E-3,
-4.32430999505057594430E-3,
1.05464603945949983183E-2,
-2.37374148058994688156E-2,
4.93052842396707084878E-2,
-9.49010970480476444210E-2,
1.71620901522208775349E-1,
-3.04682672343198398683E-1,
6.76795274409476084995E-1]
_i0B = [
-7.23318048787475395456E-18,
-4.83050448594418207126E-18,
4.46562142029675999901E-17,
3.46122286769746109310E-17,
-2.82762398051658348494E-16,
-3.42548561967721913462E-16,
1.77256013305652638360E-15,
3.81168066935262242075E-15,
-9.55484669882830764870E-15,
-4.15056934728722208663E-14,
1.54008621752140982691E-14,
3.85277838274214270114E-13,
7.18012445138366623367E-13,
-1.79417853150680611778E-12,
-1.32158118404477131188E-11,
-3.14991652796324136454E-11,
1.18891471078464383424E-11,
4.94060238822496958910E-10,
3.39623202570838634515E-9,
2.26666899049817806459E-8,
2.04891858946906374183E-7,
2.89137052083475648297E-6,
6.88975834691682398426E-5,
3.36911647825569408990E-3,
8.04490411014108831608E-1]
def _chbevl(x, vals):
b0 = vals[0]
b1 = 0.0
for i in xrange(1,len(vals)):
b2 = b1
b1 = b0
b0 = x*b1 - b2 + vals[i]
return 0.5*(b0 - b2)
def _i0_1(x):
return exp(x) * _chbevl(x/2.0-2, _i0A)
def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
Usually denoted :math:`I_0`. This function does broadcast, but will *not*
"up-cast" int dtype arguments unless accompanied by at least one float or
complex dtype argument (see Raises below).
Parameters
----------
x : array_like, dtype float or complex
Argument of the Bessel function.
Returns
-------
out : ndarray, shape = x.shape, dtype = x.dtype
The modified Bessel function evaluated at each of the elements of `x`.
Raises
------
TypeError: array cannot be safely cast to required type
If argument consists exclusively of int dtypes.
See Also
--------
scipy.special.iv, scipy.special.ive
Notes
-----
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is partitioned
into the two intervals [0,8] and (8,inf), and Chebyshev polynomial
expansions are employed in each interval. Relative error on the domain
[0,30] using IEEE arithmetic is documented [3]_ as having a peak of 5.8e-16
with an rms of 1.4e-16 (n = 30000).
References
----------
.. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions," in
*National Physical Laboratory Mathematical Tables*, vol. 5, London:
Her Majesty's Stationery Office, 1962.
.. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
Functions*, 10th printing, New York: Dover, 1964, pp. 379.
http://www.math.sfu.ca/~cbm/aands/page_379.htm
.. [3] http://kobesearch.cpan.org/htdocs/Math-Cephes/Math/Cephes.html
Examples
--------
>>> np.i0([0.])
array(1.0)
>>> np.i0([0., 1. + 2j])
array([ 1.00000000+0.j , 0.18785373+0.64616944j])
"""
x = atleast_1d(x).copy()
y = empty_like(x)
ind = (x<0)
x[ind] = -x[ind]
ind = (x<=8.0)
y[ind] = _i0_1(x[ind])
ind2 = ~ind
y[ind2] = _i0_2(x[ind2])
return y.squeeze()
## End of cephes code for i0
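# Illustrative sketch of the scheme described in the i0 notes above: the
# argument is mapped into the Chebyshev interval (x/2 - 2 on [0, 8],
# 32/x - 2 on (8, inf)) and _chbevl evaluates the series with Clenshaw's
# recurrence.  The test value is arbitrary and the scipy cross-check is
# optional, not part of this module's behaviour.
def _i0_clenshaw_sketch(x=3.7):
    approx = _i0_1(x) if x <= 8.0 else _i0_2(x)
    try:
        from scipy import special
    except ImportError:
        return approx
    # the difference should be at machine-precision level
    return approx, abs(approx - special.i0(x))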
def kaiser(M,beta):
"""
Return the Kaiser window.
The Kaiser window is a taper formed by using a Bessel function.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
beta : float
Shape parameter for window.
Returns
-------
out : array
The window, normalized to one (the value one
appears only if the number of samples is odd).
See Also
--------
bartlett, blackman, hamming, hanning
Notes
-----
The Kaiser window is defined as
.. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}}
\\right)/I_0(\\beta)
with
.. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2},
where :math:`I_0` is the modified zeroth-order Bessel function.
The Kaiser was named for Jim Kaiser, who discovered a simple approximation
to the DPSS window based on Bessel functions.
The Kaiser window is a very good approximation to the Digital Prolate
Spheroidal Sequence, or Slepian window, which is the transform which
maximizes the energy in the main lobe of the window relative to total
energy.
The Kaiser can approximate many other windows by varying the beta
parameter.
==== =======================
beta Window shape
==== =======================
0 Rectangular
5 Similar to a Hamming
6 Similar to a Hanning
8.6 Similar to a Blackman
==== =======================
A beta value of 14 is probably a good starting point. Note that as beta
gets large, the window narrows, and so the number of samples needs to be
large enough to sample the increasingly narrow spike, otherwise nans will
get returned.
Most references to the Kaiser window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by
digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285.
John Wiley and Sons, New York, (1966).
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 177-178.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
Examples
--------
>>> from numpy import kaiser
>>> kaiser(12, 14)
array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
>>> from numpy import clip, log10, array, kaiser, linspace
>>> from numpy.fft import fft, fftshift
>>> import matplotlib.pyplot as plt
>>> window = kaiser(51, 14)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Sample")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
>>> plt.figure()
<matplotlib.figure.Figure object at 0x...>
>>> A = fft(window, 2048) / 25.5
>>> mag = abs(fftshift(A))
>>> freq = linspace(-0.5,0.5,len(A))
>>> response = 20*log10(mag)
>>> response = clip(response,-100,100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Magnitude [dB]")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("Normalized frequency [cycles per sample]")
<matplotlib.text.Text object at 0x...>
>>> plt.axis('tight')
(-0.5, 0.5, -100.0, ...)
>>> plt.show()
"""
from numpy.dual import i0
if M == 1:
return np.array([1.])
n = arange(0,M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
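# Illustrative sketch of the beta table in the kaiser docstring: with beta
# near 5 the Kaiser window closely tracks a Hamming window of the same
# length.  The window length and beta value below are just taken from that
# table; the returned number is the largest pointwise gap between the two.
def _kaiser_beta_table_sketch(M=51):
    w_kaiser = kaiser(M, 5.0)
    w_hamming = hamming(M)
    return np.abs(w_kaiser - w_hamming).max()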
def sinc(x):
"""
Return the sinc function.
The sinc function is :math:`\\sin(\\pi x)/(\\pi x)`.
Parameters
----------
x : ndarray
        Array (possibly multi-dimensional) of values for which to
calculate ``sinc(x)``.
Returns
-------
out : ndarray
``sinc(x)``, which has the same shape as the input.
Notes
-----
``sinc(0)`` is the limit value 1.
The name sinc is short for "sine cardinal" or "sinus cardinalis".
The sinc function is used in various signal processing applications,
including in anti-aliasing, in the construction of a
Lanczos resampling filter, and in interpolation.
For bandlimited interpolation of discrete-time signals, the ideal
interpolation kernel is proportional to the sinc function.
References
----------
.. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
Resource. http://mathworld.wolfram.com/SincFunction.html
.. [2] Wikipedia, "Sinc function",
http://en.wikipedia.org/wiki/Sinc_function
Examples
--------
>>> x = np.arange(-20., 21.)/5.
>>> np.sinc(x)
array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
-2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
-3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
<matplotlib.text.Text object at 0x...>
>>> plt.ylabel("Amplitude")
<matplotlib.text.Text object at 0x...>
>>> plt.xlabel("X")
<matplotlib.text.Text object at 0x...>
>>> plt.show()
It works in 2-D as well:
>>> x = np.arange(-200., 201.)/50.
>>> xx = np.outer(x, x)
>>> plt.imshow(np.sinc(xx))
<matplotlib.image.AxesImage object at 0x...>
"""
x = np.asanyarray(x)
    y = pi * where(x == 0, 1.0e-20, x)
return sin(y)/y
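# Sketch of the bandlimited-interpolation use mentioned in the sinc notes: a
# sampled signal is reconstructed off the sample grid by summing sinc kernels
# centred on the samples.  The tone frequency, sample spacing and query time
# below are arbitrary choices for the sketch.
def _sinc_interp_sketch():
    dt = 0.05                                # sample spacing
    t = np.arange(0, 1, dt)                  # sample instants
    samples = np.sin(2 * np.pi * 3.0 * t)    # a 3 Hz tone, well below Nyquist
    t_query = 0.512                          # off-grid time to reconstruct
    kernel = sinc((t_query - t) / dt)        # one shifted kernel per sample
    # approximately equal to np.sin(2 * np.pi * 3.0 * t_query)
    return (samples * kernel).sum()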
def msort(a):
"""
Return a copy of an array sorted along the first axis.
Parameters
----------
a : array_like
Array to be sorted.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
sort
Notes
-----
``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``.
"""
b = array(a,subok=True,copy=True)
b.sort(0)
return b
def median(a, axis=None, out=None, overwrite_input=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : {None, int}, optional
Axis along which the medians are computed. The default (axis=None)
is to compute the median along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : {False, True}, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted. Default is
False. Note that, if `overwrite_input` is True and the input
is not already an ndarray, an error will be raised.
Returns
-------
median : ndarray
A new array holding the result (unless `out` is specified, in
which case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, percentile
Notes
-----
Given a vector V of length N, the median of V is the middle value of
a sorted copy of V, ``V_sorted`` - i.e., ``V_sorted[(N-1)/2]``, when N is
odd. When N is even, it is the average of the two middle values of
``V_sorted``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([ 7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
3.5
>>> assert not np.all(a==b)
"""
if overwrite_input:
if axis is None:
sorted = a.ravel()
sorted.sort()
else:
a.sort(axis=axis)
sorted = a
else:
sorted = sort(a, axis=axis)
if axis is None:
axis = 0
indexer = [slice(None)] * sorted.ndim
index = int(sorted.shape[axis]/2)
if sorted.shape[axis] % 2 == 1:
# index with slice to allow mean (below) to work
indexer[axis] = slice(index, index+1)
else:
indexer[axis] = slice(index-1, index+1)
# Use mean in odd and even case to coerce data type
# and check, use out array.
return mean(sorted[indexer], axis=axis, out=out)
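# Sketch of the rule stated in the median notes: for odd N the middle element
# of the sorted data is returned, for even N the mean of the two middle
# elements.  The example arrays are arbitrary.
def _median_rule_sketch():
    odd = np.array([7, 1, 3])           # sorted: [1, 3, 7]    -> 3.0
    even = np.array([7, 1, 3, 5])       # sorted: [1, 3, 5, 7] -> (3 + 5) / 2
    return median(odd), median(even)    # expected: (3.0, 4.0)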
def percentile(a, q, axis=None, out=None, overwrite_input=False):
"""
Compute the qth percentile of the data along the specified axis.
Returns the qth percentile of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
q : float in range of [0,100] (or sequence of floats)
Percentile to compute which must be between 0 and 100 inclusive.
axis : int, optional
Axis along which the percentiles are computed. The default (None)
is to compute the median along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted.
Default is False. Note that, if `overwrite_input` is True and the
input is not already an array, an error will be raised.
Returns
-------
pcntile : ndarray
A new array holding the result (unless `out` is specified, in
which case that array is returned instead). If the input contains
integers, or floats of smaller precision than 64, then the output
data-type is float64. Otherwise, the output data-type is the same
as that of the input.
See Also
--------
mean, median
Notes
-----
Given a vector V of length N, the qth percentile of V is the qth ranked
value in a sorted copy of V. A weighted average of the two nearest
neighbors is used if the normalized ranking does not match q exactly.
The same as the median if ``q=0.5``, the same as the minimum if ``q=0``
and the same as the maximum if ``q=1``.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, 50)
3.5
>>> np.percentile(a, 0.5, axis=0)
array([ 6.5, 4.5, 2.5])
>>> np.percentile(a, 50, axis=1)
array([ 7., 2.])
>>> m = np.percentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, 50, axis=0, out=m)
array([ 6.5, 4.5, 2.5])
>>> m
array([ 6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.percentile(b, 50, axis=1, overwrite_input=True)
array([ 7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.percentile(b, 50, axis=None, overwrite_input=True)
3.5
"""
a = np.asarray(a)
if q == 0:
return a.min(axis=axis, out=out)
elif q == 100:
return a.max(axis=axis, out=out)
if overwrite_input:
if axis is None:
sorted = a.ravel()
sorted.sort()
else:
a.sort(axis=axis)
sorted = a
else:
sorted = sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted, q, axis, out)
# handle sequence of q's without calling sort multiple times
def _compute_qth_percentile(sorted, q, axis, out):
if not isscalar(q):
p = [_compute_qth_percentile(sorted, qi, axis, None)
for qi in q]
if out is not None:
out.flat = p
return p
q = q / 100.0
if (q < 0) or (q > 1):
        raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted.ndim
Nx = sorted.shape[axis]
index = q*(Nx-1)
i = int(index)
if i == index:
indexer[axis] = slice(i, i+1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i+2)
j = i + 1
weights = array([(j - index), (index - i)],float)
wshape = [1]*sorted.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use add.reduce in both cases to coerce data type as well as
# check and use out array.
return add.reduce(sorted[indexer]*weights, axis=axis, out=out)/sumval
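# Sketch of the interpolation rule in the percentile notes: the target rank is
# q/100 * (N - 1); when it falls between two sorted values the result is a
# weighted average of those two neighbours.  Data and q below are arbitrary.
def _percentile_rule_sketch():
    data = np.array([1.0, 2.0, 3.0, 4.0])
    # q=40 gives rank 0.4 * 3 = 1.2, i.e. 80% of data[1] plus 20% of data[2]
    by_hand = 0.8 * 2.0 + 0.2 * 3.0
    return percentile(data, 40.0), by_hand    # both should equal 2.2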
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
Integrate `y` (`x`) along given axis.
Parameters
----------
y : array_like
Input array to integrate.
x : array_like, optional
If `x` is None, then spacing between all `y` elements is `dx`.
dx : scalar, optional
If `x` is None, spacing given by `dx` is assumed. Default is 1.
axis : int, optional
Specify the axis.
Returns
-------
out : float
Definite integral as approximated by trapezoidal rule.
See Also
--------
sum, cumsum
Notes
-----
Image [2]_ illustrates trapezoidal rule -- y-axis locations of points will
be taken from `y` array, by default x-axis distances between points will be
1.0, alternatively they can be provided with `x` array or with `dx` scalar.
Return value will be equal to combined area under the red lines.
References
----------
.. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
.. [2] Illustration image:
http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
Examples
--------
>>> np.trapz([1,2,3])
4.0
>>> np.trapz([1,2,3], x=[4,6,8])
8.0
>>> np.trapz([1,2,3], dx=2)
8.0
>>> a = np.arange(6).reshape(2, 3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
array([ 1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
array([ 2., 8.])
"""
y = asanyarray(y)
if x is None:
d = dx
else:
x = asanyarray(x)
if x.ndim == 1:
d = diff(x)
# reshape to correct shape
shape = [1]*y.ndim
shape[axis] = d.shape[0]
d = d.reshape(shape)
else:
d = diff(x, axis=axis)
nd = len(y.shape)
slice1 = [slice(None)]*nd
slice2 = [slice(None)]*nd
slice1[axis] = slice(1,None)
slice2[axis] = slice(None,-1)
try:
        ret = (d * (y[slice1] + y[slice2]) / 2.0).sum(axis)
except ValueError: # Operations didn't work, cast to ndarray
d = np.asarray(d)
y = np.asarray(y)
ret = add.reduce(d * (y[slice1]+y[slice2])/2.0, axis)
return ret
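# Sketch of the composite trapezoidal rule implemented above: each interval
# contributes 0.5 * (y[i] + y[i+1]) * (x[i+1] - x[i]).  The sample points are
# the ones used in the docstring example.
def _trapz_rule_sketch():
    y = np.array([1.0, 2.0, 3.0])
    x = np.array([4.0, 6.0, 8.0])
    by_hand = 0.5 * (1 + 2) * 2 + 0.5 * (2 + 3) * 2    # 3.0 + 5.0
    return trapz(y, x), by_hand                        # both should equal 8.0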
#always succeed
def add_newdoc(place, obj, doc):
"""Adds documentation to obj which is in module place.
If doc is a string add it to obj as a docstring
If doc is a tuple, then the first element is interpreted as
an attribute of obj and the second as the docstring
(method, docstring)
If doc is a list, then each element of the list should be a
sequence of length two --> [(method1, docstring1),
(method2, docstring2), ...]
This routine never raises an error.
"""
try:
new = {}
exec 'from %s import %s' % (place, obj) in new
if isinstance(doc, str):
add_docstring(new[obj], doc.strip())
elif isinstance(doc, tuple):
add_docstring(getattr(new[obj], doc[0]), doc[1].strip())
elif isinstance(doc, list):
for val in doc:
add_docstring(getattr(new[obj], val[0]), val[1].strip())
except:
pass
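# Illustrative calls matching the three accepted forms of `doc` described
# above (object names and docstring texts are placeholders, not real numpy
# documentation):
#   add_newdoc('numpy.core', 'some_obj', "A plain docstring.")
#   add_newdoc('numpy.core', 'some_obj', ('method', "Docstring for method."))
#   add_newdoc('numpy.core', 'some_obj', [('m1', "Doc 1."), ('m2', "Doc 2.")])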
# From matplotlib
def meshgrid(x,y):
"""
Return coordinate matrices from two coordinate vectors.
Parameters
----------
x, y : ndarray
Two 1-D arrays representing the x and y coordinates of a grid.
Returns
-------
X, Y : ndarray
For vectors `x`, `y` with lengths ``Nx=len(x)`` and ``Ny=len(y)``,
return `X`, `Y` where `X` and `Y` are ``(Ny, Nx)`` shaped arrays
with the elements of `x` and y repeated to fill the matrix along
the first dimension for `x`, the second for `y`.
See Also
--------
index_tricks.mgrid : Construct a multi-dimensional "meshgrid"
using indexing notation.
index_tricks.ogrid : Construct an open multi-dimensional "meshgrid"
using indexing notation.
Examples
--------
>>> X, Y = np.meshgrid([1,2,3], [4,5,6,7])
>>> X
array([[1, 2, 3],
[1, 2, 3],
[1, 2, 3],
[1, 2, 3]])
>>> Y
array([[4, 4, 4],
[5, 5, 5],
[6, 6, 6],
[7, 7, 7]])
`meshgrid` is very useful to evaluate functions on a grid.
>>> x = np.arange(-5, 5, 0.1)
>>> y = np.arange(-5, 5, 0.1)
>>> xx, yy = np.meshgrid(x, y)
>>> z = np.sin(xx**2+yy**2)/(xx**2+yy**2)
"""
x = asarray(x)
y = asarray(y)
numRows, numCols = len(y), len(x) # yes, reversed
x = x.reshape(1,numCols)
X = x.repeat(numRows, axis=0)
y = y.reshape(numRows,1)
Y = y.repeat(numCols, axis=1)
return X, Y
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted.
Parameters
----------
arr : array_like
Input array.
obj : slice, int or array of ints
Indicate which sub-arrays to remove.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
See Also
--------
insert : Insert elements into an array.
append : Append elements at the end of an array.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1, 2, 3, 4],
[ 5, 6, 7, 8],
[ 9, 10, 11, 12]])
>>> np.delete(arr, 1, 0)
array([[ 1, 2, 3, 4],
[ 9, 10, 11, 12]])
>>> np.delete(arr, np.s_[::2], 1)
array([[ 2, 4],
[ 6, 8],
[10, 12]])
>>> np.delete(arr, [1,3,5], None)
array([ 1, 3, 5, 7, 8, 9, 10, 11, 12])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
            ndim = arr.ndim
            axis = ndim - 1
if ndim == 0:
if wrap:
return wrap(arr)
else:
return arr.copy()
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, (int, long, integer)):
if (obj < 0): obj += N
if (obj < 0 or obj >=N):
raise ValueError(
"invalid entry")
        newshape[axis] -= 1
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = slice(obj,None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj+1,None)
new[slobj] = arr[slobj2]
elif isinstance(obj, slice):
start, stop, step = obj.indices(N)
numtodel = len(xrange(start, stop, step))
if numtodel <= 0:
if wrap:
                return wrap(arr.copy())
else:
return arr.copy()
newshape[axis] -= numtodel
new = empty(newshape, arr.dtype, arr.flags.fnc)
# copy initial chunk
if start == 0:
pass
else:
slobj[axis] = slice(None, start)
new[slobj] = arr[slobj]
        # copy end chunk
if stop == N:
pass
else:
slobj[axis] = slice(stop-numtodel,None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(stop, None)
new[slobj] = arr[slobj2]
# copy middle pieces
if step == 1:
pass
else: # use array indexing.
obj = arange(start, stop, step, dtype=intp)
all = arange(start, stop, dtype=intp)
obj = setdiff1d(all, obj)
slobj[axis] = slice(start, stop-numtodel)
slobj2 = [slice(None)]*ndim
slobj2[axis] = obj
new[slobj] = arr[slobj2]
else: # default behavior
obj = array(obj, dtype=intp, copy=0, ndmin=1)
all = arange(N, dtype=intp)
obj = setdiff1d(all, obj)
slobj[axis] = obj
new = arr[slobj]
if wrap:
return wrap(new)
else:
return new
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
Parameters
----------
arr : array_like
Input array.
obj : int, slice or sequence of ints
Object that defines the index or indices before which `values` is
inserted.
values : array_like
Values to insert into `arr`. If the type of `values` is different
from that of `arr`, `values` is converted to the type of `arr`.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
See Also
--------
append : Append elements at the end of an array.
delete : Delete elements from an array.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1, 1],
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
array([1, 5, 1, 2, 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
[3, 5, 3]])
>>> b = a.flatten()
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
array([1, 1, 5, 6, 2, 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
array([1, 1, 5, 2, 6, 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
array([1, 1, 7, 0, 2, 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
>>> np.insert(x, idx, 999, axis=1)
array([[ 0, 999, 1, 2, 999, 3],
[ 4, 999, 5, 6, 999, 7]])
"""
wrap = None
if type(arr) is not ndarray:
try:
wrap = arr.__array_wrap__
except AttributeError:
pass
arr = asarray(arr)
ndim = arr.ndim
if axis is None:
if ndim != 1:
arr = arr.ravel()
ndim = arr.ndim
axis = ndim-1
if (ndim == 0):
arr = arr.copy()
arr[...] = values
if wrap:
return wrap(arr)
else:
return arr
slobj = [slice(None)]*ndim
N = arr.shape[axis]
newshape = list(arr.shape)
if isinstance(obj, (int, long, integer)):
if (obj < 0): obj += N
if obj < 0 or obj > N:
raise ValueError(
"index (%d) out of range (0<=index<=%d) "\
"in dimension %d" % (obj, N, axis))
        newshape[axis] += 1
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj[axis] = slice(None, obj)
new[slobj] = arr[slobj]
slobj[axis] = obj
new[slobj] = values
slobj[axis] = slice(obj+1,None)
slobj2 = [slice(None)]*ndim
slobj2[axis] = slice(obj,None)
new[slobj] = arr[slobj2]
if wrap:
return wrap(new)
return new
elif isinstance(obj, slice):
# turn it into a range object
obj = arange(*obj.indices(N),**{'dtype':intp})
# get two sets of indices
# one is the indices which will hold the new stuff
# two is the indices where arr will be copied over
obj = asarray(obj, dtype=intp)
numnew = len(obj)
index1 = obj + arange(numnew)
index2 = setdiff1d(arange(numnew+N),index1)
newshape[axis] += numnew
new = empty(newshape, arr.dtype, arr.flags.fnc)
slobj2 = [slice(None)]*ndim
slobj[axis] = index1
slobj2[axis] = index2
new[slobj] = values
new[slobj2] = arr
if wrap:
return wrap(new)
return new
def append(arr, values, axis=None):
"""
Append values to the end of an array.
Parameters
----------
arr : array_like
Values are appended to a copy of this array.
values : array_like
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If `axis`
is not specified, `values` can be any shape and will be flattened
before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not given,
both `arr` and `values` are flattened before use.
Returns
-------
out : ndarray
A copy of `arr` with `values` appended to `axis`. Note that `append`
does not occur in-place: a new array is allocated and filled. If
`axis` is None, `out` is a flattened array.
See Also
--------
insert : Insert elements into an array.
delete : Delete elements from an array.
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
array([1, 2, 3, 4, 5, 6, 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
>>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
...
ValueError: arrays must have same number of dimensions
"""
arr = asanyarray(arr)
if axis is None:
if arr.ndim != 1:
arr = arr.ravel()
values = ravel(values)
axis = arr.ndim-1
return concatenate((arr, values), axis=axis)
|
apache-2.0
|
jld23/saspy
|
saspy/tests/test_sasdata.py
|
1
|
18298
|
import os
import unittest
import pandas as pd
from IPython.utils.tempdir import TemporaryDirectory
from pandas.util.testing import assert_frame_equal
import saspy
from saspy.sasdata import SASdata
from saspy.sasresults import SASresults
class TestSASdataObject(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.sas = saspy.SASsession(results='HTML')
@classmethod
def tearDownClass(cls):
if cls.sas:
cls.sas._endsas()
def test_SASdata(self):
"""
test sasdata method
"""
cars = self.sas.sasdata('cars', libref='sashelp', results='text')
self.assertIsInstance(cars, SASdata, msg="cars = sas.sasdata(...) failed")
def test_SASdata_batch(self):
"""
test set_batch()
"""
cars = self.sas.sasdata('cars', libref='sashelp', results='text')
self.sas.set_batch(True)
ll = cars.head()
self.assertIsInstance(ll, dict, msg="set_batch(True) didn't return dict")
def test_SASdata_head(self):
"""
test head()
"""
cars = self.sas.sasdata('cars', libref='sashelp', results='text')
self.sas.set_batch(True)
ll = cars.head()
expected = ['1', 'Acura', 'MDX', 'SUV', 'Asia', 'All', '$36,945', '$33,337',
'3.5', '6', '265', '17', '23', '4451', '106', '189']
rows = ll['LST'].splitlines()
retrieved = []
for i in range(len(rows)):
retrieved.append(rows[i].split())
self.assertIn(expected, retrieved, msg="cars.head() result didn't contain row 1")
@unittest.skip("Test failes with extra header info")
def test_SASdata_tail(self):
"""
test tail()
"""
cars = self.sas.sasdata('cars', libref='sashelp', results='text')
self.sas.set_batch(True)
ll = cars.tail()
expected = ['424', 'Volvo', 'C70', 'LPT', 'convertible', '2dr', 'Sedan', 'Europe', 'Front',
'$40,565', '$38,203', '2.4', '5', '197', '21', '28', '3450', '105', '186']
rows = ll['LST'].splitlines()
retrieved = []
for i in range(len(rows)):
retrieved.append(rows[i].split())
self.assertIn(expected, retrieved, msg="cars.tail() result didn't contain row 1")
def test_SASdata_tailPD(self):
"""
test tail()
"""
cars = self.sas.sasdata('cars', libref='sashelp', results='pandas')
self.sas.set_batch(True)
ll = cars.tail()
self.assertEqual(ll.shape, (5, 15), msg="wrong shape returned")
self.assertIsInstance(ll, pd.DataFrame, "Is return type correct")
def test_SASdata_contents(self):
"""
test contents()
"""
cars = self.sas.sasdata('cars', libref='sashelp', results='text')
self.sas.set_batch(True)
ll = cars.contents()
expected = ['Data', 'Set', 'Name', 'SASHELP.CARS', 'Observations', '428']
rows = ll['LST'].splitlines()
retrieved = []
for i in range(len(rows)):
retrieved.append(rows[i].split())
self.assertIn(expected, retrieved, msg="cars.contents() result didn't contain expected result")
def test_SASdata_describe(self):
"""
test describe()
"""
self.skipTest("column output doesn't match the current method. I'm skipping the test for now")
cars = self.sas.sasdata('cars', libref='sashelp', results='text')
self.sas.set_batch(True)
ll = cars.describe()
expected = ['MSRP', '428', '0', '27635', '32775', '19432', '10280', '20330', '27635']
rows = ll['LST'].splitlines()
retrieved = []
for i in range(len(rows)):
retrieved.append(rows[i].split())
self.assertIn(expected, retrieved, msg="cars.describe() result didn't contain expected result")
def test_SASdata_describe2(self):
"""
test describe()
"""
cars = self.sas.sasdata('cars', libref='sashelp')
self.sas.set_batch(True)
cars.set_results('PANDAS')
ll = cars.describe()
self.assertIsInstance(ll, pd.DataFrame, msg='ll is not a dataframe')
expected = ['MSRP', '428', '0', '27635', '32775', '19432', '10280', '20330', '27635', '39215', '192465']
self.assertEqual(['%.0f' % elem for elem in list(ll.iloc[0].dropna())[1:]], expected[1:],
msg="cars.describe() result didn't contain expected result")
self.assertEqual(expected[0],list(ll.iloc[0].dropna())[0],
msg="cars.describe() result didn't contain expected result")
def test_SASdata_results(self):
"""
test set_results()
"""
cars = self.sas.sasdata('cars', libref='sashelp', results='text')
self.sas.set_batch(True)
cars.set_results('HTML')
ll = cars.describe()
expected = '<!DOCTYPE html>'
row1 = ll['LST'].splitlines()[0]
        self.assertEqual(expected, row1, msg="cars.set_results() result wasn't HTML")
cars.set_results('TEXT')
ll = cars.describe()
row1 = ll['LST'].splitlines()[0]
        self.assertNotEqual(expected, row1, msg="cars.set_results() result wasn't TEXT")
def test_SASdata_hist(self):
"""
test hist()
"""
cars = self.sas.sasdata('cars', libref='sashelp', results='text')
self.sas.set_batch(True)
cars.set_results('TEXT')
ll = cars.hist('MSRP')
expected = 'alt="The SGPlot Procedure" src="data:image/png;base64'
self.assertIsInstance(ll, dict, msg="cars.hist(...) didn't return dict")
        self.assertGreater(len(ll['LST']), 40000, msg="cars.hist(...) result was too short")
        self.assertIn(expected, ll['LST'], msg="cars.hist(...) result wasn't what was expected")
cars.set_results('HTML')
def test_SASdata_series(self):
"""
test series()
"""
self.sas.set_batch(True)
ll = self.sas.submit('''proc sql;
create table sales as
select month, sum(actual) as tot_sales, sum(predict) as predicted_sales
from sashelp.prdsale
group by 1
order by month ;quit;
''')
sales = self.sas.sasdata('sales')
ll = sales.series(y=['tot_sales', 'predicted_sales'], x='month', title='total vs. predicted sales')
expected = 'alt="The SGPlot Procedure" src="data:image/png;base64'
self.assertIsInstance(ll, dict, msg="cars.series(...) didn't return dict")
        self.assertGreater(len(ll['LST']), 70000, msg="cars.series(...) result was too short")
        self.assertIn(expected, ll['LST'], msg="cars.series(...) result wasn't what was expected")
def test_SASdata_heatmap(self):
"""
test heatmap()
"""
cars = self.sas.sasdata('cars', libref='sashelp', results='text')
self.sas.set_batch(True)
ll = cars.heatmap('MSRP', 'horsepower')
expected = 'alt="The SGPlot Procedure" src="data:image/png;base64'
self.assertIsInstance(ll, dict, msg="cars.heatmap(...) didn't return dict")
        self.assertGreater(len(ll['LST']), 30000, msg="cars.heatmap(...) result was too short")
        self.assertIn(expected, ll['LST'], msg="cars.heatmap(...) result wasn't what was expected")
def test_SASdata_sort1(self):
"""
Create dataset in WORK
"""
self.sas.submit("data cars; set sashelp.cars; id=_n_;run;")
wkcars = self.sas.sasdata('cars')
# Sort data in place by one variable
wkcars.sort('type')
self.assertIsInstance(wkcars, SASdata, msg="Sort didn't return SASdata Object")
def test_SASdata_sort2(self):
"""
Create dataset in WORK
"""
self.sas.submit("data cars; set sashelp.cars; id=_n_;run;")
wkcars = self.sas.sasdata('cars')
        # Sort data in place by multiple variables
wkcars.sort('type descending origin')
self.assertIsInstance(wkcars, SASdata, msg="Sort didn't return SASdata Object")
def test_SASdata_sort3(self):
"""
Create dataset in WORK
"""
self.sas.submit("data cars; set sashelp.cars; id=_n_;run;")
wkcars = self.sas.sasdata('cars')
# create a second object pointing to the same data set
dup = wkcars.sort('type')
self.assertEqual(wkcars, dup, msg="Sort objects are not equal but should be")
def test_SASdata_sort4(self):
"""
Create dataset in WORK
"""
self.sas.submit("data cars; set sashelp.cars; id=_n_;run;")
wkcars = self.sas.sasdata('cars')
# create a second object with a different sort order
diff = self.sas.sasdata('diff')
diff = wkcars.sort('origin', diff)
self.assertNotEqual(wkcars, diff, msg="Sort objects are equal but should not be")
def test_SASdata_sort5(self):
"""
Create dataset in WORK
"""
self.sas.submit("data cars; set sashelp.cars; id=_n_;run;")
wkcars = self.sas.sasdata('cars')
# create object within call
wkcars.sort('type')
out1 = wkcars.sort('origin', self.sas.sasdata('out1'))
self.assertIsInstance(out1, SASdata, msg="Sort didn't return new SASdata Object")
self.assertNotEqual(wkcars, out1, msg="Sort objects are equal but should not be")
def test_SASdata_sort6(self):
"""
Create dataset in WORK
"""
self.sas.submit("data cars; set sashelp.cars; id=_n_;run;")
wkcars = self.sas.sasdata('cars')
# sort by missing variable
self.assertRaises(RuntimeError, lambda: wkcars.sort('foobar'))
def test_SASdata_score1(self):
"""
Create dataset in WORK
"""
self.sas.submit("data cars; set sashelp.cars; id=_n_;run;")
wkcars = self.sas.sasdata('cars')
a = wkcars.columnInfo()
wkcars.score(code='P_originUSA = origin;')
b = wkcars.columnInfo()
self.assertNotEqual(a, b, msg="B should have an extra column P_originUSA")
def test_SASdata_score2(self):
"""
Create dataset in WORK
"""
self.sas.submit("data cars; set sashelp.cars; id=_n_;run;")
wkcars = self.sas.sasdata('cars')
wkcars.set_results('PANDAS')
wkcars2 = self.sas.sasdata('cars2', 'work')
wkcars2.set_results('PANDAS')
a = wkcars.columnInfo()
wkcars.score(code='P_originUSA = origin;', out=wkcars2)
b = wkcars.columnInfo()
self.assertFalse(assert_frame_equal(a, b), msg="B should be identical to a")
self.assertIsInstance(wkcars2, SASdata, "Does out dataset exist")
def test_SASdata_score3(self):
with TemporaryDirectory() as temppath:
with open(os.path.join(temppath, 'score.sas'), 'w') as f:
f.write('P_originUSA = origin;')
# Create dataset in WORK
self.sas.submit("data cars; set sashelp.cars; id=_n_;run;")
wkcars = self.sas.sasdata('cars')
wkcars.set_results('PANDAS')
wkcars2 = self.sas.sasdata('cars2', 'work')
wkcars2.set_results('PANDAS')
a = wkcars.columnInfo()
wkcars.score(file=f.name, out=wkcars2)
b = wkcars.columnInfo()
self.assertFalse(assert_frame_equal(a, b), msg="B should be identical to a")
self.assertIsInstance(wkcars2, SASdata, "Does out dataset exist")
def test_SASdata_score4(self):
with TemporaryDirectory() as temppath:
with open(os.path.join(temppath, 'score.sas'), 'w') as f:
f.write('P_originUSA = origin;')
# Create dataset in WORK
self.sas.submit("data cars; set sashelp.cars; id=_n_;run;")
wkcars = self.sas.sasdata('cars')
a = wkcars.columnInfo()
wkcars.score(file=f.name)
b = wkcars.columnInfo()
self.assertNotEqual(a, b, msg="B should have an extra column P_originUSA")
def test_regScoreAssess(self):
stat = self.sas.sasstat()
self.sas.submit("""
data work.class;
set sashelp.class;
run;
""")
tr = self.sas.sasdata("class", "work")
tr.set_results('PANDAS')
with TemporaryDirectory() as temppath:
fname = os.path.join(temppath, 'hpreg_code.sas')
b = stat.hpreg(data=tr, model='weight=height', code=fname)
tr.score(file=os.path.join(temppath, 'hpreg_code.sas'))
# check that p_weight is in columnInfo
self.assertTrue('P_Weight' in tr.columnInfo()['Variable'].values, msg="Prediction Column not found")
res1 = tr.assessModel(target='weight', prediction='P_weight', nominal=False)
a = ['ASSESSMENTBINSTATISTICS', 'ASSESSMENTSTATISTICS', 'LOG']
self.assertEqual(sorted(a), sorted(res1.__dir__()),
msg=u" model failed to return correct objects expected:{0:s} returned:{1:s}".format(
str(a), str(b)))
self.assertIsInstance(res1, SASresults, "Is return type correct")
def test_regScoreAssess2(self):
stat = self.sas.sasstat()
self.sas.submit("""
data work.class;
set sashelp.class;
run;
""")
tr = self.sas.sasdata("class", "work")
tr.set_results('PANDAS')
with TemporaryDirectory() as temppath:
fname = os.path.join(temppath, 'hplogistic_code.sas')
b = stat.hplogistic(data=tr, cls= 'sex', model='sex = weight height', code=fname)
# This also works with hardcoded strings
# b = stat.hplogistic(data=tr, cls='sex', model='sex = weight height', code=r'c:\public\foo.sas')
tr.score(file=fname)
# check that P_SexF is in columnInfo
self.assertTrue('P_SexF' in tr.columnInfo()['Variable'].values, msg="Prediction Column not found")
res1 = tr.assessModel(target='sex', prediction='P_SexF', nominal=True, event='F')
a = ['ASSESSMENTBINSTATISTICS', 'ASSESSMENTSTATISTICS', 'LOG', 'SGPLOT']
self.assertEqual(sorted(a), sorted(res1.__dir__()),
msg=u" model failed to return correct objects expected:{0:s} returned:{1:s}".format(
str(a), str(b)))
self.assertIsInstance(res1, SASresults, "Is return type correct")
def test_partition1(self):
self.sas.submit("""
data work.class;
set sashelp.class;
run;
""")
tr = self.sas.sasdata("class", "work")
tr.set_results('PANDAS')
tr.partition(var='sex', fraction=.5, kfold=1, out=None, singleOut=True)
self.assertTrue('_PartInd_' in tr.columnInfo()['Variable'].values, msg="Partition Column not found")
def test_partition2(self):
self.sas.submit("""
data work.class;
set sashelp.class;
run;
""")
tr = self.sas.sasdata("class", "work")
tr.set_results('PANDAS')
tr.partition(var='sex', fraction=.5, kfold=2, out=None, singleOut=True)
self.assertTrue('_cvfold2' in tr.columnInfo()['Variable'].values, msg="Partition Column not found")
def test_partition3(self):
self.sas.submit("""
data work.class;
set sashelp.class;
run;
""")
tr = self.sas.sasdata("class", "work")
out = self.sas.sasdata("class2", "work")
tr.set_results('PANDAS')
out.set_results('PANDAS')
tr.partition(var='sex', fraction=.5, kfold=2, out=out, singleOut=True)
self.assertFalse('_cvfold1' in tr.columnInfo()['Variable'].values, msg="Writing to wrong table")
self.assertFalse('_PartInd_ ' in tr.columnInfo()['Variable'].values, msg="Writing to wrong table")
self.assertTrue('_cvfold2' in out.columnInfo()['Variable'].values, msg="Partition Column not found")
def test_partition4(self):
self.sas.submit("""
data work.class;
set sashelp.class;
run;
""")
tr = self.sas.sasdata("class", "work")
out = self.sas.sasdata("class2", "work")
tr.set_results('PANDAS')
out.set_results('PANDAS')
res1 = tr.partition(var='sex', fraction=.5, kfold=2, out=out, singleOut=False)
self.assertFalse('_cvfold1' in tr.columnInfo()['Variable'].values, msg="Writing to wrong table")
self.assertFalse('_PartInd_ ' in tr.columnInfo()['Variable'].values, msg="Writing to wrong table")
self.assertTrue('_cvfold2' in out.columnInfo()['Variable'].values, msg="Partition Column not found")
self.assertIsInstance(res1, list, "Is return type correct")
self.assertIsInstance(res1[0], tuple, "Is return type correct")
self.assertIsInstance(res1[0][1], SASdata, "Is return type correct")
def test_partition5(self):
self.sas.submit("""
data work.class;
set sashelp.class;
run;
""")
tr = self.sas.sasdata("class", "work")
tr.set_results('PANDAS')
tr.partition(fraction=.5, kfold=1, out=None, singleOut=True)
self.assertTrue('_PartInd_' in tr.columnInfo()['Variable'].values, msg="Partition Column not found")
def test_info1(self):
tr = self.sas.sasdata("class", "sashelp")
tr.set_results('Pandas')
res = tr.info()
self.assertIsInstance(res, pd.DataFrame, msg='Data frame not returned')
self.assertEqual(res.shape, (5, 4), msg="wrong shape returned")
def test_info2(self):
tr = self.sas.sasdata("class", "sashelp")
tr.set_results('text')
res = tr.info()
self.assertIsNone(res, msg="only works with Pandas")
def test_info3(self):
tr = self.sas.sasdata("class", "sashelp")
tr.set_results('html')
res = tr.info()
self.assertIsNone(res, msg="only works with Pandas")
if __name__ == "__main__":
unittest.main()
|
apache-2.0
|
GuessWhoSamFoo/pandas
|
pandas/tests/io/formats/test_printing.py
|
2
|
6914
|
# -*- coding: utf-8 -*-
import numpy as np
import pytest
import pandas as pd
from pandas import compat
import pandas.core.config as cf
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
def test_adjoin():
data = [['a', 'b', 'c'], ['dd', 'ee', 'ff'], ['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = printing.adjoin(2, *data)
assert (adjoined == expected)
def test_repr_binary_type():
import string
letters = string.ascii_letters
btype = compat.binary_type
try:
raw = btype(letters, encoding=cf.get_option('display.encoding'))
except TypeError:
raw = btype(letters)
b = compat.text_type(compat.bytes_to_str(raw))
res = printing.pprint_thing(b, quote_strings=True)
assert res == repr(b)
res = printing.pprint_thing(b, quote_strings=False)
assert res == b
class TestFormattBase(object):
def test_adjoin(self):
data = [['a', 'b', 'c'], ['dd', 'ee', 'ff'], ['ggg', 'hhh', 'iii']]
expected = 'a dd ggg\nb ee hhh\nc ff iii'
adjoined = printing.adjoin(2, *data)
assert adjoined == expected
def test_adjoin_unicode(self):
data = [[u'あ', 'b', 'c'], ['dd', u'ええ', 'ff'], ['ggg', 'hhh', u'いいい']]
expected = u'あ dd ggg\nb ええ hhh\nc ff いいい'
adjoined = printing.adjoin(2, *data)
assert adjoined == expected
adj = fmt.EastAsianTextAdjustment()
expected = u"""あ dd ggg
b ええ hhh
c ff いいい"""
adjoined = adj.adjoin(2, *data)
assert adjoined == expected
cols = adjoined.split('\n')
assert adj.len(cols[0]) == 13
assert adj.len(cols[1]) == 13
assert adj.len(cols[2]) == 16
expected = u"""あ dd ggg
b ええ hhh
c ff いいい"""
adjoined = adj.adjoin(7, *data)
assert adjoined == expected
cols = adjoined.split('\n')
assert adj.len(cols[0]) == 23
assert adj.len(cols[1]) == 23
assert adj.len(cols[2]) == 26
def test_justify(self):
adj = fmt.EastAsianTextAdjustment()
def just(x, *args, **kwargs):
# wrapper to test single str
return adj.justify([x], *args, **kwargs)[0]
assert just('abc', 5, mode='left') == 'abc '
assert just('abc', 5, mode='center') == ' abc '
assert just('abc', 5, mode='right') == ' abc'
assert just(u'abc', 5, mode='left') == 'abc '
assert just(u'abc', 5, mode='center') == ' abc '
assert just(u'abc', 5, mode='right') == ' abc'
assert just(u'パンダ', 5, mode='left') == u'パンダ'
assert just(u'パンダ', 5, mode='center') == u'パンダ'
assert just(u'パンダ', 5, mode='right') == u'パンダ'
assert just(u'パンダ', 10, mode='left') == u'パンダ '
assert just(u'パンダ', 10, mode='center') == u' パンダ '
assert just(u'パンダ', 10, mode='right') == u' パンダ'
def test_east_asian_len(self):
adj = fmt.EastAsianTextAdjustment()
assert adj.len('abc') == 3
assert adj.len(u'abc') == 3
assert adj.len(u'パンダ') == 6
        assert adj.len(u'ﾊﾟﾝﾀﾞ') == 5
assert adj.len(u'パンダpanda') == 11
        assert adj.len(u'ﾊﾟﾝﾀﾞpanda') == 10
def test_ambiguous_width(self):
adj = fmt.EastAsianTextAdjustment()
assert adj.len(u'¡¡ab') == 4
with cf.option_context('display.unicode.ambiguous_as_wide', True):
adj = fmt.EastAsianTextAdjustment()
assert adj.len(u'¡¡ab') == 6
data = [[u'あ', 'b', 'c'], ['dd', u'ええ', 'ff'],
['ggg', u'¡¡ab', u'いいい']]
expected = u'あ dd ggg \nb ええ ¡¡ab\nc ff いいい'
adjoined = adj.adjoin(2, *data)
assert adjoined == expected
class TestTableSchemaRepr(object):
@classmethod
def setup_class(cls):
pytest.importorskip('IPython')
from IPython.core.interactiveshell import InteractiveShell
cls.display_formatter = InteractiveShell.instance().display_formatter
def test_publishes(self):
df = pd.DataFrame({"A": [1, 2]})
objects = [df['A'], df, df] # dataframe / series
expected_keys = [
{'text/plain', 'application/vnd.dataresource+json'},
{'text/plain', 'text/html', 'application/vnd.dataresource+json'},
]
opt = pd.option_context('display.html.table_schema', True)
for obj, expected in zip(objects, expected_keys):
with opt:
formatted = self.display_formatter.format(obj)
assert set(formatted[0].keys()) == expected
with_latex = pd.option_context('display.latex.repr', True)
with opt, with_latex:
formatted = self.display_formatter.format(obj)
expected = {'text/plain', 'text/html', 'text/latex',
'application/vnd.dataresource+json'}
assert set(formatted[0].keys()) == expected
def test_publishes_not_implemented(self):
# column MultiIndex
# GH 15996
midx = pd.MultiIndex.from_product([['A', 'B'], ['a', 'b', 'c']])
df = pd.DataFrame(np.random.randn(5, len(midx)), columns=midx)
opt = pd.option_context('display.html.table_schema', True)
with opt:
formatted = self.display_formatter.format(df)
expected = {'text/plain', 'text/html'}
assert set(formatted[0].keys()) == expected
def test_config_on(self):
df = pd.DataFrame({"A": [1, 2]})
with pd.option_context("display.html.table_schema", True):
result = df._repr_data_resource_()
assert result is not None
def test_config_default_off(self):
df = pd.DataFrame({"A": [1, 2]})
with pd.option_context("display.html.table_schema", False):
result = df._repr_data_resource_()
assert result is None
def test_enable_data_resource_formatter(self):
# GH 10491
formatters = self.display_formatter.formatters
mimetype = 'application/vnd.dataresource+json'
with pd.option_context('display.html.table_schema', True):
assert 'application/vnd.dataresource+json' in formatters
assert formatters[mimetype].enabled
# still there, just disabled
assert 'application/vnd.dataresource+json' in formatters
assert not formatters[mimetype].enabled
# able to re-set
with pd.option_context('display.html.table_schema', True):
assert 'application/vnd.dataresource+json' in formatters
assert formatters[mimetype].enabled
# smoke test that it works
self.display_formatter.format(cf)
|
bsd-3-clause
|
0todd0000/spm1d
|
spm1d/rft1d/examples/val_max_2_twosample_t_0d.py
|
2
|
1327
|
from math import sqrt
import numpy as np
from scipy import stats
from matplotlib import pyplot
#(0) Set parameters:
np.random.seed(0)
nResponsesA = 5
nResponsesB = 5
nIterations = 5000
### derived parameters:
nA,nB = nResponsesA, nResponsesB
df = nA + nB - 2
#(1) Generate Gaussian data and compute test statistic:
T = []
for i in range(nIterations):
yA,yB = np.random.randn(nResponsesA), np.random.randn(nResponsesB)
mA,mB = yA.mean(), yB.mean()
sA,sB = yA.std(ddof=1), yB.std(ddof=1)
s = sqrt( ((nA-1)*sA*sA + (nB-1)*sB*sB) / df )
t = (mA-mB) / ( s *sqrt(1.0/nA + 1.0/nB))
T.append(t)
T = np.asarray(T)
#(2) Survival functions:
heights = np.linspace(1, 4, 21)
sf = np.array( [ (T>h).mean() for h in heights] )
sfE = stats.t.sf(heights, df) #theoretical
sfN = stats.norm.sf(heights) #standard normal (for comparison)
#(3) Plot results:
pyplot.close('all')
ax = pyplot.axes()
ax.plot(heights, sf, 'o', label='Simulated')
ax.plot(heights, sfE, '-', label='Theoretical')
ax.plot(heights, sfN, 'r-', label='Standard normal')
ax.set_xlabel('$u$', size=20)
ax.set_ylabel('$P (t > u)$', size=20)
ax.legend()
ax.set_title('Two-sample t validation (0D)', size=20)
pyplot.show()
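# Equivalence note (illustrative): for any one pair of samples the pooled-
# variance t statistic computed in the loop above matches scipy's
# equal-variance two-sample test, e.g.
#   t_scipy, p = stats.ttest_ind(yA, yB)   # t_scipy equals the manual t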
|
gpl-3.0
|
pprett/scikit-learn
|
examples/covariance/plot_lw_vs_oas.py
|
159
|
2951
|
"""
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a close formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired from Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='navy', lw=2)
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='darkorange', lw=2)
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='navy', lw=2)
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='darkorange', lw=2)
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
|
bsd-3-clause
|
bellhops/airflow
|
airflow/hooks/hive_hooks.py
|
1
|
17167
|
from __future__ import print_function
from builtins import zip
from past.builtins import basestring
import csv
import logging
import re
import subprocess
from tempfile import NamedTemporaryFile
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from hive_service import ThriftHive
import pyhs2
from airflow.utils import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils import TemporaryDirectory
from airflow.configuration import conf
import airflow.security.utils as utils
class HiveCliHook(BaseHook):
"""
Simple wrapper around the hive CLI.
    It also supports ``beeline``,
a lighter CLI that runs JDBC and is replacing the heavier
traditional CLI. To enable ``beeline``, set the use_beeline param in the
extra field of your connection as in ``{ "use_beeline": true }``
Note that you can also set default hive CLI parameters using the
``hive_cli_params`` to be used in your connection as in
``{"hive_cli_params": "-hiveconf mapred.job.tracker=some.jobtracker:444"}``
"""
def __init__(
self,
hive_cli_conn_id="hive_cli_default",
run_as=None):
conn = self.get_connection(hive_cli_conn_id)
self.hive_cli_params = conn.extra_dejson.get('hive_cli_params', '')
self.use_beeline = conn.extra_dejson.get('use_beeline', False)
self.conn = conn
self.run_as = run_as
def run_cli(self, hql, schema=None, verbose=True):
"""
Run an hql statement using the hive cli
>>> hh = HiveCliHook()
>>> result = hh.run_cli("USE airflow;")
>>> ("OK" in result)
True
"""
conn = self.conn
schema = schema or conn.schema
if schema:
hql = "USE {schema};\n{hql}".format(**locals())
with TemporaryDirectory(prefix='airflow_hiveop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
f.write(hql)
f.flush()
fname = f.name
hive_bin = 'hive'
cmd_extra = []
if self.use_beeline:
hive_bin = 'beeline'
if conf.get('core', 'security') == 'kerberos':
template = conn.extra_dejson.get('principal',"hive/_HOST@EXAMPLE.COM")
template = utils.replace_hostname_pattern(utils.get_components(template))
proxy_user = ""
if conn.extra_dejson.get('proxy_user') == "login" and conn.login:
proxy_user = "hive.server2.proxy.user={0}".format(conn.login)
elif conn.extra_dejson.get('proxy_user') == "owner" and self.run_as:
proxy_user = "hive.server2.proxy.user={0}".format(self.run_as)
jdbc_url = (
"jdbc:hive2://"
"{0}:{1}/{2}"
";principal={3}{4}"
).format(conn.host, conn.port, conn.schema, template, proxy_user)
else:
jdbc_url = (
"jdbc:hive2://"
"{0}:{1}/{2}"
";auth=noSasl"
).format(conn.host, conn.port, conn.schema)
cmd_extra += ['-u', jdbc_url]
if conn.login:
cmd_extra += ['-n', conn.login]
if conn.password:
cmd_extra += ['-p', conn.password]
hive_cmd = [hive_bin, '-f', fname] + cmd_extra
if self.hive_cli_params:
hive_params_list = self.hive_cli_params.split()
hive_cmd.extend(hive_params_list)
if verbose:
logging.info(" ".join(hive_cmd))
sp = subprocess.Popen(
hive_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir)
self.sp = sp
stdout = ''
for line in iter(sp.stdout.readline, ''):
stdout += line
if verbose:
logging.info(line.strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout
def test_hql(self, hql):
"""
Test an hql statement using the hive cli and EXPLAIN
"""
create, insert, other = [], [], []
for query in hql.split(';'): # naive
query_original = query
query = query.lower().strip()
if query.startswith('create table'):
create.append(query_original)
elif query.startswith(('set ',
'add jar ',
'create temporary function')):
other.append(query_original)
elif query.startswith('insert'):
insert.append(query_original)
other = ';'.join(other)
for query_set in [create, insert]:
for query in query_set:
query_preview = ' '.join(query.split())[:50]
logging.info("Testing HQL [{0} (...)]".format(query_preview))
if query_set == insert:
query = other + '; explain ' + query
else:
query = 'explain ' + query
try:
self.run_cli(query, verbose=False)
except AirflowException as e:
message = e.args[0].split('\n')[-2]
logging.info(message)
                    error_loc = re.search(r'(\d+):(\d+)', message)
if error_loc and error_loc.group(1).isdigit():
l = int(error_loc.group(1))
begin = max(l-2, 0)
end = min(l+3, len(query.split('\n')))
context = '\n'.join(query.split('\n')[begin:end])
logging.info("Context :\n {0}".format(context))
else:
logging.info("SUCCESS")
def load_file(
self,
filepath,
table,
delimiter=",",
field_dict=None,
create=True,
overwrite=True,
partition=None,
recreate=False):
"""
Loads a local file into Hive
Note that the table generated in Hive uses ``STORED AS textfile``
which isn't the most efficient serialization format. If a
large amount of data is loaded and/or if the tables gets
queried considerably, you may want to use this operator only to
stage the data into a temporary table before loading it into its
final destination using a ``HiveOperator``.
:param table: target Hive table, use dot notation to target a
specific database
:type table: str
:param create: whether to create the table if it doesn't exist
:type create: bool
:param recreate: whether to drop and recreate the table at every
execution
:type recreate: bool
:param partition: target partition as a dict of partition columns
and values
:type partition: dict
:param delimiter: field delimiter in the file
:type delimiter: str
"""
hql = ''
if recreate:
hql += "DROP TABLE IF EXISTS {table};\n"
if create or recreate:
fields = ",\n ".join(
[k + ' ' + v for k, v in field_dict.items()])
hql += "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n"
if partition:
pfields = ",\n ".join(
[p + " STRING" for p in partition])
hql += "PARTITIONED BY ({pfields})\n"
hql += "ROW FORMAT DELIMITED\n"
hql += "FIELDS TERMINATED BY '{delimiter}'\n"
hql += "STORED AS textfile;"
hql = hql.format(**locals())
logging.info(hql)
self.run_cli(hql)
hql = "LOAD DATA LOCAL INPATH '{filepath}' "
if overwrite:
hql += "OVERWRITE "
hql += "INTO TABLE {table} "
if partition:
pvals = ", ".join(
["{0}='{1}'".format(k, v) for k, v in partition.items()])
hql += "PARTITION ({pvals});"
hql = hql.format(**locals())
logging.info(hql)
self.run_cli(hql)
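    # Illustrative use of load_file() (connection, file path and column types
    # are placeholders; the staging-table pattern is the one recommended in
    # the docstring above):
    #   hook = HiveCliHook()
    #   hook.load_file('/tmp/babynames.csv', 'tmp.staging_babynames',
    #                  field_dict={'state': 'STRING', 'name': 'STRING'},
    #                  delimiter=',', recreate=True)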
def kill(self):
if hasattr(self, 'sp'):
if self.sp.poll() is None:
print("Killing the Hive job")
self.sp.kill()
class HiveMetastoreHook(BaseHook):
'''
Wrapper to interact with the Hive Metastore
'''
def __init__(self, metastore_conn_id='metastore_default'):
self.metastore_conn = self.get_connection(metastore_conn_id)
self.metastore = self.get_metastore_client()
def __getstate__(self):
        # This is for pickling to work despite the thrift hive client not
        # being picklable
d = dict(self.__dict__)
del d['metastore']
return d
def __setstate__(self, d):
self.__dict__.update(d)
self.__dict__['metastore'] = self.get_metastore_client()
def get_metastore_client(self):
'''
Returns a Hive thrift client.
'''
ms = self.metastore_conn
transport = TSocket.TSocket(ms.host, ms.port)
transport = TTransport.TBufferedTransport(transport)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return ThriftHive.Client(protocol)
def get_conn(self):
return self.metastore
def check_for_partition(self, schema, table, partition):
'''
Checks whether a partition exists
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.check_for_partition('airflow', t, "ds='2015-01-01'")
True
'''
self.metastore._oprot.trans.open()
partitions = self.metastore.get_partitions_by_filter(
schema, table, partition, 1)
self.metastore._oprot.trans.close()
if partitions:
return True
else:
return False
def get_table(self, table_name, db='default'):
'''
Get a metastore table object
>>> hh = HiveMetastoreHook()
>>> t = hh.get_table(db='airflow', table_name='static_babynames')
>>> t.tableName
'static_babynames'
>>> [col.name for col in t.sd.cols]
['state', 'year', 'name', 'gender', 'num']
'''
self.metastore._oprot.trans.open()
if db == 'default' and '.' in table_name:
db, table_name = table_name.split('.')[:2]
table = self.metastore.get_table(dbname=db, tbl_name=table_name)
self.metastore._oprot.trans.close()
return table
def get_tables(self, db, pattern='*'):
'''
Get metastore table objects for all tables in ``db`` matching ``pattern``
'''
self.metastore._oprot.trans.open()
tables = self.metastore.get_tables(db_name=db, pattern=pattern)
objs = self.metastore.get_table_objects_by_name(db, tables)
self.metastore._oprot.trans.close()
return objs
def get_databases(self, pattern='*'):
'''
Get metastore database names matching ``pattern``
'''
self.metastore._oprot.trans.open()
dbs = self.metastore.get_databases(pattern)
self.metastore._oprot.trans.close()
return dbs
def get_partitions(
self, schema, table_name, filter=None):
'''
Returns a list of all partitions in a table. Works only
for tables with fewer than 32767 partitions (the Java short max value).
For subpartitioned tables, the number might easily exceed this.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> parts = hh.get_partitions(schema='airflow', table_name=t)
>>> len(parts)
1
>>> parts
[{'ds': '2015-01-01'}]
'''
self.metastore._oprot.trans.open()
table = self.metastore.get_table(dbname=schema, tbl_name=table_name)
if len(table.partitionKeys) == 0:
raise AirflowException("The table isn't partitioned")
else:
if filter:
parts = self.metastore.get_partitions_by_filter(
db_name=schema, tbl_name=table_name,
filter=filter, max_parts=32767)
else:
parts = self.metastore.get_partitions(
db_name=schema, tbl_name=table_name, max_parts=32767)
self.metastore._oprot.trans.close()
pnames = [p.name for p in table.partitionKeys]
return [dict(zip(pnames, p.values)) for p in parts]
def max_partition(self, schema, table_name, field=None, filter=None):
'''
Returns the maximum value over all partitions in a table. Works only
for tables that have a single partition key. For subpartitioned
tables, we recommend using signal tables.
>>> hh = HiveMetastoreHook()
>>> t = 'static_babynames_partitioned'
>>> hh.max_partition(schema='airflow', table_name=t)
'2015-01-01'
'''
parts = self.get_partitions(schema, table_name, filter)
if not parts:
return None
elif len(parts[0]) == 1:
field = list(parts[0].keys())[0]
elif not field:
raise AirflowException(
"Please specify the field you want the max "
"value for")
return max([p[field] for p in parts])
class HiveServer2Hook(BaseHook):
'''
Wrapper around the pyhs2 library
Note that the default authMechanism is NOSASL. To override it, you
can specify it in the ``extra`` field of your connection in the UI, as in
``{"authMechanism": "PLAIN"}``. Refer to the pyhs2 documentation for more details.
'''
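# Hedged example of a connection ``extra`` value overriding the default
# authentication mechanism (the value is illustrative, not taken from this
# module): {"authMechanism": "PLAIN"}. When core.security is "kerberos",
# get_conn() below defaults to KERBEROS unless the extra overrides it.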
def __init__(self, hiveserver2_conn_id='hiveserver2_default'):
self.hiveserver2_conn_id = hiveserver2_conn_id
def get_conn(self):
db = self.get_connection(self.hiveserver2_conn_id)
auth_mechanism = db.extra_dejson.get('authMechanism', 'NOSASL')
if conf.get('core', 'security') == 'kerberos':
auth_mechanism = db.extra_dejson.get('authMechanism', 'KERBEROS')
return pyhs2.connect(
host=db.host,
port=db.port,
authMechanism=auth_mechanism,
user=db.login,
database=db.schema or 'default')
def get_results(self, hql, schema='default', arraysize=1000):
with self.get_conn() as conn:
if isinstance(hql, basestring):
hql = [hql]
results = {
'data': [],
'header': [],
}
for statement in hql:
with conn.cursor() as cur:
cur.execute(statement)
records = cur.fetchall()
if records:
results = {
'data': records,
'header': cur.getSchema(),
}
return results
def to_csv(self, hql, csv_filepath, schema='default'):
schema = schema or 'default'
with self.get_conn() as conn:
with conn.cursor() as cur:
logging.info("Running query: " + hql)
cur.execute(hql)
schema = cur.getSchema()
with open(csv_filepath, 'w') as f:
writer = csv.writer(f)
writer.writerow([c['columnName'] for c in cur.getSchema()])
i = 0
while cur.hasMoreRows:
rows = [row for row in cur.fetchmany() if row]
writer.writerows(rows)
i += len(rows)
logging.info("Written {0} rows so far.".format(i))
logging.info("Done. Loaded a total of {0} rows.".format(i))
def get_records(self, hql, schema='default'):
'''
Get a set of records from a Hive query.
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> len(hh.get_records(sql))
100
'''
return self.get_results(hql, schema=schema)['data']
def get_pandas_df(self, hql, schema='default'):
'''
Get a pandas dataframe from a Hive query
>>> hh = HiveServer2Hook()
>>> sql = "SELECT * FROM airflow.static_babynames LIMIT 100"
>>> df = hh.get_pandas_df(sql)
>>> len(df.index)
100
'''
import pandas as pd
res = self.get_results(hql, schema=schema)
df = pd.DataFrame(res['data'])
df.columns = [c['columnName'] for c in res['header']]
return df
|
apache-2.0
|
CERNDocumentServer/invenio
|
modules/bibauthorid/lib/bibauthorid_tortoise.py
|
3
|
16189
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from invenio import bibauthorid_config as bconfig
from datetime import datetime
import os
#import cPickle as SER
import msgpack as SER
import gzip as filehandler
import gc
import numpy as np
# This is supposed to mitigate some of the Python VM performance losses:
import sys
sys.setcheckinterval(1000000)
try:
from collections import defaultdict
except:
from invenio.containerutils import defaultdict
from itertools import groupby, chain, repeat
from invenio.bibauthorid_general_utils import update_status, update_status_final, override_stdout_config
override_stdout_config(fileout=True, stdout=False)
from invenio.bibauthorid_cluster_set import delayed_cluster_sets_from_marktables
from invenio.bibauthorid_cluster_set import delayed_cluster_sets_from_personid
from invenio.bibauthorid_wedge import wedge
from invenio.bibauthorid_name_utils import generate_last_name_cluster_str
from invenio.bibauthorid_backinterface import empty_tortoise_results_table
from invenio.bibauthorid_backinterface import remove_clusters_by_name
from invenio.bibauthorid_general_utils import bibauthor_print
from invenio.bibauthorid_prob_matrix import prepare_matirx
#Scheduler is [temporarily] deprecated in favour of the much simpler schedule_workers
#from invenio.bibauthorid_scheduler import schedule, matrix_coefs
from invenio.bibauthorid_least_squares import to_function as create_approx_func
from invenio.bibauthorid_general_utils import schedule_workers
#python2.4 compatibility
from invenio.bibauthorid_general_utils import bai_all as all
'''
There are three main entry points to tortoise
i) tortoise
Performs disambiguation iteration.
The argument pure indicates whether to use
the claims and the rejections or not.
Use pure=True only to test the accuracy of tortoise.
ii) tortoise_from_scratch
NOT RECOMMENDED!
Use this function only if you have just
installed invenio and this is your first
disambiguation or if personid is broken.
iii) tortoise_last_name
Computes the clusters for only one last name
group. It is primarily used for testing. It
may also be used to fix a broken last name
cluster. It does not involve multiprocessing,
so it is convenient to debug with pdb.
'''
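# Hedged usage sketch of the three entry points described above (the last
# name value is illustrative only):
#
#   tortoise_from_scratch()          # only for a first-ever disambiguation
#   tortoise(pure=False)             # regular disambiguation iteration
#   tortoise_last_name('Ellis, J.')  # recompute a single last-name cluster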
# Exit codes:
# The standard ones are not well documented
# so we are using random numbers.
def tortoise_from_scratch():
bibauthor_print("Preparing cluster sets.")
cluster_sets, _lnames, sizes = delayed_cluster_sets_from_marktables()
bibauthor_print("Building all matrices.")
schedule_workers(lambda x: force_create_matrix(x, force=True), cluster_sets)
empty_tortoise_results_table()
bibauthor_print("Preparing cluster sets.")
cluster_sets, _lnames, sizes = delayed_cluster_sets_from_marktables()
bibauthor_print("Starting disambiguation.")
schedule_workers(wedge, cluster_sets)
def tortoise(pure=False,
force_matrix_creation=False,
skip_matrix_creation=False,
last_run=None):
assert not force_matrix_creation or not skip_matrix_creation
# The computation must be forced in case we want
# to compute pure results
force_matrix_creation = force_matrix_creation or pure
if not skip_matrix_creation:
bibauthor_print("Preparing cluster sets.")
clusters, _lnames, sizes = delayed_cluster_sets_from_personid(pure, last_run)
bibauthor_print("Building all matrices.")
schedule_workers(lambda x: force_create_matrix(x, force=force_matrix_creation), clusters)
bibauthor_print("Preparing cluster sets.")
clusters, _lnames, sizes = delayed_cluster_sets_from_personid(pure, last_run)
bibauthor_print("Starting disambiguation.")
schedule_workers(wedge_and_store, clusters)
def tortoise_last_name(name, from_mark=True, pure=False):
bibauthor_print('Start working on %s' % name)
assert not(from_mark and pure)
lname = generate_last_name_cluster_str(name)
if from_mark:
bibauthor_print(' ... from mark!')
clusters, lnames, sizes = delayed_cluster_sets_from_marktables([lname])
bibauthor_print(' ... delayed done')
else:
bibauthor_print(' ... from pid, pure=%s'%str(pure))
clusters, lnames, sizes = delayed_cluster_sets_from_personid(pure)
bibauthor_print(' ... delayed pure done!')
# try:
idx = lnames.index(lname)
cluster = clusters[idx]
size = sizes[idx]
cluster_set = cluster()
bibauthor_print("Found, %s(%s). Total number of bibs: %d." % (name, lname, size))
create_matrix(cluster_set, False)
wedge_and_store(cluster_set)
# except (IndexError, ValueError), e:
# print e
# raise e
# bibauthor_print("Sorry, %s(%s) not found in the last name clusters" % (name, lname))
def tortoise_last_names(names_list):
schedule_workers(tortoise_last_name, names_list)
def _collect_statistics_lname_coeff(params):
lname = params[0]
coeff = params[1]
clusters, lnames, sizes = delayed_cluster_sets_from_marktables([lname])
try:
idx = lnames.index(lname)
cluster = clusters[idx]
size = sizes[idx]
bibauthor_print("Found, %s. Total number of bibs: %d." % (lname, size))
cluster_set = cluster()
create_matrix(cluster_set, False)
bibs = cluster_set.num_all_bibs
expected = bibs * (bibs - 1) / 2
bibauthor_print("Start working on %s. Total number of bibs: %d, "
"maximum number of comparisons: %d"
% (cluster_set.last_name, bibs, expected))
wedge(cluster_set, True, coeff)
remove_clusters_by_name(cluster_set.last_name)
except (IndexError, ValueError):
bibauthor_print("Sorry, %s not found in the last name clusters," % (lname))
def _create_matrix(lname):
clusters, lnames, sizes = delayed_cluster_sets_from_marktables([lname])
try:
idx = lnames.index(lname)
cluster = clusters[idx]
size = sizes[idx]
bibauthor_print("Found, %s. Total number of bibs: %d." % (lname, size))
cluster_set = cluster()
create_matrix(cluster_set, False)
bibs = cluster_set.num_all_bibs
expected = bibs * (bibs - 1) / 2
bibauthor_print("Start working on %s. Total number of bibs: %d, "
"maximum number of comparisons: %d"
% (cluster_set.last_name, bibs, expected))
cluster_set.store()
except (IndexError, ValueError):
bibauthor_print("Sorry, %s not found in the last name clusters, not creating matrix" % (lname))
def tortoise_tweak_coefficient(lastnames, min_coef, max_coef, stepping, build_matrix=True):
bibauthor_print('Coefficient tweaking!')
bibauthor_print('Cluster sets from mark...')
lnames = set([generate_last_name_cluster_str(n) for n in lastnames])
coefficients = [x/100. for x in range(int(min_coef*100),int(max_coef*100),int(stepping*100))]
if build_matrix:
schedule_workers(_create_matrix, lnames)
schedule_workers(_collect_statistics_lname_coeff, ((x,y) for x in lnames for y in coefficients ))
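# For example (hypothetical bounds): min_coef=0.1, max_coef=0.5, stepping=0.1
# gives coefficients [0.1, 0.2, 0.3, 0.4], i.e. range(10, 50, 10) scaled by 1/100.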
def tortoise_coefficient_statistics(pickle_output=None, generate_graphs=True):
import matplotlib.pyplot as plt
plt.ioff()
def _gen_plot(data, filename):
plt.clf()
ax = plt.subplot(111)
ax.grid(visible=True)
x = sorted(data.keys())
w = [data[k][0] for k in x]
try:
wscf = max(w)
except:
wscf = 0
w = [float(i)/wscf for i in w]
y = [data[k][1] for k in x]
maxi = [data[k][3] for k in x]
mini = [data[k][2] for k in x]
lengs = [data[k][4] for k in x]
try:
ml = float(max(lengs))
except:
ml = 1
lengs = [k/ml for k in lengs]
normalengs = [data[k][5] for k in x]
ax.plot(x,y,'-o',label='avg')
ax.plot(x,maxi,'-o', label='max')
ax.plot(x,mini,'-o', label='min')
ax.plot(x,w, '-x', label='norm %s' % str(wscf))
ax.plot(x,lengs,'-o',label='acl %s' % str(int(ml)))
ax.plot(x,normalengs, '-o', label='ncl')
plt.ylim(ymax = 1., ymin = -0.01)
plt.xlim(xmax = 1., xmin = -0.01)
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,ncol=6, mode="expand", borderaxespad=0.)
plt.savefig(filename)
override_stdout_config(stdout=True)
files = ['/tmp/baistats/'+x for x in os.listdir('/tmp/baistats/') if x.startswith('cluster_status_report_pid')]
fnum = float(len(files))
quanta = .1/fnum
total_stats = 0
used_coeffs = set()
used_clusters = set()
#av_counter, avg, min, max, nclus, normalized_avg
cluster_stats = defaultdict(lambda : defaultdict(lambda : [0.,0.,0.,0.,0.,0.]))
coeff_stats = defaultdict(lambda : [0.,0.,0.,0.,0.,0.])
def gen_graphs(only_synthetic=False):
update_status(0, 'Generating coefficients graph...')
_gen_plot(coeff_stats, '/tmp/graphs/AAAAA-coefficients.svg')
if not only_synthetic:
cn = cluster_stats.keys()
l = float(len(cn))
for i,c in enumerate(cn):
update_status(i/l, 'Generating name graphs... %s' % str(c))
_gen_plot(cluster_stats[c], '/tmp/graphs/CS-%s.png' % str(c))
for i,fi in enumerate(files):
if generate_graphs:
if i%1000 ==0:
gen_graphs(True)
f = filehandler.open(fi,'r')
status = i/fnum
update_status(status, 'Loading '+ fi[fi.find('lastname')+9:])
contents = SER.load(f)
f.close()
cur_coef = contents[0]
cur_clust = contents[1]
cur_maxlen = float(contents[3])
if cur_coef:
total_stats += 1
used_coeffs.add(cur_coef)
used_clusters.add(cur_clust)
update_status(status+0.2*quanta, ' Computing averages...')
cur_clen = len(contents[2])
cur_coeffs = [x[2] for x in contents[2]]
cur_clustnumber = float(len(set([x[0] for x in contents[2]])))
assert cur_clustnumber > 0 and cur_clustnumber < cur_maxlen, "Error, found log with strange clustnumber! %s %s %s %s" % (str(cur_clust), str(cur_coef), str(cur_maxlen),
str(cur_clustnumber))
if cur_coeffs:
assert len(cur_coeffs) == cur_clen and cur_coeffs, "Error, there is a cluster without coefficients? %s %s %s" % (str(cur_clust), str(cur_coef), str(cur_coeffs))
assert all([x >= 0 and x <= 1 for x in cur_coeffs]), "Error, a coefficient is wrong here! Check me! %s %s %s" % (str(cur_clust), str(cur_coef), str(cur_coeffs))
cur_min = min(cur_coeffs)
cur_max = max(cur_coeffs)
cur_avg = sum(cur_coeffs)/cur_clen
update_status(status+0.4*quanta, ' cumulative per coeff...')
avi = coeff_stats[cur_coef][0]
#number of points
coeff_stats[cur_coef][0] = avi+1
#average of coefficients
coeff_stats[cur_coef][1] = (coeff_stats[cur_coef][1]*avi + cur_avg)/(avi+1)
#min coeff
coeff_stats[cur_coef][2] = min(coeff_stats[cur_coef][2], cur_min)
#max coeff
coeff_stats[cur_coef][3] = max(coeff_stats[cur_coef][3], cur_max)
#avg number of clusters
coeff_stats[cur_coef][4] = (coeff_stats[cur_coef][4]*avi + cur_clustnumber)/(avi+1)
#normalized avg number of clusters
coeff_stats[cur_coef][5] = (coeff_stats[cur_coef][5]*avi + cur_clustnumber/cur_maxlen)/(avi+1)
update_status(status+0.6*quanta, ' cumulative per cluster per coeff...')
avi = cluster_stats[cur_clust][cur_coef][0]
cluster_stats[cur_clust][cur_coef][0] = avi+1
cluster_stats[cur_clust][cur_coef][1] = (cluster_stats[cur_clust][cur_coef][1]*avi + cur_avg)/(avi+1)
cluster_stats[cur_clust][cur_coef][2] = min(cluster_stats[cur_clust][cur_coef][2], cur_min)
cluster_stats[cur_clust][cur_coef][3] = max(cluster_stats[cur_clust][cur_coef][3], cur_max)
cluster_stats[cur_clust][cur_coef][4] = (cluster_stats[cur_clust][cur_coef][4]*avi + cur_clustnumber)/(avi+1)
cluster_stats[cur_clust][cur_coef][5] = (cluster_stats[cur_clust][cur_coef][5]*avi + cur_clustnumber/cur_maxlen)/(avi+1)
update_status_final('Done!')
if generate_graphs:
gen_graphs()
if pickle_output:
update_status(0,'Dumping to file...')
f = open(pickle_output,'w')
SER.dump({'cluster_stats':dict((x,dict(cluster_stats[x])) for x in cluster_stats.iterkeys()), 'coeff_stats':dict((coeff_stats))}, f)
f.close()
def create_matrix(cluster_set, force):
bibs = cluster_set.num_all_bibs
expected = bibs * (bibs - 1) / 2
bibauthor_print("Start building matrix for %s. Total number of bibs: %d, "
"maximum number of comparisons: %d"
% (cluster_set.last_name, bibs, expected))
return prepare_matirx(cluster_set, force)
def force_create_matrix(cluster_set, force):
bibauthor_print("Building a cluster set.")
return create_matrix(cluster_set(), force)
def wedge_and_store(cluster_set):
bibs = cluster_set.num_all_bibs
expected = bibs * (bibs - 1) / 2
bibauthor_print("Start working on %s. Total number of bibs: %d, "
"maximum number of comparisons: %d"
% (cluster_set.last_name, bibs, expected))
wedge(cluster_set)
remove_clusters_by_name(cluster_set.last_name)
cluster_set.store()
return True
def force_wedge_and_store(cluster_set):
bibauthor_print("Building a cluster set.")
return wedge_and_store(cluster_set())
#[temporarily] deprecated
#def schedule_create_matrix(cluster_sets, sizes, force):
# def create_job(cluster):
# def ret():
# return force_create_matrix(cluster, force)
# return ret
#
# memfile_path = None
# if bconfig.DEBUG_PROCESS_PEAK_MEMORY:
# tt = datetime.now()
# tt = (tt.hour, tt.minute, tt.day, tt.month, tt.year)
# memfile_path = ('%smatrix_memory_%d:%d_%d-%d-%d.log' %
# ((bconfig.TORTOISE_FILES_PATH,) + tt))
#
# return schedule(map(create_job, cluster_sets),
# sizes,
# create_approx_func(matrix_coefs),
# memfile_path)
#
#
#def schedule_wedge_and_store(cluster_sets, sizes):
# def create_job(cluster):
# def ret():
# return force_wedge_and_store(cluster)
# return ret
#
# memfile_path = None
# if bconfig.DEBUG_PROCESS_PEAK_MEMORY:
# tt = datetime.now()
# tt = (tt.hour, tt.minute, tt.day, tt.month, tt.year)
# memfile_path = ('%swedge_memory_%d:%d_%d-%d-%d.log' %
# ((bconfig.TORTOISE_FILES_PATH,) + tt))
#
# return schedule(map(create_job, cluster_sets),
# sizes,
# create_approx_func(matrix_coefs),
# memfile_path)
|
gpl-2.0
|
timoMa/vigra
|
vigranumpy/examples/grid_graph_shortestpath.py
|
8
|
3978
|
import vigra
import vigra.graphs as vigraph
import pylab
import numpy
np=numpy
import sys
import matplotlib
import pylab as plt
import math
from matplotlib.widgets import Slider, Button, RadioButtons
def makeWeights(gamma):
global hessian,gradmag,gridGraph
print "hessian",hessian.min(),hessian.max()
print "raw ",raw.min(),raw.max()
wImg= numpy.exp((gradmag**0.5)*gamma*-1.0)#**0.5
wImg = numpy.array(wImg).astype(numpy.float32)
w=vigra.graphs.implicitMeanEdgeMap(gridGraph,wImg)
return w
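# makeWeights maps strong gradient magnitudes to small edge weights via
# exp(-gamma * sqrt(gradmag)), so the shortest path computed below prefers to
# run along image edges; a larger gamma sharpens this preference.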
def makeVisuImage(path,img):
coords = (path[:,0],path[:,1])
visuimg =img.copy()
iR=visuimg[:,:,0]
iG=visuimg[:,:,1]
iB=visuimg[:,:,2]
iR[coords]=255
iG[coords]=0
iB[coords]=0
visuimg-=visuimg.min()
visuimg/=visuimg.max()
return visuimg
f = '100075.jpg'
f = '69015.jpg'
#f = "/media/tbeier/GSP1RMCPRFR/iso.03530.png"
img = vigra.impex.readImage(f)
print img.shape
if(img.shape[2]==1):
img = numpy.concatenate([img]*3,axis=2)
imgLab = img
imgLab = vigra.taggedView(imgLab,'xyc')
else:
imgLab = vigra.colors.transform_RGB2Lab(img)
sigma = 1.0
imgLab-=imgLab.min()
imgLab/=imgLab.max()
imgLab*=255
img-=img.min()
img/=img.max()
img*=255
print imgLab.shape
print "interpolate image"
imgLabSmall = imgLab
# make a few edge weights
gradmag = numpy.squeeze(vigra.filters.gaussianGradientMagnitude(imgLabSmall,sigma))
hessian = numpy.squeeze(vigra.filters.hessianOfGaussianEigenvalues(imgLabSmall[:,:,0],sigma))[:,:,0]
hessian-=hessian.min()
raw = 256-imgLabSmall[:,:,0].copy()
gridGraph = vigraph.gridGraph(imgLab.shape[:2],False)
weights = makeWeights(3.0)
pathFinder = vigraph.ShortestPathPathDijkstra(gridGraph)
visuimg =img.copy()
ax = plt.gca()
fig = plt.gcf()
visuimg-=visuimg.min()
visuimg/=visuimg.max()
implot = ax.imshow(numpy.swapaxes(visuimg,0,1),cmap='gray')
clickList=[]
frozen = False
axslider = plt.axes([0.0, 0.00, 0.4, 0.075])
axfreeze = plt.axes([0.6, 0.00, 0.1, 0.075])
axunfreeze = plt.axes([0.8, 0.00, 0.1, 0.075])
bfreeze = Button(axfreeze, 'freeze')
bunfreeze = Button(axunfreeze, 'unfreeze and clear')
sgamma = Slider(axslider, 'gamma', 0.01, 5.0, valinit=1.0)
def onclick(event):
global clickList
global weights
global img
if event.xdata != None and event.ydata != None:
xRaw,yRaw = event.xdata,event.ydata
if not frozen and xRaw >=0.0 and yRaw>=0.0 and xRaw<img.shape[0] and yRaw<img.shape[1]:
x,y = long(math.floor(event.xdata)),long(math.floor(event.ydata))
clickList.append((x,y))
if len(clickList)==2:
source = gridGraph.coordinateToNode(clickList[0])
target = gridGraph.coordinateToNode(clickList[1])
weights = makeWeights(sgamma.val)
#path = pathFinder.run(weights, source,target).path(pathType='coordinates')
path = pathFinder.run(weights, source).path(pathType='coordinates',target=target)
visuimg = makeVisuImage(path,img)
implot.set_data(numpy.swapaxes(visuimg,0,1))
plt.draw()
def freeze(event):
global frozen
frozen=True
def unfreeze(event):
global frozen,clickList
frozen=False
clickList = []
def onslide(event):
global img,gradmag,weights,clickList,sgamma
weights = makeWeights(sgamma.val)
print "onslide",clickList
if len(clickList)>=2:
print "we have path"
source = gridGraph.coordinateToNode(clickList[0])
target = gridGraph.coordinateToNode(clickList[1])
path = pathFinder.run(weights, source,target).path(pathType='coordinates')
visuimg = makeVisuImage(path,img)
implot.set_data(numpy.swapaxes(visuimg,0,1))
plt.draw()
bfreeze.on_clicked(freeze)
bunfreeze.on_clicked(unfreeze)
sgamma.on_changed(onslide)
cid = fig.canvas.mpl_connect('button_press_event', onclick)
plt.show()
|
mit
|
askerry/pyxley
|
examples/datatables/project/app.py
|
11
|
3137
|
from flask import Flask
from flask import request, jsonify, render_template, make_response
import pandas as pd
import json
import sys
import glob
from react import jsx
import numpy as np
import re
import argparse
from pyxley.charts.datatables import DataTable
from pyxley import SimpleComponent
from pyxley.filters import SelectButton
from collections import OrderedDict
parser = argparse.ArgumentParser(description="Flask Template")
parser.add_argument("--env", help="production or local", default="local")
args = parser.parse_args()
TITLE = "Pyxley"
scripts = [
"./bower_components/jquery/dist/jquery.min.js",
"./bower_components/datatables/media/js/jquery.dataTables.js",
"./dataTables.fixedColumns.js",
"./bower_components/d3/d3.min.js",
"./bower_components/require/build/require.min.js",
"./bower_components/react/react.js",
"./bower_components/react-bootstrap/react-bootstrap.min.js",
"./conf_int.js",
"./bower_components/pyxley/build/pyxley.js"
]
css = [
"./bower_components/bootstrap/dist/css/bootstrap.min.css",
"./bower_components/datatables/media/css/jquery.dataTables.min.css",
"./css/main.css"
]
df = pd.DataFrame(json.load(open("./static/data.json", "r")))
df = df.dropna()
df["salary"] = df["salary"].apply(lambda x: float(re.sub("[^\d\.]", "", x)))
df["lower"] = ( 1. - (0.03*np.random.randn(df.shape[0]) + 0.15))
df["upper"] = ( 1. + (0.03*np.random.randn(df.shape[0]) + 0.15))
df["salary_upper"] = df["upper"]*df["salary"]
df["salary_lower"] = df["lower"]*df["salary"]
cols = OrderedDict([
("position", {"label": "Position"}),
("office", {"label": "Office"}),
("start_date", {"label": "Start Date"}),
("salary_lower", {"label": "Salary Range",
"confidence": {
"lower": "salary_lower",
"upper": "salary_upper"
}
})
])
addfunc = """
new $.fn.dataTable.FixedColumns(this, {
leftColumns: 1,
rightColumns: 0
});
confidence_interval(this.api().column(3, {"page":"current"}).data(), "mytable");
"""
drawfunc = """
confidence_interval(this.api().column(3, {"page":"current"}).data(), "mytable");
"""
tb = DataTable("mytable", "/mytable/", df,
columns=cols,
paging=True,
pageLength=9,
scrollX=True,
columnDefs=[{
"render": """<svg width="156" height="20"><g></g></svg>""",
"orderable": False,
"targets": 3
}],
sDom='<"top">rt<"bottom"lp><"clear">',
deferRender=True,
initComplete=addfunc,
drawCallback=drawfunc)
app = Flask(__name__)
tb.register_route(app)
ui = SimpleComponent(
"Table",
"./static/bower_components/pyxley/build/pyxley.js",
"component_id",
tb.params
)
sb = ui.render("./static/layout.js")
@app.route('/test', methods=["GET"])
def testtest():
return jsonify(jsfunc)
@app.route('/', methods=["GET"])
@app.route('/index', methods=["GET"])
def index():
_scripts = [
"./layout.js"
]
return render_template('index.html',
title=TITLE,
base_scripts=scripts,
page_scripts=_scripts,
css=css)
if __name__ == "__main__":
app.run(debug=True)
|
mit
|
AGPeddle/agloolik-elliptic
|
simpleMesher.py
|
1
|
1768
|
from __future__ import division
from __future__ import absolute_import
import pickle
import meshpy.triangle as triangle
import numpy as np
import numpy.linalg as la
from six.moves import range
import matplotlib.pyplot as pt
def round_trip_connect(start, end):
return [(i, i+1) for i in range(start, end)] + [(end, start)]
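# e.g. round_trip_connect(0, 3) -> [(0, 1), (1, 2), (2, 3), (3, 0)],
# i.e. the facets of a closed polygon over points 0..3.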
def main():
points = []
facets = []
circ_start = len(points)
points.extend(
(3 * np.cos(angle), 3 * np.sin(angle))
for angle in np.linspace(0, 2*np.pi, 30, endpoint=False))
#boundaryPts1 = points[0:20]
#boundaryFacets1 = []
#boundaryFacets1.extend(round_trip_connect(21,31))
boundaryPts1 = points[0:31]
boundaryFacets1 = []
facets.extend(round_trip_connect(circ_start, len(points)-1))
def needs_refinement(vertices, area):
bary = np.sum(np.array(vertices), axis=0)/3
max_area = 0.001 + (la.norm(bary, np.inf)-1)*0.1
return bool(area > max_area)
def refinement2(vertices, area):
return(area > 0.5)
info = triangle.MeshInfo()
info.set_points(points)
info.set_facets(facets)
mesh = triangle.build(info, refinement_func = refinement2)
mesh_points = np.array(mesh.points)
mesh_tris = np.array(mesh.elements)
boundary_points1 = []
i = 0
for meshPt in mesh_points:
for point in boundaryPts1:
if point == tuple(meshPt):
boundary_points1.append(i)
break
i+=1
mesh_out = [mesh_points, mesh_tris, [boundary_points1],[boundaryFacets1]]
with open('mesh2.msh','wb') as outFile:
pickle.dump(mesh_out,outFile)
pt.triplot(mesh_points[:, 0], mesh_points[:, 1], mesh_tris)
pt.show()
if __name__ == "__main__":
main()
|
gpl-2.0
|
pv/scikit-learn
|
sklearn/linear_model/bayes.py
|
220
|
15248
|
"""
Various bayesian regression
"""
from __future__ import print_function
# Authors: V. Michel, F. Pedregosa, A. Gramfort
# License: BSD 3 clause
from math import log
import numpy as np
from scipy import linalg
from .base import LinearModel
from ..base import RegressorMixin
from ..utils.extmath import fast_logdet, pinvh
from ..utils import check_X_y
###############################################################################
# BayesianRidge regression
class BayesianRidge(LinearModel, RegressorMixin):
"""Bayesian ridge regression
Fit a Bayesian ridge model and optimize the regularization parameters
lambda (precision of the weights) and alpha (precision of the noise).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300.
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter.
Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter.
Default is 1.e-6
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.BayesianRidge()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
BayesianRidge(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, tol=0.001, verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
-----
See examples/linear_model/plot_bayesian_ridge.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
fit_intercept=True, normalize=False, copy_X=True,
verbose=False):
self.n_iter = n_iter
self.tol = tol
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.fit_intercept = fit_intercept
self.normalize = normalize
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the model
Parameters
----------
X : numpy array of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples]
Target values
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
n_samples, n_features = X.shape
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = 1.
verbose = self.verbose
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
self.scores_ = list()
coef_old_ = None
XT_y = np.dot(X.T, y)
U, S, Vh = linalg.svd(X, full_matrices=False)
eigen_vals_ = S ** 2
### Convergence loop of the bayesian ridge regression
for iter_ in range(self.n_iter):
### Compute mu and sigma
# sigma_ = lambda_ / alpha_ * np.eye(n_features) + np.dot(X.T, X)
# coef_ = sigma_^-1 * XT * y
if n_samples > n_features:
coef_ = np.dot(Vh.T,
Vh / (eigen_vals_ + lambda_ / alpha_)[:, None])
coef_ = np.dot(coef_, XT_y)
if self.compute_score:
logdet_sigma_ = - np.sum(
np.log(lambda_ + alpha_ * eigen_vals_))
else:
coef_ = np.dot(X.T, np.dot(
U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T))
coef_ = np.dot(coef_, y)
if self.compute_score:
logdet_sigma_ = lambda_ * np.ones(n_features)
logdet_sigma_[:n_samples] += alpha_ * eigen_vals_
logdet_sigma_ = - np.sum(np.log(logdet_sigma_))
### Update alpha and lambda
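# gamma_ below is the effective number of well-determined parameters;
# the lambda_ and alpha_ re-estimates are the usual evidence-maximization
# updates, with the Gamma hyper-prior terms (lambda_1/2, alpha_1/2) folded in.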
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = (np.sum((alpha_ * eigen_vals_)
/ (lambda_ + alpha_ * eigen_vals_)))
lambda_ = ((gamma_ + 2 * lambda_1)
/ (np.sum(coef_ ** 2) + 2 * lambda_2))
alpha_ = ((n_samples - gamma_ + 2 * alpha_1)
/ (rmse_ + 2 * alpha_2))
### Compute the objective function
if self.compute_score:
s = lambda_1 * log(lambda_) - lambda_2 * lambda_
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (n_features * log(lambda_)
+ n_samples * log(alpha_)
- alpha_ * rmse_
- (lambda_ * np.sum(coef_ ** 2))
- logdet_sigma_
- n_samples * log(2 * np.pi))
self.scores_.append(s)
### Check for convergence
if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Convergence after ", str(iter_), " iterations")
break
coef_old_ = np.copy(coef_)
self.alpha_ = alpha_
self.lambda_ = lambda_
self.coef_ = coef_
self._set_intercept(X_mean, y_mean, X_std)
return self
###############################################################################
# ARD (Automatic Relevance Determination) regression
class ARDRegression(LinearModel, RegressorMixin):
"""Bayesian ARD regression.
Fit the weights of a regression model, using an ARD prior. The weights of
the regression model are assumed to follow Gaussian distributions.
Also estimate the parameters lambda (precisions of the distributions of the
weights) and alpha (precision of the distribution of the noise).
The estimation is done by an iterative procedure (Evidence Maximization).
Read more in the :ref:`User Guide <bayesian_regression>`.
Parameters
----------
n_iter : int, optional
Maximum number of iterations. Default is 300
tol : float, optional
Stop the algorithm if w has converged. Default is 1.e-3.
alpha_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the alpha parameter. Default is 1.e-6.
alpha_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the alpha parameter. Default is 1.e-6.
lambda_1 : float, optional
Hyper-parameter : shape parameter for the Gamma distribution prior
over the lambda parameter. Default is 1.e-6.
lambda_2 : float, optional
Hyper-parameter : inverse scale parameter (rate parameter) for the
Gamma distribution prior over the lambda parameter. Default is 1.e-6.
compute_score : boolean, optional
If True, compute the objective function at each step of the model.
Default is False.
threshold_lambda : float, optional
threshold for removing (pruning) weights with high precision from
the computation. Default is 1.e+4.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
Default is True.
normalize : boolean, optional, default False
If True, the regressors X will be normalized before regression.
copy_X : boolean, optional, default True.
If True, X will be copied; else, it may be overwritten.
verbose : boolean, optional, default False
Verbose mode when fitting the model.
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of distribution)
alpha_ : float
estimated precision of the noise.
lambda_ : array, shape = (n_features)
estimated precisions of the weights.
sigma_ : array, shape = (n_features, n_features)
estimated variance-covariance matrix of the weights
scores_ : float
if computed, value of the objective function (to be maximized)
Examples
--------
>>> from sklearn import linear_model
>>> clf = linear_model.ARDRegression()
>>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2])
... # doctest: +NORMALIZE_WHITESPACE
ARDRegression(alpha_1=1e-06, alpha_2=1e-06, compute_score=False,
copy_X=True, fit_intercept=True, lambda_1=1e-06, lambda_2=1e-06,
n_iter=300, normalize=False, threshold_lambda=10000.0, tol=0.001,
verbose=False)
>>> clf.predict([[1, 1]])
array([ 1.])
Notes
--------
See examples/linear_model/plot_ard.py for an example.
"""
def __init__(self, n_iter=300, tol=1.e-3, alpha_1=1.e-6, alpha_2=1.e-6,
lambda_1=1.e-6, lambda_2=1.e-6, compute_score=False,
threshold_lambda=1.e+4, fit_intercept=True, normalize=False,
copy_X=True, verbose=False):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.normalize = normalize
self.alpha_1 = alpha_1
self.alpha_2 = alpha_2
self.lambda_1 = lambda_1
self.lambda_2 = lambda_2
self.compute_score = compute_score
self.threshold_lambda = threshold_lambda
self.copy_X = copy_X
self.verbose = verbose
def fit(self, X, y):
"""Fit the ARDRegression model according to the given training data
and parameters.
Iterative procedure to maximize the evidence
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array, shape = [n_samples]
Target values (integers)
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y, dtype=np.float64, y_numeric=True)
n_samples, n_features = X.shape
coef_ = np.zeros(n_features)
X, y, X_mean, y_mean, X_std = self._center_data(
X, y, self.fit_intercept, self.normalize, self.copy_X)
### Launch the convergence loop
keep_lambda = np.ones(n_features, dtype=bool)
lambda_1 = self.lambda_1
lambda_2 = self.lambda_2
alpha_1 = self.alpha_1
alpha_2 = self.alpha_2
verbose = self.verbose
### Initialization of the values of the parameters
alpha_ = 1. / np.var(y)
lambda_ = np.ones(n_features)
self.scores_ = list()
coef_old_ = None
### Iterative procedure of ARDRegression
for iter_ in range(self.n_iter):
### Compute mu and sigma (using Woodbury matrix identity)
sigma_ = pinvh(np.eye(n_samples) / alpha_ +
np.dot(X[:, keep_lambda] *
np.reshape(1. / lambda_[keep_lambda], [1, -1]),
X[:, keep_lambda].T))
sigma_ = np.dot(sigma_, X[:, keep_lambda]
* np.reshape(1. / lambda_[keep_lambda], [1, -1]))
sigma_ = - np.dot(np.reshape(1. / lambda_[keep_lambda], [-1, 1])
* X[:, keep_lambda].T, sigma_)
sigma_.flat[::(sigma_.shape[1] + 1)] += 1. / lambda_[keep_lambda]
coef_[keep_lambda] = alpha_ * np.dot(
sigma_, np.dot(X[:, keep_lambda].T, y))
### Update alpha and lambda
rmse_ = np.sum((y - np.dot(X, coef_)) ** 2)
gamma_ = 1. - lambda_[keep_lambda] * np.diag(sigma_)
lambda_[keep_lambda] = ((gamma_ + 2. * lambda_1)
/ ((coef_[keep_lambda]) ** 2
+ 2. * lambda_2))
alpha_ = ((n_samples - gamma_.sum() + 2. * alpha_1)
/ (rmse_ + 2. * alpha_2))
### Prune the weights with a precision over a threshold
keep_lambda = lambda_ < self.threshold_lambda
coef_[~keep_lambda] = 0
### Compute the objective function
if self.compute_score:
s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum()
s += alpha_1 * log(alpha_) - alpha_2 * alpha_
s += 0.5 * (fast_logdet(sigma_) + n_samples * log(alpha_)
+ np.sum(np.log(lambda_)))
s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_ ** 2).sum())
self.scores_.append(s)
### Check for convergence
if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol:
if verbose:
print("Converged after %s iterations" % iter_)
break
coef_old_ = np.copy(coef_)
self.coef_ = coef_
self.alpha_ = alpha_
self.sigma_ = sigma_
self.lambda_ = lambda_
self._set_intercept(X_mean, y_mean, X_std)
return self
|
bsd-3-clause
|
eg-zhang/scikit-learn
|
examples/model_selection/plot_precision_recall.py
|
249
|
6150
|
"""
================
Precision-Recall
================
Example of Precision-Recall metric to evaluate classifier output quality.
In information retrieval, precision is a measure of result relevancy, while
recall is a measure of how many truly relevant results are returned. A high
area under the curve represents both high recall and high precision, where high
precision relates to a low false positive rate, and high recall relates to a
low false negative rate. High scores for both show that the classifier is
returning accurate results (high precision), as well as returning a majority of
all positive results (high recall).
A system with high recall but low precision returns many results, but most of
its predicted labels are incorrect when compared to the training labels. A
system with high precision but low recall is just the opposite, returning very
few results, but most of its predicted labels are correct when compared to the
training labels. An ideal system with high precision and high recall will
return many results, with all results labeled correctly.
Precision (:math:`P`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false positives
(:math:`F_p`).
:math:`P = \\frac{T_p}{T_p+F_p}`
Recall (:math:`R`) is defined as the number of true positives (:math:`T_p`)
over the number of true positives plus the number of false negatives
(:math:`F_n`).
:math:`R = \\frac{T_p}{T_p + F_n}`
These quantities are also related to the (:math:`F_1`) score, which is defined
as the harmonic mean of precision and recall.
:math:`F1 = 2\\frac{P \\times R}{P+R}`
It is important to note that the precision may not decrease with recall. The
definition of precision (:math:`\\frac{T_p}{T_p + F_p}`) shows that lowering
the threshold of a classifier may increase the denominator, by increasing the
number of results returned. If the threshold was previously set too high, the
new results may all be true positives, which will increase precision. If the
previous threshold was about right or too low, further lowering the threshold
will introduce false positives, decreasing precision.
Recall is defined as :math:`\\frac{T_p}{T_p+F_n}`, where :math:`T_p+F_n` does
not depend on the classifier threshold. This means that lowering the classifier
threshold may increase recall, by increasing the number of true positive
results. It is also possible that lowering the threshold may leave recall
unchanged, while the precision fluctuates.
The relationship between recall and precision can be observed in the
stairstep area of the plot - at the edges of these steps a small change
in the threshold considerably reduces precision, with only a minor gain in
recall. See the corner at recall = .59, precision = .8 for an example of this
phenomenon.
Precision-recall curves are typically used in binary classification to study
the output of a classifier. In order to extend Precision-recall curve and
average precision to multi-class or multi-label classification, it is necessary
to binarize the output. One curve can be drawn per label, but one can also draw
a precision-recall curve by considering each element of the label indicator
matrix as a binary prediction (micro-averaging).
.. note::
See also :func:`sklearn.metrics.average_precision_score`,
:func:`sklearn.metrics.recall_score`,
:func:`sklearn.metrics.precision_score`,
:func:`sklearn.metrics.f1_score`
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm, datasets
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Binarize the output
y = label_binarize(y, classes=[0, 1, 2])
n_classes = y.shape[1]
# Add noisy features
random_state = np.random.RandomState(0)
n_samples, n_features = X.shape
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# Split into training and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=random_state)
# Run classifier
classifier = OneVsRestClassifier(svm.SVC(kernel='linear', probability=True,
random_state=random_state))
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute Precision-Recall and plot curve
precision = dict()
recall = dict()
average_precision = dict()
for i in range(n_classes):
precision[i], recall[i], _ = precision_recall_curve(y_test[:, i],
y_score[:, i])
average_precision[i] = average_precision_score(y_test[:, i], y_score[:, i])
# Compute micro-average precision-recall curve and average precision
precision["micro"], recall["micro"], _ = precision_recall_curve(y_test.ravel(),
y_score.ravel())
average_precision["micro"] = average_precision_score(y_test, y_score,
average="micro")
# Plot Precision-Recall curve
plt.clf()
plt.plot(recall[0], precision[0], label='Precision-Recall curve')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.ylim([0.0, 1.05])
plt.xlim([0.0, 1.0])
plt.title('Precision-Recall example: AUC={0:0.2f}'.format(average_precision[0]))
plt.legend(loc="lower left")
plt.show()
# Plot Precision-Recall curve for each class
plt.clf()
plt.plot(recall["micro"], precision["micro"],
label='micro-average Precision-recall curve (area = {0:0.2f})'
''.format(average_precision["micro"]))
for i in range(n_classes):
plt.plot(recall[i], precision[i],
label='Precision-recall curve of class {0} (area = {1:0.2f})'
''.format(i, average_precision[i]))
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Extension of Precision-Recall curve to multi-class')
plt.legend(loc="lower right")
plt.show()
|
bsd-3-clause
|
gfyoung/pandas
|
pandas/core/aggregation.py
|
1
|
24635
|
"""
aggregation.py contains utility functions to handle multiple named and lambda
kwarg aggregations in groupby and DataFrame/Series aggregation
"""
from __future__ import annotations
from collections import defaultdict
from functools import partial
from typing import (
TYPE_CHECKING,
Any,
Callable,
DefaultDict,
Dict,
Hashable,
Iterable,
List,
Optional,
Sequence,
Tuple,
Union,
cast,
)
from pandas._typing import (
AggFuncType,
AggFuncTypeBase,
AggFuncTypeDict,
AggObjType,
Axis,
FrameOrSeries,
FrameOrSeriesUnion,
)
from pandas.core.dtypes.cast import is_nested_object
from pandas.core.dtypes.common import is_dict_like, is_list_like
from pandas.core.dtypes.generic import ABCDataFrame, ABCNDFrame, ABCSeries
from pandas.core.algorithms import safe_sort
from pandas.core.base import DataError, SpecificationError
import pandas.core.common as com
from pandas.core.indexes.api import Index
if TYPE_CHECKING:
from pandas.core.series import Series
def reconstruct_func(
func: Optional[AggFuncType], **kwargs
) -> Tuple[bool, Optional[AggFuncType], Optional[List[str]], Optional[List[int]]]:
"""
Internal function to reconstruct ``func`` depending on whether relabeling is
applied, and to normalize the keyword arguments to get the new order of columns.
If named aggregation is applied, `func` will be None, and kwargs contains the
column and aggregation function information to be parsed;
If named aggregation is not applied, `func` is either string (e.g. 'min') or
Callable, or list of them (e.g. ['min', np.max]), or the dictionary of column name
and str/Callable/list of them (e.g. {'A': 'min'}, or {'A': [np.min, lambda x: x]})
If relabeling is True, will return relabeling, reconstructed func, column
names, and the reconstructed order of columns.
If relabeling is False, the columns and order will be None.
Parameters
----------
func: agg function (e.g. 'min' or Callable) or list of agg functions
(e.g. ['min', np.max]) or dictionary (e.g. {'A': ['min', np.max]}).
**kwargs: dict, kwargs used in is_multi_agg_with_relabel and
normalize_keyword_aggregation function for relabelling
Returns
-------
relabelling: bool, if there is relabelling or not
func: normalized and mangled func
columns: list of column names
order: list of columns indices
Examples
--------
>>> reconstruct_func(None, **{"foo": ("col", "min")})
(True, defaultdict(<class 'list'>, {'col': ['min']}), ('foo',), array([0]))
>>> reconstruct_func("min")
(False, 'min', None, None)
"""
relabeling = func is None and is_multi_agg_with_relabel(**kwargs)
columns: Optional[List[str]] = None
order: Optional[List[int]] = None
if not relabeling:
if isinstance(func, list) and len(func) > len(set(func)):
# GH 28426 will raise error if duplicated function names are used and
# there is no reassigned name
raise SpecificationError(
"Function names must be unique if there is no new column names "
"assigned"
)
elif func is None:
# nicer error message
raise TypeError("Must provide 'func' or tuples of '(column, aggfunc).")
if relabeling:
func, columns, order = normalize_keyword_aggregation(kwargs)
return relabeling, func, columns, order
def is_multi_agg_with_relabel(**kwargs) -> bool:
"""
Check whether kwargs passed to .agg look like multi-agg with relabeling.
Parameters
----------
**kwargs : dict
Returns
-------
bool
Examples
--------
>>> is_multi_agg_with_relabel(a="max")
False
>>> is_multi_agg_with_relabel(a_max=("a", "max"), a_min=("a", "min"))
True
>>> is_multi_agg_with_relabel()
False
"""
return all(isinstance(v, tuple) and len(v) == 2 for v in kwargs.values()) and (
len(kwargs) > 0
)
def normalize_keyword_aggregation(kwargs: dict) -> Tuple[dict, List[str], List[int]]:
"""
Normalize user-provided "named aggregation" kwargs.
Transforms from the new ``Mapping[str, NamedAgg]`` style kwargs
to the old ``Dict[str, List[scalar]]`` style.
Parameters
----------
kwargs : dict
Returns
-------
aggspec : dict
The transformed kwargs.
columns : List[str]
The user-provided keys.
col_idx_order : List[int]
List of columns indices.
Examples
--------
>>> normalize_keyword_aggregation({"output": ("input", "sum")})
(defaultdict(<class 'list'>, {'input': ['sum']}), ('output',), array([0]))
"""
# Normalize the aggregation functions as Mapping[column, List[func]],
# process normally, then fixup the names.
# TODO: aggspec type: typing.Dict[str, List[AggScalar]]
# May be hitting https://github.com/python/mypy/issues/5958
# saying it doesn't have an attribute __name__
aggspec: DefaultDict = defaultdict(list)
order = []
columns, pairs = list(zip(*kwargs.items()))
for name, (column, aggfunc) in zip(columns, pairs):
aggspec[column].append(aggfunc)
order.append((column, com.get_callable_name(aggfunc) or aggfunc))
# uniquify aggfunc name if duplicated in order list
uniquified_order = _make_unique_kwarg_list(order)
# GH 25719, due to aggspec will change the order of assigned columns in aggregation
# uniquified_aggspec will store uniquified order list and will compare it with order
# based on index
aggspec_order = [
(column, com.get_callable_name(aggfunc) or aggfunc)
for column, aggfuncs in aggspec.items()
for aggfunc in aggfuncs
]
uniquified_aggspec = _make_unique_kwarg_list(aggspec_order)
# get the new index of columns by comparison
col_idx_order = Index(uniquified_aggspec).get_indexer(uniquified_order)
return aggspec, columns, col_idx_order
def _make_unique_kwarg_list(
seq: Sequence[Tuple[Any, Any]]
) -> Sequence[Tuple[Any, Any]]:
"""
Uniquify aggfunc name of the pairs in the order list
Examples
--------
>>> kwarg_list = [('a', '<lambda>'), ('a', '<lambda>'), ('b', '<lambda>')]
>>> _make_unique_kwarg_list(kwarg_list)
[('a', '<lambda>_0'), ('a', '<lambda>_1'), ('b', '<lambda>')]
"""
return [
(pair[0], "_".join([pair[1], str(seq[:i].count(pair))]))
if seq.count(pair) > 1
else pair
for i, pair in enumerate(seq)
]
# TODO: Can't use, because mypy doesn't like us setting __name__
# error: "partial[Any]" has no attribute "__name__"
# the type is:
# typing.Sequence[Callable[..., ScalarResult]]
# -> typing.Sequence[Callable[..., ScalarResult]]:
def _managle_lambda_list(aggfuncs: Sequence[Any]) -> Sequence[Any]:
"""
Possibly mangle a list of aggfuncs.
Parameters
----------
aggfuncs : Sequence
Returns
-------
mangled: list-like
A new AggSpec sequence, where lambdas have been converted
to have unique names.
Notes
-----
If just one aggfunc is passed, the name will not be mangled.
"""
if len(aggfuncs) <= 1:
# don't mangle for .agg([lambda x: .])
return aggfuncs
i = 0
mangled_aggfuncs = []
for aggfunc in aggfuncs:
if com.get_callable_name(aggfunc) == "<lambda>":
aggfunc = partial(aggfunc)
aggfunc.__name__ = f"<lambda_{i}>"
i += 1
mangled_aggfuncs.append(aggfunc)
return mangled_aggfuncs
def maybe_mangle_lambdas(agg_spec: Any) -> Any:
"""
Make new lambdas with unique names.
Parameters
----------
agg_spec : Any
An argument to GroupBy.agg.
Non-dict-like `agg_spec` are passed through as-is.
For dict-like `agg_spec` a new spec is returned
with name-mangled lambdas.
Returns
-------
mangled : Any
Same type as the input.
Examples
--------
>>> maybe_mangle_lambdas('sum')
'sum'
>>> maybe_mangle_lambdas([lambda: 1, lambda: 2]) # doctest: +SKIP
[<function __main__.<lambda_0>,
<function pandas...._make_lambda.<locals>.f(*args, **kwargs)>]
"""
is_dict = is_dict_like(agg_spec)
if not (is_dict or is_list_like(agg_spec)):
return agg_spec
mangled_aggspec = type(agg_spec)() # dict or OrderedDict
if is_dict:
for key, aggfuncs in agg_spec.items():
if is_list_like(aggfuncs) and not is_dict_like(aggfuncs):
mangled_aggfuncs = _managle_lambda_list(aggfuncs)
else:
mangled_aggfuncs = aggfuncs
mangled_aggspec[key] = mangled_aggfuncs
else:
mangled_aggspec = _managle_lambda_list(agg_spec)
return mangled_aggspec
def relabel_result(
result: FrameOrSeries,
func: Dict[str, List[Union[Callable, str]]],
columns: Iterable[Hashable],
order: Iterable[int],
) -> Dict[Hashable, Series]:
"""
Internal function to reorder result if relabelling is True for
dataframe.agg, and return the reordered result in dict.
Parameters
----------
result: Result from aggregation
func: Dict of (column name, funcs)
columns: New columns name for relabelling
order: New order for relabelling
Examples
--------
>>> result = DataFrame({"A": [np.nan, 2, np.nan],
... "C": [6, np.nan, np.nan], "B": [np.nan, 4, 2.5]}) # doctest: +SKIP
>>> funcs = {"A": ["max"], "C": ["max"], "B": ["mean", "min"]}
>>> columns = ("foo", "aab", "bar", "dat")
>>> order = [0, 1, 2, 3]
>>> _relabel_result(result, func, columns, order) # doctest: +SKIP
dict(A=Series([2.0, NaN, NaN, NaN], index=["foo", "aab", "bar", "dat"]),
C=Series([NaN, 6.0, NaN, NaN], index=["foo", "aab", "bar", "dat"]),
B=Series([NaN, NaN, 2.5, 4.0], index=["foo", "aab", "bar", "dat"]))
"""
reordered_indexes = [
pair[0] for pair in sorted(zip(columns, order), key=lambda t: t[1])
]
reordered_result_in_dict: Dict[Hashable, Series] = {}
idx = 0
reorder_mask = not isinstance(result, ABCSeries) and len(result.columns) > 1
for col, fun in func.items():
s = result[col].dropna()
# In the `_aggregate`, the callable names are obtained and used in `result`, and
# these names are ordered alphabetically. e.g.
# C2 C1
# <lambda> 1 NaN
# amax NaN 4.0
# max NaN 4.0
# sum 18.0 6.0
# Therefore, the order of functions for each column could be shuffled
# accordingly, so we need to get the callable name if it is not a parsed name,
# and reorder the aggregated result for each column.
# e.g. if df.agg(c1=("C2", sum), c2=("C2", lambda x: min(x))), the correct order is
# [sum, <lambda>], but in `result` it will be [<lambda>, sum], and we need to
# reorder so that aggregated values map to their functions with respect to the order.
# However, when only one column is used for aggregation, there is no need to
# reorder since the index is not sorted, and it is kept as-is in `funcs`, e.g.
# A
# min 1.0
# mean 1.5
# mean 1.5
if reorder_mask:
fun = [
com.get_callable_name(f) if not isinstance(f, str) else f for f in fun
]
col_idx_order = Index(s.index).get_indexer(fun)
s = s[col_idx_order]
# assign the new user-provided "named aggregation" as index names, and reindex
# it based on the whole user-provided names.
s.index = reordered_indexes[idx : idx + len(fun)]
reordered_result_in_dict[col] = s.reindex(columns, copy=False)
idx = idx + len(fun)
return reordered_result_in_dict
def validate_func_kwargs(
kwargs: dict,
) -> Tuple[List[str], List[Union[str, Callable[..., Any]]]]:
"""
Validates types of user-provided "named aggregation" kwargs.
`TypeError` is raised if aggfunc is not `str` or callable.
Parameters
----------
kwargs : dict
Returns
-------
columns : List[str]
List of user-provided keys.
func : List[Union[str, callable[...,Any]]]
List of user-provided aggfuncs
Examples
--------
>>> validate_func_kwargs({'one': 'min', 'two': 'max'})
(['one', 'two'], ['min', 'max'])
"""
tuple_given_message = "func is expected but received {} in **kwargs."
columns = list(kwargs)
func = []
for col_func in kwargs.values():
if not (isinstance(col_func, str) or callable(col_func)):
raise TypeError(tuple_given_message.format(type(col_func).__name__))
func.append(col_func)
if not columns:
no_arg_message = "Must provide 'func' or named aggregation **kwargs."
raise TypeError(no_arg_message)
return columns, func
def transform(
obj: FrameOrSeries, func: AggFuncType, axis: Axis, *args, **kwargs
) -> FrameOrSeriesUnion:
"""
Transform a DataFrame or Series
Parameters
----------
obj : DataFrame or Series
Object to compute the transform on.
func : string, function, list, or dictionary
Function(s) to compute the transform with.
axis : {0 or 'index', 1 or 'columns'}
Axis along which the function is applied:
* 0 or 'index': apply function to each column.
* 1 or 'columns': apply function to each row.
Returns
-------
DataFrame or Series
Result of applying ``func`` along the given axis of the
Series or DataFrame.
Raises
------
ValueError
If the transform function fails or does not transform.
"""
is_series = obj.ndim == 1
if obj._get_axis_number(axis) == 1:
assert not is_series
return transform(obj.T, func, 0, *args, **kwargs).T
if is_list_like(func) and not is_dict_like(func):
func = cast(List[AggFuncTypeBase], func)
        # Convert func to an equivalent dict
if is_series:
func = {com.get_callable_name(v) or v: v for v in func}
else:
func = {col: func for col in obj}
if is_dict_like(func):
func = cast(AggFuncTypeDict, func)
return transform_dict_like(obj, func, *args, **kwargs)
# func is either str or callable
func = cast(AggFuncTypeBase, func)
try:
result = transform_str_or_callable(obj, func, *args, **kwargs)
    except Exception as err:
        raise ValueError("Transform function failed") from err
# Functions that transform may return empty Series/DataFrame
# when the dtype is not appropriate
if isinstance(result, (ABCSeries, ABCDataFrame)) and result.empty:
raise ValueError("Transform function failed")
if not isinstance(result, (ABCSeries, ABCDataFrame)) or not result.index.equals(
obj.index
):
raise ValueError("Function did not transform")
return result
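# Illustrative sketch (not part of the original module) of how ``transform``
# dispatches on the type of ``func``; ``df`` below is an assumed example frame.
#
#   >>> df = pd.DataFrame({"A": [1.0, 4.0, 9.0]})
#   >>> transform(df, "sqrt", axis=0)            # str      -> transform_str_or_callable
#   >>> transform(df, np.sqrt, axis=0)           # callable -> transform_str_or_callable
#   >>> transform(df, [np.sqrt], axis=0)         # list     -> normalized to a dict, then transform_dict_like
#   >>> transform(df, {"A": np.sqrt}, axis=0)    # dict     -> transform_dict_like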
def transform_dict_like(
obj: FrameOrSeries,
func: AggFuncTypeDict,
*args,
**kwargs,
):
"""
Compute transform in the case of a dict-like func
"""
from pandas.core.reshape.concat import concat
if len(func) == 0:
raise ValueError("No transform functions were provided")
if obj.ndim != 1:
# Check for missing columns on a frame
cols = set(func.keys()) - set(obj.columns)
if len(cols) > 0:
cols_sorted = list(safe_sort(list(cols)))
raise SpecificationError(f"Column(s) {cols_sorted} do not exist")
# Can't use func.values(); wouldn't work for a Series
if any(is_dict_like(v) for _, v in func.items()):
# GH 15931 - deprecation of renaming keys
raise SpecificationError("nested renamer is not supported")
results: Dict[Hashable, FrameOrSeriesUnion] = {}
for name, how in func.items():
colg = obj._gotitem(name, ndim=1)
try:
results[name] = transform(colg, how, 0, *args, **kwargs)
except Exception as err:
if str(err) in {
"Function did not transform",
"No transform functions were provided",
}:
raise err
# combine results
if not results:
raise ValueError("Transform function failed")
return concat(results, axis=1)
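# Illustrative sketch (assumed names, not part of the original module): each
# key of ``func`` selects a column, each value is transformed independently,
# and the per-column results are concatenated along axis=1.
#
#   >>> df = pd.DataFrame({"A": [1.0, 4.0], "B": [1.0, 2.0]})
#   >>> transform_dict_like(df, {"A": np.sqrt, "B": lambda x: x + 1})
#   # -> DataFrame with the transformed "A" and "B" columns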
def transform_str_or_callable(
obj: FrameOrSeries, func: AggFuncTypeBase, *args, **kwargs
) -> FrameOrSeriesUnion:
"""
Compute transform in the case of a string or callable func
"""
if isinstance(func, str):
return obj._try_aggregate_string_function(func, *args, **kwargs)
if not args and not kwargs:
f = obj._get_cython_func(func)
if f:
return getattr(obj, f)()
# Two possible ways to use a UDF - apply or call directly
try:
return obj.apply(func, args=args, **kwargs)
except Exception:
return func(obj, *args, **kwargs)
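# Illustrative note (assumed behaviour for an example Series ``s``): a string
# goes through the pandas string-function dispatch, while a callable with no
# extra arguments may hit the cython fast path before falling back to
# ``obj.apply`` or a direct call.
#
#   >>> s = pd.Series([1.0, 4.0, 9.0])
#   >>> transform_str_or_callable(s, "cumsum")   # string dispatch
#   >>> transform_str_or_callable(s, np.sqrt)    # apply / direct call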
def aggregate(
obj: AggObjType,
arg: AggFuncType,
*args,
**kwargs,
):
"""
Provide an implementation for the aggregators.
Parameters
----------
obj : Pandas object to compute aggregation on.
arg : string, dict, function.
*args : args to pass on to the function.
**kwargs : kwargs to pass on to the function.
Returns
-------
tuple of result, how.
Notes
-----
    how can be a string describing the required post-processing, or
None if not required.
"""
_axis = kwargs.pop("_axis", None)
if _axis is None:
_axis = getattr(obj, "axis", 0)
if isinstance(arg, str):
return obj._try_aggregate_string_function(arg, *args, **kwargs), None
elif is_dict_like(arg):
arg = cast(AggFuncTypeDict, arg)
return agg_dict_like(obj, arg, _axis), True
elif is_list_like(arg):
# we require a list, but not an 'str'
arg = cast(List[AggFuncTypeBase], arg)
return agg_list_like(obj, arg, _axis=_axis), None
else:
result = None
if callable(arg):
f = obj._get_cython_func(arg)
if f and not args and not kwargs:
return getattr(obj, f)(), None
# caller can react
return result, True
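# Illustrative sketch (assumed names) of the dispatch performed by
# ``aggregate``; the second element of the returned tuple tells the caller
# whether further post-processing is needed.
#
#   >>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
#   >>> aggregate(df, "sum")              # str  -> (string dispatch result, None)
#   >>> aggregate(df, ["sum", "mean"])    # list -> (agg_list_like(...), None)
#   >>> aggregate(df, {"A": "sum"})       # dict -> (agg_dict_like(...), True)
#   >>> aggregate(df, np.sum)             # callable -> cython fast path or (None, True)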
def agg_list_like(
obj: AggObjType,
arg: List[AggFuncTypeBase],
_axis: int,
) -> FrameOrSeriesUnion:
"""
Compute aggregation in the case of a list-like argument.
Parameters
----------
obj : Pandas object to compute aggregation on.
arg : list
Aggregations to compute.
_axis : int, 0 or 1
Axis to compute aggregation on.
Returns
-------
Result of aggregation.
"""
from pandas.core.reshape.concat import concat
if _axis != 0:
raise NotImplementedError("axis other than 0 is not supported")
if obj._selected_obj.ndim == 1:
selected_obj = obj._selected_obj
else:
selected_obj = obj._obj_with_exclusions
results = []
keys = []
# degenerate case
if selected_obj.ndim == 1:
for a in arg:
colg = obj._gotitem(selected_obj.name, ndim=1, subset=selected_obj)
try:
new_res = colg.aggregate(a)
except TypeError:
pass
else:
results.append(new_res)
# make sure we find a good name
name = com.get_callable_name(a) or a
keys.append(name)
# multiples
else:
for index, col in enumerate(selected_obj):
colg = obj._gotitem(col, ndim=1, subset=selected_obj.iloc[:, index])
try:
new_res = colg.aggregate(arg)
except (TypeError, DataError):
pass
except ValueError as err:
# cannot aggregate
if "Must produce aggregated value" in str(err):
# raised directly in _aggregate_named
pass
elif "no results" in str(err):
# raised directly in _aggregate_multiple_funcs
pass
else:
raise
else:
results.append(new_res)
keys.append(col)
# if we are empty
if not len(results):
raise ValueError("no results")
try:
return concat(results, keys=keys, axis=1, sort=False)
except TypeError as err:
# we are concatting non-NDFrame objects,
# e.g. a list of scalars
from pandas import Series
result = Series(results, index=keys, name=obj.name)
if is_nested_object(result):
raise ValueError(
"cannot combine transform and aggregation operations"
) from err
return result
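# Illustrative note (assumed usage): public entry points such as
# ``df.agg(["min", "max"])`` end up here; the per-column results are
# concatenated with the column names as keys, and for a one-dimensional
# selection the keys are the (possibly derived) function names instead.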
def agg_dict_like(
obj: AggObjType,
arg: AggFuncTypeDict,
_axis: int,
) -> FrameOrSeriesUnion:
"""
Compute aggregation in the case of a dict-like argument.
Parameters
----------
obj : Pandas object to compute aggregation on.
arg : dict
label-aggregation pairs to compute.
_axis : int, 0 or 1
Axis to compute aggregation on.
Returns
-------
Result of aggregation.
"""
is_aggregator = lambda x: isinstance(x, (list, tuple, dict))
if _axis != 0: # pragma: no cover
raise ValueError("Can only pass dict with axis=0")
selected_obj = obj._selected_obj
# if we have a dict of any non-scalars
# eg. {'A' : ['mean']}, normalize all to
# be list-likes
# Cannot use arg.values() because arg may be a Series
if any(is_aggregator(x) for _, x in arg.items()):
new_arg: AggFuncTypeDict = {}
for k, v in arg.items():
if not isinstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
# the keys must be in the columns
# for ndim=2, or renamers for ndim=1
# ok for now, but deprecated
# {'A': { 'ra': 'mean' }}
# {'A': { 'ra': ['mean'] }}
# {'ra': ['mean']}
# not ok
# {'ra' : { 'A' : 'mean' }}
if isinstance(v, dict):
raise SpecificationError("nested renamer is not supported")
elif isinstance(selected_obj, ABCSeries):
raise SpecificationError("nested renamer is not supported")
elif (
isinstance(selected_obj, ABCDataFrame) and k not in selected_obj.columns
):
raise KeyError(f"Column '{k}' does not exist!")
arg = new_arg
else:
# deprecation of renaming keys
# GH 15931
keys = list(arg.keys())
if isinstance(selected_obj, ABCDataFrame) and len(
selected_obj.columns.intersection(keys)
) != len(keys):
cols = list(
safe_sort(
list(set(keys) - set(selected_obj.columns.intersection(keys))),
)
)
raise SpecificationError(f"Column(s) {cols} do not exist")
from pandas.core.reshape.concat import concat
if selected_obj.ndim == 1:
# key only used for output
colg = obj._gotitem(obj._selection, ndim=1)
results = {key: colg.agg(how) for key, how in arg.items()}
else:
# key used for column selection and output
results = {key: obj._gotitem(key, ndim=1).agg(how) for key, how in arg.items()}
# set the final keys
keys = list(arg.keys())
# Avoid making two isinstance calls in all and any below
is_ndframe = [isinstance(r, ABCNDFrame) for r in results.values()]
# combine results
if all(is_ndframe):
keys_to_use = [k for k in keys if not results[k].empty]
# Have to check, if at least one DataFrame is not empty.
keys_to_use = keys_to_use if keys_to_use != [] else keys
axis = 0 if isinstance(obj, ABCSeries) else 1
result = concat({k: results[k] for k in keys_to_use}, axis=axis)
elif any(is_ndframe):
# There is a mix of NDFrames and scalars
raise ValueError(
"cannot perform both aggregation "
"and transformation operations "
"simultaneously"
)
else:
from pandas import Series
# we have a dict of scalars
# GH 36212 use name only if obj is a series
if obj.ndim == 1:
obj = cast("Series", obj)
name = obj.name
else:
name = None
result = Series(results, name=name)
return result
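# Illustrative sketch (assumed names, not part of the original module): dict
# keys select columns of a DataFrame (or label the outputs for a Series) and
# the values name the aggregations; nested renamers are rejected.
#
#   >>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
#   >>> agg_dict_like(df, {"A": "sum", "B": ["min", "max"]}, _axis=0)
#   # -> per-column results concatenated along axis=1
#   >>> agg_dict_like(df, {"A": {"ra": "mean"}}, _axis=0)
#   # -> SpecificationError: nested renamer is not supported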
|
bsd-3-clause
|
hlin117/statsmodels
|
statsmodels/duration/tests/test_phreg.py
|
10
|
11984
|
import os
import numpy as np
from statsmodels.duration.hazard_regression import PHReg
from numpy.testing import (assert_allclose,
assert_equal)
import pandas as pd
# TODO: Include some corner cases: data sets with empty strata, strata
# with no events, entry times after censoring times, etc.
# All the R results
from . import survival_r_results
from . import survival_enet_r_results
"""
Tests of PHReg against R coxph.
Tests include entry times and stratification.
phreg_gentests.py generates the test data sets and puts them into the
results folder.
survival.R runs R on all the test data sets and constructs the
survival_r_results module.
"""
# Arguments passed to the PHReg fit method.
args = {"method": "bfgs", "disp": 0}
def get_results(n, p, ext, ties):
if ext is None:
coef_name = "coef_%d_%d_%s" % (n, p, ties)
se_name = "se_%d_%d_%s" % (n, p, ties)
time_name = "time_%d_%d_%s" % (n, p, ties)
hazard_name = "hazard_%d_%d_%s" % (n, p, ties)
else:
coef_name = "coef_%d_%d_%s_%s" % (n, p, ext, ties)
se_name = "se_%d_%d_%s_%s" % (n, p, ext, ties)
time_name = "time_%d_%d_%s_%s" % (n, p, ext, ties)
hazard_name = "hazard_%d_%d_%s_%s" % (n, p, ext, ties)
coef = getattr(survival_r_results, coef_name)
se = getattr(survival_r_results, se_name)
time = getattr(survival_r_results, time_name)
hazard = getattr(survival_r_results, hazard_name)
return coef, se, time, hazard
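# Illustrative note (hypothetical sizes): for n=20, p=1, no extension and
# "bre" ties the lookups resolve to survival_r_results.coef_20_1_bre,
# se_20_1_bre, time_20_1_bre and hazard_20_1_bre.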
class TestPHReg(object):
# Load a data file from the results directory
def load_file(self, fname):
cur_dir = os.path.dirname(os.path.abspath(__file__))
data = np.genfromtxt(os.path.join(cur_dir, 'results', fname),
delimiter=" ")
time = data[:,0]
status = data[:,1]
entry = data[:,2]
exog = data[:,3:]
return time, status, entry, exog
# Run a single test against R output
def do1(self, fname, ties, entry_f, strata_f):
# Read the test data.
time, status, entry, exog = self.load_file(fname)
n = len(time)
vs = fname.split("_")
n = int(vs[2])
p = int(vs[3].split(".")[0])
ties1 = ties[0:3]
# Needs to match the kronecker statement in survival.R
strata = np.kron(range(5), np.ones(n // 5))
# No stratification or entry times
mod = PHReg(time, exog, status, ties=ties)
phrb = mod.fit(**args)
coef_r, se_r, time_r, hazard_r = get_results(n, p, None, ties1)
assert_allclose(phrb.params, coef_r, rtol=1e-3)
assert_allclose(phrb.bse, se_r, rtol=1e-4)
#time_h, cumhaz, surv = phrb.baseline_hazard[0]
# Entry times but no stratification
phrb = PHReg(time, exog, status, entry=entry,
ties=ties).fit(**args)
coef, se, time_r, hazard_r = get_results(n, p, "et", ties1)
assert_allclose(phrb.params, coef, rtol=1e-3)
assert_allclose(phrb.bse, se, rtol=1e-3)
# Stratification but no entry times
phrb = PHReg(time, exog, status, strata=strata,
ties=ties).fit(**args)
coef, se, time_r, hazard_r = get_results(n, p, "st", ties1)
assert_allclose(phrb.params, coef, rtol=1e-4)
assert_allclose(phrb.bse, se, rtol=1e-4)
# Stratification and entry times
phrb = PHReg(time, exog, status, entry=entry,
strata=strata, ties=ties).fit(**args)
coef, se, time_r, hazard_r = get_results(n, p, "et_st", ties1)
assert_allclose(phrb.params, coef, rtol=1e-3)
assert_allclose(phrb.bse, se, rtol=1e-4)
# Run all the tests
def test_r(self):
cur_dir = os.path.dirname(os.path.abspath(__file__))
rdir = os.path.join(cur_dir, 'results')
fnames = os.listdir(rdir)
fnames = [x for x in fnames if x.startswith("survival")
and x.endswith(".csv")]
for fname in fnames:
for ties in "breslow","efron":
for entry_f in False,True:
for strata_f in False,True:
yield (self.do1, fname, ties, entry_f,
strata_f)
def test_missing(self):
np.random.seed(34234)
time = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
time[0:5] = np.nan
status[5:10] = np.nan
exog[10:15,:] = np.nan
md = PHReg(time, exog, status, missing='drop')
assert_allclose(len(md.endog), 185)
assert_allclose(len(md.status), 185)
assert_allclose(md.exog.shape, np.r_[185,4])
def test_formula(self):
np.random.seed(34234)
time = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
entry = np.zeros_like(time)
entry[0:10] = time[0:10] / 2
df = pd.DataFrame({"time": time, "status": status,
"exog1": exog[:, 0], "exog2": exog[:, 1],
"exog3": exog[:, 2], "exog4": exog[:, 3],
"entry": entry})
mod1 = PHReg(time, exog, status, entry=entry)
rslt1 = mod1.fit()
fml = "time ~ 0 + exog1 + exog2 + exog3 + exog4"
mod2 = PHReg.from_formula(fml, df, status=status,
entry=entry)
rslt2 = mod2.fit()
mod3 = PHReg.from_formula(fml, df, status="status",
entry="entry")
rslt3 = mod3.fit()
assert_allclose(rslt1.params, rslt2.params)
assert_allclose(rslt1.params, rslt3.params)
assert_allclose(rslt1.bse, rslt2.bse)
assert_allclose(rslt1.bse, rslt3.bse)
def test_predict_formula(self):
n = 100
np.random.seed(34234)
time = 50 * np.random.uniform(size=n)
status = np.random.randint(0, 2, n).astype(np.float64)
exog = np.random.uniform(1, 2, size=(n, 2))
df = pd.DataFrame({"time": time, "status": status,
"exog1": exog[:, 0], "exog2": exog[:, 1]})
fml = "time ~ 0 + exog1 + np.log(exog2) + exog1*exog2"
model1 = PHReg.from_formula(fml, df, status=status)
result1 = model1.fit()
from patsy import dmatrix
dfp = dmatrix(model1.data.design_info.builder, df)
pr1 = result1.predict()
pr2 = result1.predict(exog=df)
pr3 = model1.predict(result1.params, exog=dfp) # No standard errors
pr4 = model1.predict(result1.params, cov_params=result1.cov_params(), exog=dfp)
prl = (pr1, pr2, pr3, pr4)
for i in range(4):
for j in range(i):
assert_allclose(prl[i].predicted_values, prl[j].predicted_values)
prl = (pr1, pr2, pr4)
for i in range(3):
for j in range(i):
assert_allclose(prl[i].standard_errors, prl[j].standard_errors)
def test_offset(self):
np.random.seed(34234)
time = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
mod1 = PHReg(time, exog, status)
rslt1 = mod1.fit()
offset = exog[:,0] * rslt1.params[0]
exog = exog[:, 1:]
mod2 = PHReg(time, exog, status, offset=offset)
rslt2 = mod2.fit()
assert_allclose(rslt2.params, rslt1.params[1:])
def test_post_estimation(self):
# All regression tests
np.random.seed(34234)
time = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
mod = PHReg(time, exog, status)
rslt = mod.fit()
mart_resid = rslt.martingale_residuals
assert_allclose(np.abs(mart_resid).sum(), 120.72475743348433)
w_avg = rslt.weighted_covariate_averages
assert_allclose(np.abs(w_avg[0]).sum(0),
np.r_[7.31008415, 9.77608674,10.89515885, 13.1106801])
bc_haz = rslt.baseline_cumulative_hazard
v = [np.mean(np.abs(x)) for x in bc_haz[0]]
w = np.r_[23.482841556421608, 0.44149255358417017,
0.68660114081275281]
assert_allclose(v, w)
score_resid = rslt.score_residuals
v = np.r_[ 0.50924792, 0.4533952, 0.4876718, 0.5441128]
w = np.abs(score_resid).mean(0)
assert_allclose(v, w)
groups = np.random.randint(0, 3, 200)
mod = PHReg(time, exog, status)
rslt = mod.fit(groups=groups)
robust_cov = rslt.cov_params()
v = [0.00513432, 0.01278423, 0.00810427, 0.00293147]
w = np.abs(robust_cov).mean(0)
assert_allclose(v, w, rtol=1e-6)
s_resid = rslt.schoenfeld_residuals
ii = np.flatnonzero(np.isfinite(s_resid).all(1))
s_resid = s_resid[ii, :]
v = np.r_[0.85154336, 0.72993748, 0.73758071, 0.78599333]
assert_allclose(np.abs(s_resid).mean(0), v)
def test_summary(self):
# smoke test
np.random.seed(34234)
time = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
mod = PHReg(time, exog, status)
rslt = mod.fit()
rslt.summary()
def test_predict(self):
# All smoke tests. We should be able to convert the lhr and hr
# tests into real tests against R. There are many options to
# this function that may interact in complicated ways. Only a
# few key combinations are tested here.
np.random.seed(34234)
endog = 50 * np.random.uniform(size=200)
status = np.random.randint(0, 2, 200).astype(np.float64)
exog = np.random.normal(size=(200,4))
mod = PHReg(endog, exog, status)
rslt = mod.fit()
rslt.predict()
for pred_type in 'lhr', 'hr', 'cumhaz', 'surv':
rslt.predict(pred_type=pred_type)
rslt.predict(endog=endog[0:10], pred_type=pred_type)
rslt.predict(endog=endog[0:10], exog=exog[0:10,:],
pred_type=pred_type)
def test_get_distribution(self):
# Smoke test
np.random.seed(34234)
exog = np.random.normal(size=(200, 2))
lin_pred = exog.sum(1)
elin_pred = np.exp(-lin_pred)
time = -elin_pred * np.log(np.random.uniform(size=200))
mod = PHReg(time, exog)
rslt = mod.fit()
dist = rslt.get_distribution()
fitted_means = dist.mean()
true_means = elin_pred
fitted_var = dist.var()
fitted_sd = dist.std()
sample = dist.rvs()
def test_fit_regularized(self):
# Data set sizes
for n,p in (50,2),(100,5):
# Penalty weights
for js,s in enumerate([0,0.1]):
coef_name = "coef_%d_%d_%d" % (n, p, js)
coef = getattr(survival_enet_r_results, coef_name)
fname = "survival_data_%d_%d.csv" % (n, p)
time, status, entry, exog = self.load_file(fname)
exog -= exog.mean(0)
exog /= exog.std(0, ddof=1)
mod = PHReg(time, exog, status=status, ties='breslow')
rslt = mod.fit_regularized(alpha=s)
                # The agreement isn't very high; the issue may be on
# their side. They seem to use some approximations
# that we are not using.
assert_allclose(rslt.params, coef, rtol=0.3)
# Smoke test for summary
smry = rslt.summary()
if __name__=="__main__":
import nose
nose.runmodule(argv=[__file__,'-vvs','-x','--pdb', '--pdb-failure'],
exit=False)
|
bsd-3-clause
|
justincassidy/scikit-learn
|
examples/ensemble/plot_forest_importances.py
|
241
|
1761
|
"""
=========================================
Feature importances with forests of trees
=========================================
This example shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(10):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(10), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(10), indices)
plt.xlim([-1, 10])
plt.show()
|
bsd-3-clause
|
lancezlin/ml_template_py
|
lib/python2.7/site-packages/matplotlib/backends/backend_pgf.py
|
7
|
36822
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import math
import os
import sys
import errno
import re
import shutil
import tempfile
import codecs
import atexit
import weakref
import warnings
import numpy as np
import matplotlib as mpl
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.figure import Figure
from matplotlib.text import Text
from matplotlib.path import Path
from matplotlib import _png, rcParams
from matplotlib.cbook import is_string_like, is_writable_file_like
from matplotlib.compat import subprocess
from matplotlib.compat.subprocess import check_output
###############################################################################
# create a list of system fonts, all of these should work with xe/lua-latex
system_fonts = []
if sys.platform.startswith('win'):
from matplotlib import font_manager
from matplotlib.ft2font import FT2Font
for f in font_manager.win32InstalledFonts():
try:
system_fonts.append(FT2Font(str(f)).family_name)
except:
pass # unknown error, skip this font
else:
# assuming fontconfig is installed and the command 'fc-list' exists
try:
# list scalable (non-bitmap) fonts
fc_list = check_output(['fc-list', ':outline,scalable', 'family'])
fc_list = fc_list.decode('utf8')
system_fonts = [f.split(',')[0] for f in fc_list.splitlines()]
system_fonts = list(set(system_fonts))
except:
warnings.warn('error getting fonts from fc-list', UserWarning)
def get_texcommand():
"""Get chosen TeX system from rc."""
texsystem_options = ["xelatex", "lualatex", "pdflatex"]
texsystem = rcParams.get("pgf.texsystem", "xelatex")
return texsystem if texsystem in texsystem_options else "xelatex"
def get_fontspec():
"""Build fontspec preamble from rc."""
latex_fontspec = []
texcommand = get_texcommand()
if texcommand != "pdflatex":
latex_fontspec.append("\\usepackage{fontspec}")
if texcommand != "pdflatex" and rcParams.get("pgf.rcfonts", True):
# try to find fonts from rc parameters
families = ["serif", "sans-serif", "monospace"]
fontspecs = [r"\setmainfont{%s}", r"\setsansfont{%s}",
r"\setmonofont{%s}"]
for family, fontspec in zip(families, fontspecs):
matches = [f for f in rcParams["font." + family]
if f in system_fonts]
if matches:
latex_fontspec.append(fontspec % matches[0])
else:
                pass  # no fonts found, fall back to the LaTeX default
return "\n".join(latex_fontspec)
def get_preamble():
"""Get LaTeX preamble from rc."""
latex_preamble = rcParams.get("pgf.preamble", "")
if type(latex_preamble) == list:
latex_preamble = "\n".join(latex_preamble)
return latex_preamble
###############################################################################
# This almost made me cry!!!
# In the end, it's better to use only one unit for all coordinates, since the
# arithmetic in latex seems to produce inaccurate conversions.
latex_pt_to_in = 1. / 72.27
latex_in_to_pt = 1. / latex_pt_to_in
mpl_pt_to_in = 1. / 72.
mpl_in_to_pt = 1. / mpl_pt_to_in
###############################################################################
# helper functions
NO_ESCAPE = r"(?<!\\)(?:\\\\)*"
re_mathsep = re.compile(NO_ESCAPE + r"\$")
re_escapetext = re.compile(NO_ESCAPE + "([_^$%])")
repl_escapetext = lambda m: "\\" + m.group(1)
re_mathdefault = re.compile(NO_ESCAPE + r"(\\mathdefault)")
repl_mathdefault = lambda m: m.group(0)[:-len(m.group(1))]
def common_texification(text):
"""
Do some necessary and/or useful substitutions for texts to be included in
LaTeX documents.
"""
# Sometimes, matplotlib adds the unknown command \mathdefault.
# Not using \mathnormal instead since this looks odd for the latex cm font.
text = re_mathdefault.sub(repl_mathdefault, text)
# split text into normaltext and inline math parts
parts = re_mathsep.split(text)
for i, s in enumerate(parts):
if not i % 2:
# textmode replacements
s = re_escapetext.sub(repl_escapetext, s)
else:
# mathmode replacements
s = r"\(\displaystyle %s\)" % s
parts[i] = s
return "".join(parts)
def writeln(fh, line):
# every line of a file included with \input must be terminated with %
# if not, latex will create additional vertical spaces for some reason
fh.write(line)
fh.write("%\n")
def _font_properties_str(prop):
# translate font properties to latex commands, return as string
commands = []
families = {"serif": r"\rmfamily", "sans": r"\sffamily",
"sans-serif": r"\sffamily", "monospace": r"\ttfamily"}
family = prop.get_family()[0]
if family in families:
commands.append(families[family])
elif family in system_fonts and get_texcommand() != "pdflatex":
commands.append(r"\setmainfont{%s}\rmfamily" % family)
else:
pass # print warning?
size = prop.get_size_in_points()
commands.append(r"\fontsize{%f}{%f}" % (size, size * 1.2))
styles = {"normal": r"", "italic": r"\itshape", "oblique": r"\slshape"}
commands.append(styles[prop.get_style()])
boldstyles = ["semibold", "demibold", "demi", "bold", "heavy",
"extra bold", "black"]
if prop.get_weight() in boldstyles:
commands.append(r"\bfseries")
commands.append(r"\selectfont")
return "".join(commands)
def make_pdf_to_png_converter():
"""
Returns a function that converts a pdf file to a png file.
"""
tools_available = []
# check for pdftocairo
try:
check_output(["pdftocairo", "-v"], stderr=subprocess.STDOUT)
tools_available.append("pdftocairo")
except:
pass
# check for ghostscript
gs, ver = mpl.checkdep_ghostscript()
if gs:
tools_available.append("gs")
# pick converter
if "pdftocairo" in tools_available:
def cairo_convert(pdffile, pngfile, dpi):
cmd = ["pdftocairo", "-singlefile", "-png",
"-r %d" % dpi, pdffile, os.path.splitext(pngfile)[0]]
# for some reason this doesn't work without shell
check_output(" ".join(cmd), shell=True, stderr=subprocess.STDOUT)
return cairo_convert
elif "gs" in tools_available:
def gs_convert(pdffile, pngfile, dpi):
cmd = [gs, '-dQUIET', '-dSAFER', '-dBATCH', '-dNOPAUSE', '-dNOPROMPT',
'-sDEVICE=png16m', '-dUseCIEColor', '-dTextAlphaBits=4',
'-dGraphicsAlphaBits=4', '-dDOINTERPOLATE', '-sOutputFile=%s' % pngfile,
'-r%d' % dpi, pdffile]
check_output(cmd, stderr=subprocess.STDOUT)
return gs_convert
else:
raise RuntimeError("No suitable pdf to png renderer found.")
class LatexError(Exception):
def __init__(self, message, latex_output=""):
Exception.__init__(self, message)
self.latex_output = latex_output
class LatexManagerFactory(object):
previous_instance = None
@staticmethod
def get_latex_manager():
texcommand = get_texcommand()
latex_header = LatexManager._build_latex_header()
prev = LatexManagerFactory.previous_instance
# check if the previous instance of LatexManager can be reused
if prev and prev.latex_header == latex_header and prev.texcommand == texcommand:
if rcParams.get("pgf.debug", False):
print("reusing LatexManager")
return prev
else:
if rcParams.get("pgf.debug", False):
print("creating LatexManager")
new_inst = LatexManager()
LatexManagerFactory.previous_instance = new_inst
return new_inst
class WeakSet(object):
# TODO: Poor man's weakref.WeakSet.
# Remove this once python 2.6 support is dropped from matplotlib.
def __init__(self):
self.weak_key_dict = weakref.WeakKeyDictionary()
def add(self, item):
self.weak_key_dict[item] = None
def discard(self, item):
if item in self.weak_key_dict:
del self.weak_key_dict[item]
def __iter__(self):
return six.iterkeys(self.weak_key_dict)
class LatexManager(object):
"""
The LatexManager opens an instance of the LaTeX application for
determining the metrics of text elements. The LaTeX environment can be
    modified by setting fonts and/or a custom preamble in the rc parameters.
"""
_unclean_instances = WeakSet()
@staticmethod
def _build_latex_header():
latex_preamble = get_preamble()
latex_fontspec = get_fontspec()
# Create LaTeX header with some content, else LaTeX will load some
# math fonts later when we don't expect the additional output on stdout.
# TODO: is this sufficient?
latex_header = [r"\documentclass{minimal}",
latex_preamble,
latex_fontspec,
r"\begin{document}",
r"text $math \mu$", # force latex to load fonts now
r"\typeout{pgf_backend_query_start}"]
return "\n".join(latex_header)
@staticmethod
def _cleanup_remaining_instances():
unclean_instances = list(LatexManager._unclean_instances)
for latex_manager in unclean_instances:
latex_manager._cleanup()
def _stdin_writeln(self, s):
self.latex_stdin_utf8.write(s)
self.latex_stdin_utf8.write("\n")
self.latex_stdin_utf8.flush()
def _expect(self, s):
exp = s.encode("utf8")
buf = bytearray()
while True:
b = self.latex.stdout.read(1)
buf += b
if buf[-len(exp):] == exp:
break
if not len(b):
raise LatexError("LaTeX process halted", buf.decode("utf8"))
return buf.decode("utf8")
def _expect_prompt(self):
return self._expect("\n*")
def __init__(self):
# store references for __del__
self._os_path = os.path
self._shutil = shutil
self._debug = rcParams.get("pgf.debug", False)
# create a tmp directory for running latex, remember to cleanup
self.tmpdir = tempfile.mkdtemp(prefix="mpl_pgf_lm_")
LatexManager._unclean_instances.add(self)
# test the LaTeX setup to ensure a clean startup of the subprocess
self.texcommand = get_texcommand()
self.latex_header = LatexManager._build_latex_header()
latex_end = "\n\\makeatletter\n\\@@end\n"
try:
latex = subprocess.Popen([self.texcommand, "-halt-on-error"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
cwd=self.tmpdir)
except OSError as e:
if e.errno == errno.ENOENT:
raise RuntimeError("Latex command not found. "
"Install '%s' or change pgf.texsystem to the desired command."
% self.texcommand
)
else:
raise RuntimeError("Error starting process '%s'" % self.texcommand)
test_input = self.latex_header + latex_end
stdout, stderr = latex.communicate(test_input.encode("utf-8"))
if latex.returncode != 0:
raise LatexError("LaTeX returned an error, probably missing font or error in preamble:\n%s" % stdout)
# open LaTeX process for real work
latex = subprocess.Popen([self.texcommand, "-halt-on-error"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
cwd=self.tmpdir)
self.latex = latex
self.latex_stdin_utf8 = codecs.getwriter("utf8")(self.latex.stdin)
# write header with 'pgf_backend_query_start' token
self._stdin_writeln(self._build_latex_header())
# read all lines until our 'pgf_backend_query_start' token appears
self._expect("*pgf_backend_query_start")
self._expect_prompt()
# cache for strings already processed
self.str_cache = {}
def _cleanup(self):
if not self._os_path.isdir(self.tmpdir):
return
try:
self.latex.communicate()
self.latex_stdin_utf8.close()
self.latex.stdout.close()
except:
pass
try:
self._shutil.rmtree(self.tmpdir)
LatexManager._unclean_instances.discard(self)
except:
sys.stderr.write("error deleting tmp directory %s\n" % self.tmpdir)
def __del__(self):
if self._debug:
print("deleting LatexManager")
self._cleanup()
def get_width_height_descent(self, text, prop):
"""
        Get the width, total height and descent for a text typeset by the
current LaTeX environment.
"""
# apply font properties and define textbox
prop_cmds = _font_properties_str(prop)
textbox = "\\sbox0{%s %s}" % (prop_cmds, text)
# check cache
if textbox in self.str_cache:
return self.str_cache[textbox]
# send textbox to LaTeX and wait for prompt
self._stdin_writeln(textbox)
try:
self._expect_prompt()
except LatexError as e:
msg = "Error processing '%s'\nLaTeX Output:\n%s"
raise ValueError(msg % (text, e.latex_output))
# typeout width, height and text offset of the last textbox
self._stdin_writeln(r"\typeout{\the\wd0,\the\ht0,\the\dp0}")
# read answer from latex and advance to the next prompt
try:
answer = self._expect_prompt()
except LatexError as e:
msg = "Error processing '%s'\nLaTeX Output:\n%s"
raise ValueError(msg % (text, e.latex_output))
# parse metrics from the answer string
try:
width, height, offset = answer.splitlines()[0].split(",")
except:
msg = "Error processing '%s'\nLaTeX Output:\n%s" % (text, answer)
raise ValueError(msg)
w, h, o = float(width[:-2]), float(height[:-2]), float(offset[:-2])
# the height returned from LaTeX goes from base to top.
# the height matplotlib expects goes from bottom to top.
self.str_cache[textbox] = (w, h + o, o)
return w, h + o, o
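# Illustrative usage (assumed objects, not part of the original module):
# renderers obtain a shared manager via the factory and query text metrics,
# which are returned in LaTeX points and cached per textbox.
#
#   manager = LatexManagerFactory.get_latex_manager()
#   w, h, d = manager.get_width_height_descent("some text", font_properties)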
class RendererPgf(RendererBase):
def __init__(self, figure, fh, dummy=False):
"""
Creates a new PGF renderer that translates any drawing instruction
into text commands to be interpreted in a latex pgfpicture environment.
Attributes:
* figure: Matplotlib figure to initialize height, width and dpi from.
* fh: File handle for the output of the drawing commands.
"""
RendererBase.__init__(self)
self.dpi = figure.dpi
self.fh = fh
self.figure = figure
self.image_counter = 0
# get LatexManager instance
self.latexManager = LatexManagerFactory.get_latex_manager()
if dummy:
            # dummy==True deactivates all methods
nop = lambda *args, **kwargs: None
for m in RendererPgf.__dict__.keys():
if m.startswith("draw_"):
self.__dict__[m] = nop
else:
# if fh does not belong to a filename, deactivate draw_image
if not hasattr(fh, 'name') or not os.path.exists(fh.name):
warnings.warn("streamed pgf-code does not support raster "
"graphics, consider using the pgf-to-pdf option",
UserWarning)
self.__dict__["draw_image"] = lambda *args, **kwargs: None
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
writeln(self.fh, r"\begin{pgfscope}")
# convert from display units to in
f = 1. / self.dpi
# set style and clip
self._print_pgf_clip(gc)
self._print_pgf_path_styles(gc, rgbFace)
# build marker definition
bl, tr = marker_path.get_extents(marker_trans).get_points()
coords = bl[0] * f, bl[1] * f, tr[0] * f, tr[1] * f
writeln(self.fh, r"\pgfsys@defobject{currentmarker}{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}{" % coords)
self._print_pgf_path(None, marker_path, marker_trans)
self._pgf_path_draw(stroke=gc.get_linewidth() != 0.0,
fill=rgbFace is not None)
writeln(self.fh, r"}")
# draw marker for each vertex
for point, code in path.iter_segments(trans, simplify=False):
x, y = point[0] * f, point[1] * f
writeln(self.fh, r"\begin{pgfscope}")
writeln(self.fh, r"\pgfsys@transformshift{%fin}{%fin}" % (x, y))
writeln(self.fh, r"\pgfsys@useobject{currentmarker}{}")
writeln(self.fh, r"\end{pgfscope}")
writeln(self.fh, r"\end{pgfscope}")
def draw_path(self, gc, path, transform, rgbFace=None):
writeln(self.fh, r"\begin{pgfscope}")
# draw the path
self._print_pgf_clip(gc)
self._print_pgf_path_styles(gc, rgbFace)
self._print_pgf_path(gc, path, transform, rgbFace)
self._pgf_path_draw(stroke=gc.get_linewidth() != 0.0,
fill=rgbFace is not None)
writeln(self.fh, r"\end{pgfscope}")
# if present, draw pattern on top
if gc.get_hatch():
writeln(self.fh, r"\begin{pgfscope}")
self._print_pgf_path_styles(gc, rgbFace)
# combine clip and path for clipping
self._print_pgf_clip(gc)
self._print_pgf_path(gc, path, transform, rgbFace)
writeln(self.fh, r"\pgfusepath{clip}")
# build pattern definition
writeln(self.fh, r"\pgfsys@defobject{currentpattern}{\pgfqpoint{0in}{0in}}{\pgfqpoint{1in}{1in}}{")
writeln(self.fh, r"\begin{pgfscope}")
writeln(self.fh, r"\pgfpathrectangle{\pgfqpoint{0in}{0in}}{\pgfqpoint{1in}{1in}}")
writeln(self.fh, r"\pgfusepath{clip}")
scale = mpl.transforms.Affine2D().scale(self.dpi)
self._print_pgf_path(None, gc.get_hatch_path(), scale)
self._pgf_path_draw(stroke=True)
writeln(self.fh, r"\end{pgfscope}")
writeln(self.fh, r"}")
# repeat pattern, filling the bounding rect of the path
f = 1. / self.dpi
(xmin, ymin), (xmax, ymax) = path.get_extents(transform).get_points()
xmin, xmax = f * xmin, f * xmax
ymin, ymax = f * ymin, f * ymax
repx, repy = int(math.ceil(xmax-xmin)), int(math.ceil(ymax-ymin))
writeln(self.fh, r"\pgfsys@transformshift{%fin}{%fin}" % (xmin, ymin))
for iy in range(repy):
for ix in range(repx):
writeln(self.fh, r"\pgfsys@useobject{currentpattern}{}")
writeln(self.fh, r"\pgfsys@transformshift{1in}{0in}")
writeln(self.fh, r"\pgfsys@transformshift{-%din}{0in}" % repx)
writeln(self.fh, r"\pgfsys@transformshift{0in}{1in}")
writeln(self.fh, r"\end{pgfscope}")
def _print_pgf_clip(self, gc):
f = 1. / self.dpi
# check for clip box
bbox = gc.get_clip_rectangle()
if bbox:
p1, p2 = bbox.get_points()
w, h = p2 - p1
coords = p1[0] * f, p1[1] * f, w * f, h * f
writeln(self.fh, r"\pgfpathrectangle{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}} " % coords)
writeln(self.fh, r"\pgfusepath{clip}")
# check for clip path
clippath, clippath_trans = gc.get_clip_path()
if clippath is not None:
self._print_pgf_path(gc, clippath, clippath_trans)
writeln(self.fh, r"\pgfusepath{clip}")
def _print_pgf_path_styles(self, gc, rgbFace):
# cap style
capstyles = {"butt": r"\pgfsetbuttcap",
"round": r"\pgfsetroundcap",
"projecting": r"\pgfsetrectcap"}
writeln(self.fh, capstyles[gc.get_capstyle()])
# join style
joinstyles = {"miter": r"\pgfsetmiterjoin",
"round": r"\pgfsetroundjoin",
"bevel": r"\pgfsetbeveljoin"}
writeln(self.fh, joinstyles[gc.get_joinstyle()])
# filling
has_fill = rgbFace is not None
if gc.get_forced_alpha():
fillopacity = strokeopacity = gc.get_alpha()
else:
strokeopacity = gc.get_rgb()[3]
fillopacity = rgbFace[3] if has_fill and len(rgbFace) > 3 else 1.0
if has_fill:
writeln(self.fh, r"\definecolor{currentfill}{rgb}{%f,%f,%f}" % tuple(rgbFace[:3]))
writeln(self.fh, r"\pgfsetfillcolor{currentfill}")
if has_fill and fillopacity != 1.0:
writeln(self.fh, r"\pgfsetfillopacity{%f}" % fillopacity)
# linewidth and color
lw = gc.get_linewidth() * mpl_pt_to_in * latex_in_to_pt
stroke_rgba = gc.get_rgb()
writeln(self.fh, r"\pgfsetlinewidth{%fpt}" % lw)
writeln(self.fh, r"\definecolor{currentstroke}{rgb}{%f,%f,%f}" % stroke_rgba[:3])
writeln(self.fh, r"\pgfsetstrokecolor{currentstroke}")
if strokeopacity != 1.0:
writeln(self.fh, r"\pgfsetstrokeopacity{%f}" % strokeopacity)
# line style
dash_offset, dash_list = gc.get_dashes()
if dash_list is None:
writeln(self.fh, r"\pgfsetdash{}{0pt}")
else:
dash_str = r"\pgfsetdash{"
for dash in dash_list:
dash_str += r"{%fpt}" % dash
dash_str += r"}{%fpt}" % dash_offset
writeln(self.fh, dash_str)
def _print_pgf_path(self, gc, path, transform, rgbFace=None):
f = 1. / self.dpi
# check for clip box / ignore clip for filled paths
bbox = gc.get_clip_rectangle() if gc else None
if bbox and (rgbFace is None):
p1, p2 = bbox.get_points()
clip = (p1[0], p1[1], p2[0], p2[1])
else:
clip = None
# build path
for points, code in path.iter_segments(transform, clip=clip):
if code == Path.MOVETO:
x, y = tuple(points)
writeln(self.fh, r"\pgfpathmoveto{\pgfqpoint{%fin}{%fin}}" %
(f * x, f * y))
elif code == Path.CLOSEPOLY:
writeln(self.fh, r"\pgfpathclose")
elif code == Path.LINETO:
x, y = tuple(points)
writeln(self.fh, r"\pgfpathlineto{\pgfqpoint{%fin}{%fin}}" %
(f * x, f * y))
elif code == Path.CURVE3:
cx, cy, px, py = tuple(points)
coords = cx * f, cy * f, px * f, py * f
writeln(self.fh, r"\pgfpathquadraticcurveto{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}" % coords)
elif code == Path.CURVE4:
c1x, c1y, c2x, c2y, px, py = tuple(points)
coords = c1x * f, c1y * f, c2x * f, c2y * f, px * f, py * f
writeln(self.fh, r"\pgfpathcurveto{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}{\pgfqpoint{%fin}{%fin}}" % coords)
def _pgf_path_draw(self, stroke=True, fill=False):
actions = []
if stroke:
actions.append("stroke")
if fill:
actions.append("fill")
writeln(self.fh, r"\pgfusepath{%s}" % ",".join(actions))
def draw_image(self, gc, x, y, im):
# TODO: Almost no documentation for the behavior of this function.
# Something missing?
# save the images to png files
path = os.path.dirname(self.fh.name)
fname = os.path.splitext(os.path.basename(self.fh.name))[0]
fname_img = "%s-img%d.png" % (fname, self.image_counter)
self.image_counter += 1
_png.write_png(np.array(im)[::-1], os.path.join(path, fname_img))
# reference the image in the pgf picture
writeln(self.fh, r"\begin{pgfscope}")
self._print_pgf_clip(gc)
h, w = im.get_size_out()
f = 1. / self.dpi # from display coords to inch
writeln(self.fh, r"\pgftext[at=\pgfqpoint{%fin}{%fin},left,bottom]{\pgfimage[interpolate=true,width=%fin,height=%fin]{%s}}" % (x * f, y * f, w * f, h * f, fname_img))
writeln(self.fh, r"\end{pgfscope}")
def draw_tex(self, gc, x, y, s, prop, angle, ismath="TeX!", mtext=None):
self.draw_text(gc, x, y, s, prop, angle, ismath, mtext)
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
# prepare string for tex
s = common_texification(s)
prop_cmds = _font_properties_str(prop)
s = r"%s %s" % (prop_cmds, s)
writeln(self.fh, r"\begin{pgfscope}")
alpha = gc.get_alpha()
if alpha != 1.0:
writeln(self.fh, r"\pgfsetfillopacity{%f}" % alpha)
writeln(self.fh, r"\pgfsetstrokeopacity{%f}" % alpha)
rgb = tuple(gc.get_rgb())[:3]
if rgb != (0, 0, 0):
writeln(self.fh, r"\definecolor{textcolor}{rgb}{%f,%f,%f}" % rgb)
writeln(self.fh, r"\pgfsetstrokecolor{textcolor}")
writeln(self.fh, r"\pgfsetfillcolor{textcolor}")
s = r"\color{textcolor}" + s
f = 1.0 / self.figure.dpi
text_args = []
if mtext and (angle == 0 or mtext.get_rotation_mode() == "anchor"):
# if text anchoring can be supported, get the original coordinates
# and add alignment information
x, y = mtext.get_transform().transform_point(mtext.get_position())
text_args.append("x=%fin" % (x * f))
text_args.append("y=%fin" % (y * f))
halign = {"left": "left", "right": "right", "center": ""}
valign = {"top": "top", "bottom": "bottom",
"baseline": "base", "center": ""}
text_args.append(halign[mtext.get_ha()])
text_args.append(valign[mtext.get_va()])
else:
# if not, use the text layout provided by matplotlib
text_args.append("x=%fin" % (x * f))
text_args.append("y=%fin" % (y * f))
text_args.append("left")
text_args.append("base")
if angle != 0:
text_args.append("rotate=%f" % angle)
writeln(self.fh, r"\pgftext[%s]{%s}" % (",".join(text_args), s))
writeln(self.fh, r"\end{pgfscope}")
def get_text_width_height_descent(self, s, prop, ismath):
# check if the math is supposed to be displaystyled
s = common_texification(s)
# get text metrics in units of latex pt, convert to display units
w, h, d = self.latexManager.get_width_height_descent(s, prop)
# TODO: this should be latex_pt_to_in instead of mpl_pt_to_in
# but having a little bit more space around the text looks better,
# plus the bounding box reported by LaTeX is VERY narrow
f = mpl_pt_to_in * self.dpi
return w * f, h * f, d * f
def flipy(self):
return False
def get_canvas_width_height(self):
return self.figure.get_figwidth(), self.figure.get_figheight()
def points_to_pixels(self, points):
return points * mpl_pt_to_in * self.dpi
def new_gc(self):
return GraphicsContextPgf()
class GraphicsContextPgf(GraphicsContextBase):
pass
########################################################################
def draw_if_interactive():
pass
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
    # main-level app (e.g. backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, thisFig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
canvas = FigureCanvasPgf(figure)
manager = FigureManagerPgf(canvas, num)
return manager
class TmpDirCleaner(object):
remaining_tmpdirs = set()
@staticmethod
def add(tmpdir):
TmpDirCleaner.remaining_tmpdirs.add(tmpdir)
@staticmethod
def cleanup_remaining_tmpdirs():
for tmpdir in TmpDirCleaner.remaining_tmpdirs:
try:
shutil.rmtree(tmpdir)
except:
sys.stderr.write("error deleting tmp directory %s\n" % tmpdir)
class FigureCanvasPgf(FigureCanvasBase):
filetypes = {"pgf": "LaTeX PGF picture",
"pdf": "LaTeX compiled PGF picture",
"png": "Portable Network Graphics", }
def get_default_filetype(self):
return 'pdf'
def _print_pgf_to_fh(self, fh, *args, **kwargs):
if kwargs.get("dryrun", False):
renderer = RendererPgf(self.figure, None, dummy=True)
self.figure.draw(renderer)
return
header_text = """%% Creator: Matplotlib, PGF backend
%%
%% To include the figure in your LaTeX document, write
%% \\input{<filename>.pgf}
%%
%% Make sure the required packages are loaded in your preamble
%% \\usepackage{pgf}
%%
%% Figures using additional raster images can only be included by \input if
%% they are in the same directory as the main LaTeX file. For loading figures
%% from other directories you can use the `import` package
%% \\usepackage{import}
%% and then include the figures with
%% \\import{<path to file>}{<filename>.pgf}
%%
"""
# append the preamble used by the backend as a comment for debugging
header_info_preamble = ["%% Matplotlib used the following preamble"]
for line in get_preamble().splitlines():
header_info_preamble.append("%% " + line)
for line in get_fontspec().splitlines():
header_info_preamble.append("%% " + line)
header_info_preamble.append("%%")
header_info_preamble = "\n".join(header_info_preamble)
# get figure size in inch
w, h = self.figure.get_figwidth(), self.figure.get_figheight()
dpi = self.figure.get_dpi()
# create pgfpicture environment and write the pgf code
fh.write(header_text)
fh.write(header_info_preamble)
fh.write("\n")
writeln(fh, r"\begingroup")
writeln(fh, r"\makeatletter")
writeln(fh, r"\begin{pgfpicture}")
writeln(fh, r"\pgfpathrectangle{\pgfpointorigin}{\pgfqpoint{%fin}{%fin}}" % (w, h))
writeln(fh, r"\pgfusepath{use as bounding box, clip}")
_bbox_inches_restore = kwargs.pop("bbox_inches_restore", None)
renderer = MixedModeRenderer(self.figure, w, h, dpi,
RendererPgf(self.figure, fh),
bbox_inches_restore=_bbox_inches_restore)
self.figure.draw(renderer)
# end the pgfpicture environment
writeln(fh, r"\end{pgfpicture}")
writeln(fh, r"\makeatother")
writeln(fh, r"\endgroup")
def print_pgf(self, fname_or_fh, *args, **kwargs):
"""
Output pgf commands for drawing the figure so it can be included and
rendered in latex documents.
"""
if kwargs.get("dryrun", False):
self._print_pgf_to_fh(None, *args, **kwargs)
return
# figure out where the pgf is to be written to
if is_string_like(fname_or_fh):
with codecs.open(fname_or_fh, "w", encoding="utf-8") as fh:
self._print_pgf_to_fh(fh, *args, **kwargs)
elif is_writable_file_like(fname_or_fh):
fh = codecs.getwriter("utf-8")(fname_or_fh)
self._print_pgf_to_fh(fh, *args, **kwargs)
else:
raise ValueError("filename must be a path")
def _print_pdf_to_fh(self, fh, *args, **kwargs):
w, h = self.figure.get_figwidth(), self.figure.get_figheight()
try:
# create temporary directory for compiling the figure
tmpdir = tempfile.mkdtemp(prefix="mpl_pgf_")
fname_pgf = os.path.join(tmpdir, "figure.pgf")
fname_tex = os.path.join(tmpdir, "figure.tex")
fname_pdf = os.path.join(tmpdir, "figure.pdf")
# print figure to pgf and compile it with latex
self.print_pgf(fname_pgf, *args, **kwargs)
latex_preamble = get_preamble()
latex_fontspec = get_fontspec()
latexcode = """
\\documentclass[12pt]{minimal}
\\usepackage[paperwidth=%fin, paperheight=%fin, margin=0in]{geometry}
%s
%s
\\usepackage{pgf}
\\begin{document}
\\centering
\\input{figure.pgf}
\\end{document}""" % (w, h, latex_preamble, latex_fontspec)
with codecs.open(fname_tex, "w", "utf-8") as fh_tex:
fh_tex.write(latexcode)
texcommand = get_texcommand()
cmdargs = [texcommand, "-interaction=nonstopmode",
"-halt-on-error", "figure.tex"]
try:
check_output(cmdargs, stderr=subprocess.STDOUT, cwd=tmpdir)
except subprocess.CalledProcessError as e:
raise RuntimeError("%s was not able to process your file.\n\nFull log:\n%s" % (texcommand, e.output))
# copy file contents to target
with open(fname_pdf, "rb") as fh_src:
shutil.copyfileobj(fh_src, fh)
finally:
try:
shutil.rmtree(tmpdir)
except:
TmpDirCleaner.add(tmpdir)
def print_pdf(self, fname_or_fh, *args, **kwargs):
"""
Use LaTeX to compile a Pgf generated figure to PDF.
"""
if kwargs.get("dryrun", False):
self._print_pgf_to_fh(None, *args, **kwargs)
return
# figure out where the pdf is to be written to
if is_string_like(fname_or_fh):
with open(fname_or_fh, "wb") as fh:
self._print_pdf_to_fh(fh, *args, **kwargs)
elif is_writable_file_like(fname_or_fh):
self._print_pdf_to_fh(fname_or_fh, *args, **kwargs)
else:
raise ValueError("filename must be a path or a file-like object")
def _print_png_to_fh(self, fh, *args, **kwargs):
converter = make_pdf_to_png_converter()
try:
# create temporary directory for pdf creation and png conversion
tmpdir = tempfile.mkdtemp(prefix="mpl_pgf_")
fname_pdf = os.path.join(tmpdir, "figure.pdf")
fname_png = os.path.join(tmpdir, "figure.png")
# create pdf and try to convert it to png
self.print_pdf(fname_pdf, *args, **kwargs)
converter(fname_pdf, fname_png, dpi=self.figure.dpi)
# copy file contents to target
with open(fname_png, "rb") as fh_src:
shutil.copyfileobj(fh_src, fh)
finally:
try:
shutil.rmtree(tmpdir)
except:
TmpDirCleaner.add(tmpdir)
def print_png(self, fname_or_fh, *args, **kwargs):
"""
Use LaTeX to compile a pgf figure to pdf and convert it to png.
"""
if kwargs.get("dryrun", False):
self._print_pgf_to_fh(None, *args, **kwargs)
return
if is_string_like(fname_or_fh):
with open(fname_or_fh, "wb") as fh:
self._print_png_to_fh(fh, *args, **kwargs)
elif is_writable_file_like(fname_or_fh):
self._print_png_to_fh(fname_or_fh, *args, **kwargs)
else:
raise ValueError("filename must be a path or a file-like object")
def get_renderer(self):
return RendererPgf(self.figure, None, dummy=True)
class FigureManagerPgf(FigureManagerBase):
def __init__(self, *args):
FigureManagerBase.__init__(self, *args)
FigureCanvas = FigureCanvasPgf
FigureManager = FigureManagerPgf
def _cleanup_all():
LatexManager._cleanup_remaining_instances()
TmpDirCleaner.cleanup_remaining_tmpdirs()
atexit.register(_cleanup_all)
|
mit
|
Mistobaan/tensorflow
|
tensorflow/contrib/metrics/python/ops/metric_ops_test.py
|
7
|
266607
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metric_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
NAN = float('nan')
metrics = metrics_lib
def _enqueue_vector(sess, queue, values, shape=None):
if not shape:
shape = (1, len(values))
dtype = queue.dtypes[0]
sess.run(
queue.enqueue(constant_op.constant(
values, dtype=dtype, shape=shape)))
def _binary_2d_label_to_sparse_value(labels):
"""Convert dense 2D binary indicator tensor to sparse tensor.
  Only values equal to 1 in `labels` are included in the result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
batch = 0
for row in labels:
label = 0
xi = 0
for x in row:
if x == 1:
indices.append([batch, xi])
values.append(label)
xi += 1
else:
assert x == 0
label += 1
batch += 1
shape = [len(labels), len(labels[0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64),
np.array(values, np.int64), np.array(shape, np.int64))
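# Illustrative sketch (not part of the original module): each 1-entry becomes a
# value equal to its class id, placed at (row, k) where k counts the 1-entries
# seen so far in that row.
#
#   >>> _binary_2d_label_to_sparse_value([[0, 1, 0], [1, 0, 1]])
#   # indices [[0, 0], [1, 0], [1, 1]], values [1, 0, 2], shape [2, 3]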
def _binary_2d_label_to_sparse(labels):
"""Convert dense 2D binary indicator tensor to sparse tensor.
  Only values equal to 1 in `labels` are included in the result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensor` whose values are indices along the last dimension of
`labels`.
"""
return sparse_tensor.SparseTensor.from_value(
_binary_2d_label_to_sparse_value(labels))
def _binary_3d_label_to_sparse_value(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
  Only values equal to 1 in `labels` are included in the result.
  Args:
    labels: Dense 3D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
for d0, labels_d0 in enumerate(labels):
for d1, labels_d1 in enumerate(labels_d0):
d2 = 0
for class_id, label in enumerate(labels_d1):
if label == 1:
values.append(class_id)
indices.append([d0, d1, d2])
d2 += 1
else:
assert label == 0
shape = [len(labels), len(labels[0]), len(labels[0][0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64),
np.array(values, np.int64), np.array(shape, np.int64))
def _binary_3d_label_to_sparse(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
  Only values equal to 1 in `labels` are included in the result.
  Args:
    labels: Dense 3D binary indicator tensor.
Returns:
`SparseTensor` whose values are indices along the last dimension of
`labels`.
"""
return sparse_tensor.SparseTensor.from_value(
_binary_3d_label_to_sparse_value(labels))
def _assert_nan(test_case, actual):
test_case.assertTrue(math.isnan(actual), 'Expected NAN, got %s.' % actual)
def _assert_metric_variables(test_case, expected):
test_case.assertEquals(
set(expected), set(v.name for v in variables.local_variables()))
test_case.assertEquals(
set(expected),
set(v.name for v in ops.get_collection(ops.GraphKeys.METRIC_VARIABLES)))
class StreamingMeanTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean(array_ops.ones([4, 3]))
_assert_metric_variables(self, ('mean/count:0', 'mean/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean(values)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(1.475, sess.run(update_op), 5)
self.assertAlmostEqual(12.4 / 6.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def test1dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [1])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test1dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
      # Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1,))
_enqueue_vector(sess, weights_queue, 1, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 1, shape=(1,))
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test2dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
      # Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
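      # The element-wise weights keep (0, 1) from the first batch, -4.2 from
      # the second and 0 from the third, over a total weight of 4, giving
      # (0 + 1 - 4.2 + 0) / 4.0 = -0.8 (hand-computed).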
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
def test2dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
      # Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(2,))
_enqueue_vector(sess, weights_queue, [1, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
class StreamingMeanTensorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_tensor(array_ops.ones([4, 3]))
_assert_metric_variables(self,
('mean/total_tensor:0', 'mean/count_tensor:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_tensor(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_tensor(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
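      # Hand-computed column means: (0 - 4.2 + 6.5 - 3.2) / 4 = -0.9 / 4 and
      # (1 + 9.1 + 0 + 4.0) / 4 = 3.525.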
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean))
def testMultiDimensional(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
shape=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
shape=(2, 2, 2))
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(2):
sess.run(update_op)
self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]], sess.run(mean))
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
self.assertAllClose([[0, 1]], sess.run(update_op), 5)
self.assertAllClose([[-2.1, 5.05]], sess.run(update_op), 5)
self.assertAllClose([[2.3 / 3., 10.1 / 3.]], sess.run(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean), 5)
def testWeighted1d(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
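      # Only the first and third batches carry non-zero weight, so the
      # expected per-column means are (0 + 6.5) / 2 = 3.25 and
      # (1 + 0) / 2 = 0.5 (hand-computed).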
self.assertAllClose([[3.25, 0.5]], sess.run(mean), 5)
def testWeighted2d_1(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-2.1, 0.5]], sess.run(mean), 5)
def testWeighted2d_2(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
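      # The first column receives zero total weight; as the expected value
      # below shows, the metric reports 0 for that entry rather than dividing
      # by zero, while the second column averages (1 + 0) / 2 = 0.5.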
self.assertAllClose([[0, 0.5]], sess.run(mean), 5)
class StreamingAccuracyTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
name='my_accuracy')
_assert_metric_variables(self,
('my_accuracy/count:0', 'my_accuracy/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 4))
with self.assertRaises(ValueError):
metrics.streaming_accuracy(predictions, labels)
def testPredictionsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 3))
weights = array_ops.ones((9, 3))
with self.assertRaises(ValueError):
metrics.streaming_accuracy(predictions, labels, weights)
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=2)
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_accuracy = accuracy.eval()
for _ in range(10):
self.assertEqual(initial_accuracy, accuracy.eval())
def testMultipleUpdates(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
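      # The four dequeued (prediction, label) pairs are (0, 0), (1, 1),
      # (2, 1) and (1, 2); only the first two match, so the streaming
      # accuracy is 2 / 4 = 0.5 (hand-computed).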
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, accuracy.eval())
def testEffectivelyEquivalentSizes(self):
predictions = array_ops.ones((40, 1))
labels = array_ops.ones((40,))
with self.test_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, update_op.eval())
self.assertEqual(1.0, accuracy.eval())
  def testEffectivelyEquivalentSizesWithStaticShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = array_ops.expand_dims(ops.convert_to_tensor([100, 1, 1]),
1) # shape 3, 1
with self.test_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights)
sess.run(variables.local_variables_initializer())
      # If streaming_accuracy did not flatten the weights, the (3, 1) weight
      # tensor would broadcast against the predictions and the accuracy would
      # be ~0.33333334. Because the weights are flattened, the heavily
      # weighted correct prediction dominates and the accuracy exceeds .95.
self.assertGreater(update_op.eval(), .95)
self.assertGreater(accuracy.eval(), .95)
def testEffectivelyEquivalentSizesWithDynamicallyShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = [[100], [1], [1]] # shape 3, 1
weights_placeholder = array_ops.placeholder(
dtype=dtypes_lib.int32, name='weights')
feed_dict = {weights_placeholder: weights}
with self.test_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights_placeholder)
sess.run(variables.local_variables_initializer())
      # If streaming_accuracy did not flatten the weights, the (3, 1) weight
      # tensor would broadcast against the predictions and the accuracy would
      # be ~0.33333334. Because the weights are flattened, the heavily
      # weighted correct prediction dominates and the accuracy exceeds .95.
self.assertGreater(update_op.eval(feed_dict=feed_dict), .95)
self.assertGreater(accuracy.eval(feed_dict=feed_dict), .95)
def testMultipleUpdatesWithWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.int64, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
weights = weights_queue.dequeue()
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
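      # Weights (1, 1, 0, 0) keep only the first two (prediction, label)
      # pairs, both of which match, so the weighted accuracy is 1.0
      # (hand-computed).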
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, accuracy.eval())
class StreamingTruePositivesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_positives((0, 1, 0), (0, 1, 1))
_assert_metric_variables(self, ('true_positives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
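          # Hand-counted confusion counts for this fixture: tp=1, fp=4, fn=2
          # and tn=5. The false/true negative/positive tests below reuse the
          # same predictions and labels.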
tp, tp_update_op = metrics.streaming_true_positives(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tp.eval())
self.assertEqual(1, tp_update_op.eval())
self.assertEqual(1, tp.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
tp, tp_update_op = metrics.streaming_true_positives(
predictions, labels, weights=37.0)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tp.eval())
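        # The lone true positive at [0][2], scaled by the scalar weight 37.0,
        # gives 37.0 (hand-computed).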
self.assertEqual(37.0, tp_update_op.eval())
self.assertEqual(37.0, tp.eval())
class StreamingFalseNegativesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negatives((0, 1, 0),
(0, 1, 1))
_assert_metric_variables(self, ('false_negatives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
fn, fn_update_op = metrics.streaming_false_negatives(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fn.eval())
self.assertEqual(2, fn_update_op.eval())
self.assertEqual(2, fn.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
fn, fn_update_op = metrics.streaming_false_negatives(
predictions, labels, weights=((3.0,), (5.0,), (7.0,)))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fn.eval())
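        # The false negatives at [0][1] and [1][0] carry row weights 3.0 and
        # 5.0, so the weighted count is 8.0 (hand-computed).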
self.assertEqual(8.0, fn_update_op.eval())
self.assertEqual(8.0, fn.eval())
class StreamingFalsePositivesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positives((0, 1, 0),
(0, 1, 1))
_assert_metric_variables(self, ('false_positives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
fp, fp_update_op = metrics.streaming_false_positives(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fp.eval())
self.assertEqual(4, fp_update_op.eval())
self.assertEqual(4, fp.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
fp, fp_update_op = metrics.streaming_false_positives(
predictions,
labels,
weights=((1.0, 2.0, 3.0, 5.0),
(7.0, 11.0, 13.0, 17.0),
(19.0, 23.0, 29.0, 31.0)))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fp.eval())
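        # The false positives at [0][0], [1][1], [1][2] and [1][3] carry
        # weights 1, 11, 13 and 17, which sum to 42.0 (hand-computed).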
self.assertEqual(42.0, fp_update_op.eval())
self.assertEqual(42.0, fp.eval())
class StreamingTrueNegativesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_negatives((0, 1, 0),
(0, 1, 1))
_assert_metric_variables(self, ('true_negatives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
tn, tn_update_op = metrics.streaming_true_negatives(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tn.eval())
self.assertEqual(5, tn_update_op.eval())
self.assertEqual(5, tn.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
tn, tn_update_op = metrics.streaming_true_negatives(
predictions, labels, weights=((0.0, 2.0, 3.0, 5.0),))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tn.eval())
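        # The single-row weights broadcast across all rows; the true negatives
        # at [0][3] (weight 5) and in row 2 (weights 0 + 2 + 3 + 5) sum to
        # 15.0 (hand-computed).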
self.assertEqual(15.0, tn_update_op.eval())
self.assertEqual(15.0, tn.eval())
class StreamingTruePositivesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_positives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('true_positives:0',))
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tp.eval())
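      # The labeled positives have scores 0.2, 0.8 and 0.2; scores above each
      # threshold appear to count as positive predictions, yielding 3, 1 and
      # 0 true positives at thresholds 0.15, 0.5 and 0.85 (hand-computed).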
self.assertAllEqual((3, 1, 0), tp_update_op.eval())
self.assertAllEqual((3, 1, 0), tp.eval())
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(
predictions, labels, weights=37.0, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tp.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp_update_op.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp.eval())
class StreamingFalseNegativesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negatives_at_thresholds(
        (0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('false_negatives:0',))
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fn.eval())
self.assertAllEqual((0, 2, 3), fn_update_op.eval())
self.assertAllEqual((0, 2, 3), fn.eval())
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(
predictions,
labels,
weights=((3.0,), (5.0,), (7.0,)),
thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fn.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn_update_op.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn.eval())
class StreamingFalsePositivesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('false_positives:0',))
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fp.eval())
self.assertAllEqual((7, 4, 2), fp_update_op.eval())
self.assertAllEqual((7, 4, 2), fp.eval())
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(
predictions,
labels,
weights=((1.0, 2.0, 3.0, 5.0),
(7.0, 11.0, 13.0, 17.0),
(19.0, 23.0, 29.0, 31.0)),
thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fp.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp_update_op.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp.eval())
class StreamingTrueNegativesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_negatives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_metric_variables(self, ('true_negatives:0',))
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tn.eval())
self.assertAllEqual((2, 5, 7), tn_update_op.eval())
self.assertAllEqual((2, 5, 7), tn.eval())
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(
predictions,
labels,
weights=((0.0, 2.0, 3.0, 5.0),),
thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tn.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn_update_op.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn.eval())
class StreamingPrecisionTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_precision(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self, ('precision/false_positives/count:0',
'precision/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_precision = precision.eval()
for _ in range(10):
self.assertEqual(initial_precision, precision.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(inputs)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op))
self.assertAlmostEqual(1, precision.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
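      # Of the two positive predictions, only index 2 is a true positive and
      # index 0 is a false positive, so precision is 1 / 2 = 0.5
      # (hand-computed).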
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, precision.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.streaming_precision(
predictions, labels, weights=constant_op.constant([[2], [5]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted1d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.streaming_precision(
predictions, labels, weights=constant_op.constant([[2], [5]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.streaming_precision(
predictions,
labels,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted2d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.streaming_precision(
predictions,
labels,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(1 - inputs)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0, precision.eval())
def testZeroTrueAndFalsePositivesGivesZeroPrecision(self):
predictions = constant_op.constant([0, 0, 0, 0])
labels = constant_op.constant([0, 0, 0, 0])
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0.0, precision.eval())
class StreamingRecallTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_recall(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('recall/false_negatives/count:0', 'recall/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_recall = recall.eval()
for _ in range(10):
self.assertEqual(initial_recall, recall.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, recall.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, recall.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
recall, update_op = metrics.streaming_recall(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_tp = 2.0 + 5.0
weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
      expected_recall = weighted_tp / weighted_t
      self.assertAlmostEqual(expected_recall, update_op.eval())
      self.assertAlmostEqual(expected_recall, recall.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
recall, update_op = metrics.streaming_recall(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_tp = 3.0 + 1.0
weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
      expected_recall = weighted_tp / weighted_t
      self.assertAlmostEqual(expected_recall, update_op.eval())
      self.assertAlmostEqual(expected_recall, recall.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
def testZeroTruePositivesAndFalseNegativesGivesZeroRecall(self):
predictions = array_ops.zeros((1, 4))
labels = array_ops.zeros((1, 4))
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
class StreamingFPRTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positive_rate(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('false_positive_rate/false_positives/count:0',
'false_positive_rate/true_negatives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_false_positive_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_positive_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_fpr = fpr.eval()
for _ in range(10):
self.assertEqual(initial_fpr, fpr.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fpr.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, fpr.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fp = 2.0 + 5.0
weighted_f = (2.0 + 2.0) + (5.0 + 5.0)
expected_fpr = weighted_fp / weighted_f
self.assertAlmostEqual(expected_fpr, update_op.eval())
self.assertAlmostEqual(expected_fpr, fpr.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fp = 1.0 + 3.0
weighted_f = (1.0 + 4.0) + (2.0 + 3.0)
expected_fpr = weighted_fp / weighted_f
self.assertAlmostEqual(expected_fpr, update_op.eval())
self.assertAlmostEqual(expected_fpr, fpr.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, fpr.eval())
def testZeroFalsePositivesAndTrueNegativesGivesZeroFPR(self):
predictions = array_ops.ones((1, 4))
labels = array_ops.ones((1, 4))
fpr, update_op = metrics.streaming_false_positive_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fpr.eval())
class StreamingFNRTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negative_rate(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('false_negative_rate/false_negatives/count:0',
'false_negative_rate/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_false_negative_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_negative_rate(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_fnr = fnr.eval()
for _ in range(10):
self.assertEqual(initial_fnr, fnr.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fnr.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, fnr.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fn = 2.0 + 5.0
weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
expected_fnr = weighted_fn / weighted_t
self.assertAlmostEqual(expected_fnr, update_op.eval())
self.assertAlmostEqual(expected_fnr, fnr.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_fn = 2.0 + 4.0
weighted_t = (2.0 + 3.0) + (1.0 + 4.0)
expected_fnr = weighted_fn / weighted_t
self.assertAlmostEqual(expected_fnr, update_op.eval())
self.assertAlmostEqual(expected_fnr, fnr.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, fnr.eval())
def testZeroFalseNegativesAndTruePositivesGivesZeroFNR(self):
predictions = array_ops.zeros((1, 4))
labels = array_ops.zeros((1, 4))
fnr, update_op = metrics.streaming_false_negative_rate(
predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, fnr.eval())
class StreamingCurvePointsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metric_ops.streaming_curve_points(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('curve_points/true_positives:0', 'curve_points/false_negatives:0',
'curve_points/false_positives:0', 'curve_points/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
points, _ = metric_ops.streaming_curve_points(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [points])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metric_ops.streaming_curve_points(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def _testValueTensorIsIdempotent(self, curve):
predictions = constant_op.constant(
np.random.uniform(size=(10, 3)), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np.random.uniform(high=2, size=(10, 3)), dtype=dtypes_lib.float32)
points, update_op = metric_ops.streaming_curve_points(
labels, predictions=predictions, curve=curve)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
initial_points = points.eval()
sess.run(update_op)
self.assertAllClose(initial_points, points.eval())
def testValueTensorIsIdempotentROC(self):
self._testValueTensorIsIdempotent(curve='ROC')
def testValueTensorIsIdempotentPR(self):
self._testValueTensorIsIdempotent(curve='PR')
def _testCase(self, labels, predictions, curve, expected_points):
with self.test_session() as sess:
predictions_tensor = constant_op.constant(
predictions, dtype=dtypes_lib.float32)
labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.float32)
points, update_op = metric_ops.streaming_curve_points(
labels=labels_tensor,
predictions=predictions_tensor,
num_thresholds=3,
curve=curve)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAllClose(expected_points, points.eval())
def testEdgeCasesROC(self):
self._testCase([[1]], [[1]], 'ROC', [[0, 1], [0, 1], [0, 0]])
self._testCase([[0]], [[0]], 'ROC', [[1, 1], [0, 1], [0, 1]])
self._testCase([[0]], [[1]], 'ROC', [[1, 1], [1, 1], [0, 1]])
self._testCase([[1]], [[0]], 'ROC', [[0, 1], [0, 0], [0, 0]])
def testManyValuesROC(self):
self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'ROC',
[[1.0, 1.0], [0.0, 0.75], [0.0, 0.0]])
def testEdgeCasesPR(self):
self._testCase([[1]], [[1]], 'PR', [[1, 1], [1, 1], [0, 1]])
self._testCase([[0]], [[0]], 'PR', [[1, 0], [1, 1], [1, 1]])
self._testCase([[0]], [[1]], 'PR', [[1, 0], [1, 0], [1, 1]])
self._testCase([[1]], [[0]], 'PR', [[1, 1], [0, 1], [0, 1]])
def testManyValuesPR(self):
self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'PR',
[[1.0, 4.0 / 6.0], [0.75, 1.0], [0.0, 1.0]])
def _np_auc(predictions, labels, weights=None):
"""Computes the AUC explicitly using Numpy.
Args:
predictions: an ndarray with shape [N].
labels: an ndarray with shape [N].
weights: an ndarray with shape [N].
Returns:
the area under the ROC curve.
"""
if weights is None:
weights = np.ones(np.size(predictions))
is_positive = labels > 0
num_positives = np.sum(weights[is_positive])
num_negatives = np.sum(weights[~is_positive])
# Sort descending:
inds = np.argsort(-predictions)
sorted_labels = labels[inds]
sorted_weights = weights[inds]
is_positive = sorted_labels > 0
tp = np.cumsum(sorted_weights * is_positive) / num_positives
return np.sum((sorted_weights * tp)[~is_positive]) / num_negatives
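# Rough illustration of _np_auc (hand-checked): with predictions [0.9, 0.1],
# labels [1, 0] and unit weights, the single positive outranks the single
# negative, so the cumulative true-positive rate at the negative is 1.0 and
# the returned ROC AUC is 1.0.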
class StreamingAUCTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_auc(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(self,
('auc/true_positives:0', 'auc/false_negatives:0',
'auc/false_positives:0', 'auc/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_auc(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_auc(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
auc, update_op = metrics.streaming_auc(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_auc = auc.eval()
for _ in range(10):
self.assertAlmostEqual(initial_auc, auc.eval(), 5)
def testPredictionsOutOfRange(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, -1, 1, -1], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
_, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertRaises(errors_impl.InvalidArgumentError, update_op.eval)
def testAllCorrect(self):
self.allCorrectAsExpected('ROC')
def allCorrectAsExpected(self, curve):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
auc, update_op = metrics.streaming_auc(predictions, labels, curve=curve)
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, auc.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op))
self.assertAlmostEqual(0.5, auc.eval())
def testWeighted1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([2], shape=(1, 1))
auc, update_op = metrics.streaming_auc(
predictions, labels, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(0.5, auc.eval(), 5)
def testWeighted2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([1, 2, 3, 4], shape=(1, 4))
auc, update_op = metrics.streaming_auc(
predictions, labels, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.7, sess.run(update_op), 5)
self.assertAlmostEqual(0.7, auc.eval(), 5)
def testAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 1], shape=(1, 4))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.79166, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-3)
def testAnotherAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1], shape=(1, 7))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.610317, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-3)
def testThirdAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.90277, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-3)
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0, sess.run(update_op))
self.assertAlmostEqual(0, auc.eval())
def testZeroTruePositivesAndFalseNegativesGivesOneAUC(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def testRecallOneAndPrecisionOneGivesOnePRAUC(self):
with self.test_session() as sess:
predictions = array_ops.ones([4], dtype=dtypes_lib.float32)
labels = array_ops.ones([4])
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=num_samples)
noise = np.random.normal(0.0, scale=0.2, size=num_samples)
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
def _enqueue_as_batches(x, enqueue_ops):
x_batches = x.astype(np.float32).reshape((num_batches, batch_size))
x_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(num_batches):
enqueue_ops[i].append(x_queue.enqueue(x_batches[i, :]))
return x_queue.dequeue()
for weights in (None, np.ones(num_samples), np.random.exponential(
scale=1.0, size=num_samples)):
expected_auc = _np_auc(predictions, labels, weights)
with self.test_session() as sess:
        enqueue_ops = [[] for _ in range(num_batches)]
tf_predictions = _enqueue_as_batches(predictions, enqueue_ops)
tf_labels = _enqueue_as_batches(labels, enqueue_ops)
tf_weights = (_enqueue_as_batches(weights, enqueue_ops) if
weights is not None else None)
for i in range(num_batches):
sess.run(enqueue_ops[i])
auc, update_op = metrics.streaming_auc(
tf_predictions,
tf_labels,
curve='ROC',
num_thresholds=500,
weights=tf_weights)
sess.run(variables.local_variables_initializer())
for i in range(num_batches):
sess.run(update_op)
        # Since the streaming AUC is only an approximation, we cannot expect
        # a six-digit match; with more samples and thresholds the
        # approximation should improve.
self.assertAlmostEqual(expected_auc, auc.eval(), 2)
class StreamingDynamicAUCTest(test.TestCase):
def setUp(self):
super(StreamingDynamicAUCTest, self).setUp()
np.random.seed(1)
ops.reset_default_graph()
def testUnknownCurve(self):
with self.assertRaisesRegexp(
ValueError, 'curve must be either ROC or PR, TEST_CURVE unknown'):
metrics.streaming_dynamic_auc(labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
curve='TEST_CURVE')
def testVars(self):
metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)), predictions=array_ops.ones((10, 1)))
_assert_metric_variables(self, ['dynamic_auc/concat_labels/array:0',
'dynamic_auc/concat_labels/size:0',
'dynamic_auc/concat_preds/array:0',
'dynamic_auc/concat_preds/size:0'])
def testMetricsCollection(self):
my_collection_name = '__metrics__'
auc, _ = metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertEqual(ops.get_collection(my_collection_name), [auc])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_dynamic_auc(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in xrange(10):
sess.run(update_op)
# Then verify idempotency.
initial_auc = auc.eval()
for _ in xrange(10):
self.assertAlmostEqual(initial_auc, auc.eval(), 5)
def testAllLabelsOnes(self):
with self.test_session() as sess:
predictions = constant_op.constant([1., 1., 1.])
labels = constant_op.constant([1, 1, 1])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, auc.eval())
def testAllLabelsZeros(self):
with self.test_session() as sess:
predictions = constant_op.constant([1., 1., 1.])
labels = constant_op.constant([0, 0, 0])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, auc.eval())
def testNonZeroOnePredictions(self):
with self.test_session() as sess:
predictions = constant_op.constant([2.5, -2.5, 2.5, -2.5],
dtype=dtypes_lib.float32)
labels = constant_op.constant([1, 0, 1, 0])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(auc.eval(), 1.0)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs)
labels = constant_op.constant(inputs)
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, auc.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant([1, 0, 1, 0])
labels = constant_op.constant([0, 1, 1, 0])
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.5, auc.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0, auc.eval())
def testExceptionOnIncompatibleShapes(self):
with self.test_session() as sess:
predictions = array_ops.ones([5])
labels = array_ops.zeros([6])
with self.assertRaisesRegexp(ValueError, 'Shapes .* are incompatible'):
_, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
def testExceptionOnGreaterThanOneLabel(self):
with self.test_session() as sess:
predictions = constant_op.constant([1, 0.5, 0], dtypes_lib.float32)
labels = constant_op.constant([2, 1, 0])
_, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'.*labels must be 0 or 1, at least one is >1.*'):
sess.run(update_op)
def testExceptionOnNegativeLabel(self):
with self.test_session() as sess:
predictions = constant_op.constant([1, 0.5, 0], dtypes_lib.float32)
labels = constant_op.constant([1, 0, -1])
_, update_op = metrics.streaming_dynamic_auc(labels, predictions)
sess.run(variables.local_variables_initializer())
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
'.*labels must be 0 or 1, at least one is <0.*'):
sess.run(update_op)
def testWithMultipleUpdates(self):
batch_size = 10
num_batches = 100
labels = np.array([])
predictions = np.array([])
tf_labels = variables.Variable(array_ops.ones(batch_size, dtypes_lib.int32),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.int32)
tf_predictions = variables.Variable(
array_ops.ones(batch_size),
collections=[ops.GraphKeys.LOCAL_VARIABLES],
dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_dynamic_auc(tf_labels, tf_predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
for _ in xrange(num_batches):
new_labels = np.random.randint(0, 2, size=batch_size)
noise = np.random.normal(0.0, scale=0.2, size=batch_size)
new_predictions = 0.4 + 0.2 * new_labels + noise
labels = np.concatenate([labels, new_labels])
predictions = np.concatenate([predictions, new_predictions])
sess.run(tf_labels.assign(new_labels))
sess.run(tf_predictions.assign(new_predictions))
sess.run(update_op)
expected_auc = _np_auc(predictions, labels)
self.assertAlmostEqual(expected_auc, auc.eval())
def testAUCPRReverseIncreasingPredictions(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8], dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 1])
auc, update_op = metrics.streaming_dynamic_auc(
labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-5)
def testAUCPRJumbledPredictions(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81], dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1])
auc, update_op = metrics.streaming_dynamic_auc(
labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-6)
def testAUCPRPredictionsLessThanHalf(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.streaming_dynamic_auc(
labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-5)
class StreamingPrecisionRecallAtEqualThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def _testResultsEqual(self, expected_dict, gotten_result):
"""Tests that 2 results (dicts) represent the same data.
Args:
expected_dict: A dictionary with keys that are the names of properties
of PrecisionRecallData and whose values are lists of floats.
gotten_result: A PrecisionRecallData object.
"""
gotten_dict = {k: t.eval() for k, t in gotten_result._asdict().items()}
self.assertItemsEqual(
list(expected_dict.keys()), list(gotten_dict.keys()))
for key, expected_values in expected_dict.items():
self.assertAllClose(expected_values, gotten_dict[key])
def _testCase(self, predictions, labels, expected_result, weights=None):
"""Performs a test given a certain scenario of labels, predictions, weights.
Args:
predictions: The predictions tensor. Of type float32.
labels: The labels tensor. Of type bool.
expected_result: The expected result (dict) that maps to tensors.
weights: Optional weights tensor.
"""
with self.test_session() as sess:
predictions_tensor = constant_op.constant(
predictions, dtype=dtypes_lib.float32)
labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.bool)
weights_tensor = None
if weights:
weights_tensor = constant_op.constant(weights, dtype=dtypes_lib.float32)
gotten_result, update_op = (
metric_ops.precision_recall_at_equal_thresholds(
labels=labels_tensor,
predictions=predictions_tensor,
weights=weights_tensor,
num_thresholds=3))
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self._testResultsEqual(expected_result, gotten_result)
def testVars(self):
metric_ops.precision_recall_at_equal_thresholds(
labels=constant_op.constant([True], dtype=dtypes_lib.bool),
predictions=constant_op.constant([0.42], dtype=dtypes_lib.float32))
_assert_metric_variables(
self, ('precision_recall_at_equal_thresholds/variables/tp_buckets:0',
'precision_recall_at_equal_thresholds/variables/fp_buckets:0'))
def testVarsWithName(self):
metric_ops.precision_recall_at_equal_thresholds(
labels=constant_op.constant([True], dtype=dtypes_lib.bool),
predictions=constant_op.constant([0.42], dtype=dtypes_lib.float32),
name='foo')
_assert_metric_variables(
self, ('foo/variables/tp_buckets:0', 'foo/variables/fp_buckets:0'))
def testValuesAreIdempotent(self):
predictions = constant_op.constant(
np.random.uniform(size=(10, 3)), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np.random.uniform(size=(10, 3)) > 0.5, dtype=dtypes_lib.bool)
result, update_op = metric_ops.precision_recall_at_equal_thresholds(
labels=labels, predictions=predictions)
with self.test_session() as sess:
# Run several updates.
sess.run(variables.local_variables_initializer())
for _ in range(3):
sess.run(update_op)
# Then verify idempotency.
initial_result = {k: value.eval().tolist() for k, value in
result._asdict().items()}
for _ in range(3):
self._testResultsEqual(initial_result, result)
def testAllTruePositives(self):
self._testCase([[1]], [[True]], {
'tp': [1, 1, 1],
'fp': [0, 0, 0],
'tn': [0, 0, 0],
'fn': [0, 0, 0],
'precision': [1.0, 1.0, 1.0],
'recall': [1.0, 1.0, 1.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testAllTrueNegatives(self):
self._testCase([[0]], [[False]], {
'tp': [0, 0, 0],
'fp': [1, 0, 0],
'tn': [0, 1, 1],
'fn': [0, 0, 0],
'precision': [0.0, 0.0, 0.0],
'recall': [0.0, 0.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testAllFalsePositives(self):
self._testCase([[1]], [[False]], {
'tp': [0, 0, 0],
'fp': [1, 1, 1],
'tn': [0, 0, 0],
'fn': [0, 0, 0],
'precision': [0.0, 0.0, 0.0],
'recall': [0.0, 0.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
def testAllFalseNegatives(self):
self._testCase([[0]], [[True]], {
'tp': [1, 0, 0],
'fp': [0, 0, 0],
'tn': [0, 0, 0],
'fn': [0, 1, 1],
'precision': [1.0, 0.0, 0.0],
'recall': [1.0, 0.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
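  # As the cases above suggest, a prediction counts as positive at a threshold
  # when it is at least that threshold; with thresholds [0, 0.5, 1.0] that
  # gives tp = [4, 3, 0] and fp = [2, 0, 0] for the scenario below.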
def testManyValues(self):
self._testCase(
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
[[True, False, False, True, True, True]],
{
'tp': [4, 3, 0],
'fp': [2, 0, 0],
'tn': [0, 2, 2],
'fn': [0, 1, 4],
'precision': [2.0 / 3.0, 1.0, 0.0],
'recall': [1.0, 0.75, 0.0],
'thresholds': [0.0, 0.5, 1.0],
})
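  # Same scenario with per-example weights: the counts become weighted sums,
  # e.g. at threshold 0 the weighted tp is 0.0 + 0.0 + 0.5 + 1.0 = 1.5 and the
  # weighted fp is 0.5 + 2.0 = 2.5, so precision = 1.5 / 4.0 = 0.375.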
def testManyValuesWithWeights(self):
self._testCase(
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]],
[[True, False, False, True, True, True]],
{
'tp': [1.5, 1.5, 0.0],
'fp': [2.5, 0.0, 0.0],
'tn': [0.0, 2.5, 2.5],
'fn': [0.0, 0.0, 1.5],
'precision': [0.375, 1.0, 0.0],
'recall': [1.0, 1.0, 0.0],
'thresholds': [0.0, 0.5, 1.0],
},
weights=[[0.0, 0.5, 2.0, 0.0, 0.5, 1.0]])
class StreamingSpecificityAtSensitivityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7)
_assert_metric_variables(self,
('specificity_at_sensitivity/true_positives:0',
'specificity_at_sensitivity/false_negatives:0',
'specificity_at_sensitivity/false_positives:0',
'specificity_at_sensitivity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_specificity = specificity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_specificity, specificity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.8)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op))
self.assertAlmostEqual(1.0, specificity.eval())
def testSomeCorrectLowSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted1d(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [3]
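    # A single broadcast weight scales every example by the same factor, so
    # the expected specificity matches the unweighted case above (0.6).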
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, weights=weights, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted2d(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, weights=weights, sensitivity=0.4)
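    # Rough derivation, assuming the op picks the threshold whose sensitivity
    # is closest to 0.4: that threshold falls in [0.2, 0.26), where the
    # weighted tn is 1 + 2 + 5 = 8 and the weighted fp is 3 + 4 = 7, giving
    # specificity 8 / 15.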
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(8.0 / 15.0, sess.run(update_op))
self.assertAlmostEqual(8.0 / 15.0, specificity.eval())
class StreamingSensitivityAtSpecificityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7)
_assert_metric_variables(self,
('sensitivity_at_specificity/true_positives:0',
'sensitivity_at_specificity/false_negatives:0',
'sensitivity_at_specificity/false_positives:0',
'sensitivity_at_specificity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_sensitivity = sensitivity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_sensitivity, sensitivity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.8)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.8, sess.run(update_op))
self.assertAlmostEqual(0.8, specificity.eval())
def testSomeCorrectLowSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, weights=weights, specificity=0.4)
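    # Rough derivation, assuming the op picks the smallest threshold whose
    # specificity matches 0.4 (just above 0.2): the weighted tp is
    # 8 + 9 + 10 = 27 and the weighted fn is 6 + 7 = 13, so sensitivity is
    # 27 / 40 = 0.675.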
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.675, sess.run(update_op))
self.assertAlmostEqual(0.675, specificity.eval())
# TODO(nsilberman): Break this up into two sets of tests.
class StreamingPrecisionRecallThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'precision_at_thresholds/true_positives:0',
'precision_at_thresholds/false_positives:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
prec, _ = metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
rec, _ = metrics.streaming_recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [prec, rec])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, precision_op = metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
_, recall_op = metrics.streaming_recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(
ops.get_collection(my_collection_name), [precision_op, recall_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run([prec_op, rec_op])
# Then verify idempotency.
initial_prec = prec.eval()
initial_rec = rec.eval()
for _ in range(10):
self.assertAllClose(initial_prec, prec.eval())
self.assertAllClose(initial_rec, rec.eval())
# TODO(nsilberman): fix tests (passing but incorrect).
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertEqual(1, prec.eval())
self.assertEqual(1, rec.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.5, prec.eval())
self.assertAlmostEqual(0.5, rec.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval())
self.assertAlmostEqual(0, rec.eval())
def testWeights1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
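      # The zero weight drops the first example pair; in the remaining pair
      # the score 1 is a true positive at threshold 0.5 and nothing clears
      # 1.1, so the low/high precision and recall are 1.0 and 0.0.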
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds, weights=weights)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds, weights=weights)
prec_low = prec[0]
prec_high = prec[1]
rec_low = rec[0]
rec_high = rec[1]
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testWeights2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds, weights=weights)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds, weights=weights)
prec_low = prec[0]
prec_high = prec[1]
rec_low = rec[0]
rec_high = rec[1]
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testExtremeThresholds(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
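      # At -1.0 every prediction is positive (tp=3, fp=1 -> precision 0.75,
      # recall 1.0); at 2.0 nothing is, so both metrics drop to 0.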
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
prec_low = prec[0]
prec_high = prec[1]
rec_low = rec[0]
rec_high = rec[1]
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.75, prec_low.eval())
self.assertAlmostEqual(0.0, prec_high.eval())
self.assertAlmostEqual(1.0, rec_low.eval())
self.assertAlmostEqual(0.0, rec_high.eval())
def testZeroLabelsPredictions(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval(), 6)
self.assertAlmostEqual(0, rec.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
tp = 0
fp = 0
fn = 0
tn = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 1:
tp += 1
else:
fp += 1
else:
if labels[i] == 1:
fn += 1
else:
tn += 1
epsilon = 1e-7
expected_prec = tp / (epsilon + tp + fp)
expected_rec = tp / (epsilon + tp + fn)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.test_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
prec, prec_op = metrics.streaming_precision_at_thresholds(tf_predictions,
tf_labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(tf_predictions,
tf_labels,
thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run([prec_op, rec_op])
      # Since this is only approximate, we can't expect a 6-digit match.
      # With a higher number of samples and thresholds, the accuracy should
      # improve.
self.assertAlmostEqual(expected_prec, prec.eval(), 2)
self.assertAlmostEqual(expected_rec, rec.eval(), 2)
class StreamingFPRThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positive_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'false_positive_rate_at_thresholds/false_positives:0',
'false_positive_rate_at_thresholds/true_negatives:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
fpr, _ = metrics.streaming_false_positive_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [fpr])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(
ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(fpr_op)
# Then verify idempotency.
initial_fpr = fpr.eval()
for _ in range(10):
self.assertAllClose(initial_fpr, fpr.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertEqual(0, fpr.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0.5, fpr.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(1, fpr.eval())
def testWeights1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fpr_low = fpr[0]
fpr_high = fpr[1]
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0.0, fpr_low.eval(), places=5)
self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testWeights2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fpr_low = fpr[0]
fpr_high = fpr[1]
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0.0, fpr_low.eval(), places=5)
self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testExtremeThresholds(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
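      # At -1.0 the single negative is a false positive (fpr = 1.0); at 2.0
      # nothing is predicted positive, so fpr = 0.0.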
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
fpr_low = fpr[0]
fpr_high = fpr[1]
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(1.0, fpr_low.eval(), places=5)
self.assertAlmostEqual(0.0, fpr_high.eval(), places=5)
def testZeroLabelsPredictions(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fpr_op)
self.assertAlmostEqual(0, fpr.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
fp = 0
tn = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 0:
fp += 1
else:
if labels[i] == 0:
tn += 1
epsilon = 1e-7
expected_fpr = fp / (epsilon + fp + tn)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.test_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
fpr, fpr_op = metrics.streaming_false_positive_rate_at_thresholds(
tf_predictions, tf_labels, thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run(fpr_op)
      # Since this is only approximate, we can't expect a 6-digit match.
      # With a higher number of samples and thresholds, the accuracy should
      # improve.
self.assertAlmostEqual(expected_fpr, fpr.eval(), 2)
class RecallAtPrecisionTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.recall_at_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
precision=0.7)
_assert_metric_variables(self, ('recall_at_precision/true_positives:0',
'recall_at_precision/false_negatives:0',
'recall_at_precision/false_positives:0',
'recall_at_precision/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.recall_at_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
precision=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.recall_at_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
precision=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_recall = recall.eval()
for _ in range(10):
self.assertAlmostEqual(initial_recall, recall.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=1.0)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, recall.eval())
def testSomeCorrectHighPrecision(self):
predictions_values = [1, .9, .8, .7, .6, .5, .4, .3]
labels_values = [1, 1, 1, 1, 0, 0, 0, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=0.8)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.8, sess.run(update_op))
self.assertAlmostEqual(0.8, recall.eval())
def testSomeCorrectLowPrecision(self):
predictions_values = [1, .9, .8, .7, .6, .5, .4, .3, .2, .1]
labels_values = [1, 1, 0, 0, 0, 0, 0, 0, 0, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
recall, update_op = metrics.recall_at_precision(
labels, predictions, precision=0.4)
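    # Roughly, assuming the op picks the threshold whose precision best
    # matches the target: keeping the top five scores gives precision
    # 2 / 5 = 0.4 exactly, at which point 2 of the 3 positives are found.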
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
target_recall = 2.0 / 3.0
self.assertAlmostEqual(target_recall, sess.run(update_op))
self.assertAlmostEqual(target_recall, recall.eval())
def testWeighted(self):
predictions_values = [1, .9, .8, .7, .6]
labels_values = [1, 1, 0, 0, 1]
weights_values = [1, 1, 3, 4, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
recall, update_op = metrics.recall_at_precision(
labels, predictions, weights=weights, precision=0.4)
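    # Roughly, assuming the op picks the threshold whose precision best
    # matches the target: keeping the top three scores gives weighted
    # tp = 2 and fp = 3 (precision 0.4), recovering 2 of the 3 units of
    # positive weight.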
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
target_recall = 2.0 / 3.0
self.assertAlmostEqual(target_recall, sess.run(update_op))
self.assertAlmostEqual(target_recall, recall.eval())
class StreamingFNRThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negative_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_metric_variables(self, (
'false_negative_rate_at_thresholds/false_negatives:0',
'false_negative_rate_at_thresholds/true_positives:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
fnr, _ = metrics.streaming_false_negative_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [fnr])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(
ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(fnr_op)
# Then verify idempotency.
initial_fnr = fnr.eval()
for _ in range(10):
self.assertAllClose(initial_fnr, fnr.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertEqual(0, fnr.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.5, fnr.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(1, fnr.eval())
def testWeights1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fnr_low = fnr[0]
fnr_high = fnr[1]
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.0, fnr_low.eval(), places=5)
self.assertAlmostEqual(1.0, fnr_high.eval(), places=5)
def testWeights2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds, weights=weights)
fnr_low = fnr[0]
fnr_high = fnr[1]
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.0, fnr_low.eval(), places=5)
self.assertAlmostEqual(1.0, fnr_high.eval(), places=5)
def testExtremeThresholds(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
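      # At -1.0 every prediction is positive, so no positives are missed
      # (fnr = 0.0); at 2.0 all three positives are missed (fnr = 1.0).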
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
fnr_low = fnr[0]
fnr_high = fnr[1]
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0.0, fnr_low.eval())
self.assertAlmostEqual(1.0, fnr_high.eval())
def testZeroLabelsPredictions(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
predictions, labels, thresholds)
sess.run(variables.local_variables_initializer())
sess.run(fnr_op)
self.assertAlmostEqual(0, fnr.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
fn = 0
tp = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 1:
tp += 1
else:
if labels[i] == 1:
fn += 1
epsilon = 1e-7
expected_fnr = fn / (epsilon + fn + tp)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.test_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
fnr, fnr_op = metrics.streaming_false_negative_rate_at_thresholds(
tf_predictions, tf_labels, thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run(fnr_op)
      # Since this is only approximate, we can't expect a 6-digit match.
      # With a higher number of samples and thresholds, the accuracy should
      # improve.
self.assertAlmostEqual(expected_fnr, fnr.eval(), 2)
# TODO(ptucker): Remove when we remove `streaming_recall_at_k`.
# This op will be deprecated soon in favor of `streaming_sparse_recall_at_k`.
# Until then, this test validates that both ops yield the same results.
class StreamingRecallAtKTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
self._batch_size = 4
self._num_classes = 3
self._np_predictions = np.matrix(('0.1 0.2 0.7;'
'0.6 0.2 0.2;'
'0.0 0.9 0.1;'
'0.2 0.0 0.8'))
self._np_labels = [0, 0, 0, 0]
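    # Class 0 (the label for every row) is the top prediction only for the
    # second row, in the top 2 for the second and fourth rows, and in the
    # top 3 for every row, so recall@1/2/3 below is 0.25, 0.5 and 1.0.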
def testVars(self):
metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones(
(self._batch_size,), dtype=dtypes_lib.int32),
k=1)
_assert_metric_variables(self,
('recall_at_1/count:0', 'recall_at_1/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones(
(self._batch_size,), dtype=dtypes_lib.int32),
k=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones(
(self._batch_size,), dtype=dtypes_lib.int32),
k=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testSingleUpdateKIs1(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=1)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=1)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.25, sess.run(update_op))
self.assertEqual(0.25, recall.eval())
self.assertEqual(0.25, sess.run(sp_update_op))
self.assertEqual(0.25, sp_recall.eval())
def testSingleUpdateKIs2(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=2)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, recall.eval())
self.assertEqual(0.5, sess.run(sp_update_op))
self.assertEqual(0.5, sp_recall.eval())
def testSingleUpdateKIs3(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=3)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=3)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, recall.eval())
self.assertEqual(1.0, sess.run(sp_update_op))
self.assertEqual(1.0, sp_recall.eval())
def testSingleUpdateSomeMissingKIs2(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
weights = constant_op.constant(
[0, 1, 0, 1], shape=(self._batch_size,), dtype=dtypes_lib.float32)
recall, update_op = metrics.streaming_recall_at_k(
predictions, labels, k=2, weights=weights)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions,
array_ops.reshape(labels, (self._batch_size, 1)),
k=2,
weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, recall.eval())
self.assertEqual(1.0, sess.run(sp_update_op))
self.assertEqual(1.0, sp_recall.eval())
class StreamingSparsePrecisionTest(test.TestCase):
def _test_streaming_sparse_precision_at_k(self,
predictions,
labels,
k,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_precision_at_top_k(self,
top_k_predictions,
labels,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_precision_at_top_k(
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int32),
labels=labels,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
self.assertTrue(math.isnan(update.eval()))
self.assertTrue(math.isnan(metric.eval()))
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_k(self,
predictions,
labels,
k,
expected,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
predictions = constant_op.constant(predictions, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_average_precision_at_k(
predictions, labels, k, weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
local_variables = variables.local_variables()
variables.variables_initializer(local_variables).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertAlmostEqual(expected, update.eval())
self.assertAlmostEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_top_k(self,
top_k_predictions,
labels,
expected,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_average_precision_at_top_k(
top_k_predictions, labels, weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
local_variables = variables.local_variables()
variables.variables_initializer(local_variables).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertAlmostEqual(expected, update.eval())
self.assertAlmostEqual(expected, metric.eval())
def test_top_k_rank_invalid(self):
with self.test_session():
# top_k_predictions has rank < 2.
top_k_predictions = [9, 4, 6, 2, 0]
sp_labels = sparse_tensor.SparseTensorValue(
indices=np.array([[0,], [1,], [2,]], np.int64),
values=np.array([2, 7, 8], np.int64),
dense_shape=np.array([10,], np.int64))
with self.assertRaises(ValueError):
precision, _ = metrics.streaming_sparse_precision_at_top_k(
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int64),
labels=sp_labels)
variables.variables_initializer(variables.local_variables()).run()
precision.eval()
def test_average_precision(self):
# Example 1.
# Matches example here:
# fastml.com/what-you-wanted-to-know-about-mean-average-precision
labels_ex1 = (0, 1, 2, 3, 4)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
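    # Scores sorted descending give class order (5, 3, 6, 0, 1, 2); of the top
    # 4, only classes 3 and 0 are relevant, so precision@k is 0/1, 1/2, 1/3,
    # 2/4, and AP@k averages precision at each relevant hit over k,
    # e.g. AP@4 = (1/2 + 2/4) / 4.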
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])
# Example 2.
labels_ex2 = (0, 2, 4, 5, 6)
labels = np.array([labels_ex2], dtype=np.int64)
predictions_ex2 = (0.3, 0.5, 0.0, 0.4, 0.0, 0.1, 0.2)
predictions = (predictions_ex2,)
predictions_top_k_ex2 = (1, 3, 0, 6, 5)
precision_ex2 = (0.0 / 1, 0.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex2 = (0.0 / 1, 0.0 / 2, precision_ex2[2] / 3,
(precision_ex2[2] + precision_ex2[3]) / 4)
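    # Here classes 0, 6 and 5 are the relevant ones among the top 5, so
    # precision@k is 0/1, 0/2, 1/3, 2/4, and e.g. AP@4 = (1/3 + 2/4) / 4.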
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex2[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex2[:k],), labels, expected=precision_ex2[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex2[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex2[:k],), labels, expected=avg_precision_ex2[i])
# Both examples, we expect both precision and average precision to be the
# average of the 2 examples.
labels = np.array([labels_ex1, labels_ex2], dtype=np.int64)
predictions = (predictions_ex1, predictions_ex2)
streaming_precision = [(ex1 + ex2) / 2
for ex1, ex2 in zip(precision_ex1, precision_ex2)]
streaming_average_precision = [
(ex1 + ex2) / 2
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=streaming_precision[i])
predictions_top_k = (predictions_top_k_ex1[:k], predictions_top_k_ex2[:k])
self._test_streaming_sparse_precision_at_top_k(
predictions_top_k, labels, expected=streaming_precision[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=streaming_average_precision[i])
self._test_streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels, expected=streaming_average_precision[i])
# Weighted examples, we expect streaming average precision to be the
# weighted average of the 2 examples.
weights = (0.3, 0.6)
streaming_average_precision = [
(weights[0] * ex1 + weights[1] * ex2) / (weights[0] + weights[1])
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_average_precision_at_k(
predictions,
labels,
k,
expected=streaming_average_precision[i],
weights=weights)
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k], predictions_top_k_ex2[:k]),
labels,
expected=streaming_average_precision[i],
weights=weights)
def test_average_precision_some_labels_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
labels_ex1 = (-1, 0, 1, 2, 3, 4, 7)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
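    # After dropping the out-of-range values -1 and 7, the label set matches
    # example 1 of test_average_precision, so the expected precision and
    # average precision values are identical.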
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])
def test_average_precision_at_top_k_static_shape_check(self):
predictions_top_k = array_ops.placeholder(shape=(2, None),
dtype=dtypes_lib.int64)
labels = np.array(((1,), (2,)), dtype=np.int64)
# Fails due to non-static predictions_idx shape.
with self.assertRaises(ValueError):
metric_ops.streaming_sparse_average_precision_at_top_k(predictions_top_k,
labels)
predictions_top_k = (2, 1)
    # Fails since rank of predictions_idx is less than two (a 1-D tensor).
with self.assertRaises(ValueError):
metric_ops.streaming_sparse_average_precision_at_top_k(predictions_top_k,
labels)
predictions_top_k = ((2,), (1,))
# Valid static shape.
metric_ops.streaming_sparse_average_precision_at_top_k(predictions_top_k,
labels)
def test_one_label_at_k1_nan(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,1,2 have 0 predictions, classes -1 and 4 are out of range.
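      # With no top-1 predictions for the class, precision is 0/0 and the
      # metric reports NaN.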
for class_id in (-1, 0, 1, 2, 4):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_one_label_at_k1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=1.0 / 2, class_id=3)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=1.0 / 2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2)
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_labels(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0, class_id=class_id)
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, 2 correct predictions.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=3.0 / 10)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=3.0 / 10)
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sp_labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2],
[1, 3]],
# values -1 and 10 are outside the [0, n_classes) range and are ignored.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, 2 correct predictions.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=2.0 / 2, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=1.0 / 1, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=0.0 / 1, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=3.0 / 10)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=3.0 / 10)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_3d_no_labels(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 predictions, all correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=4.0 / 4, class_id=2)
# Class 5: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=2.0 / 2, class_id=5)
# Class 7: 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2, class_id=7)
# All classes: 20 predictions, 7 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=7.0 / 20)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=7.0 / 20)
def test_3d_ignore_all(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
for class_id in xrange(10):
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: no predictions.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
# Class 7: 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
def test_sparse_tensor_value(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
labels = [[0, 0, 0, 1], [0, 0, 1, 0]]
expected_precision = 0.5
with self.test_session():
_, precision = metrics.streaming_sparse_precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=_binary_2d_label_to_sparse_value(labels),
k=1)
variables.variables_initializer(variables.local_variables()).run()
self.assertEqual(expected_precision, precision.eval())
class StreamingSparseRecallTest(test.TestCase):
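  # Recall@k here is (labels found in the top-k predictions) / (total labels),
  # optionally restricted to a single class_id and weighted per example; a
  # zero (weighted) label count yields NaN.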
def _test_streaming_sparse_recall_at_k(self,
predictions,
labels,
k,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_sparse_recall_at_top_k(self,
labels,
top_k_predictions,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metric_ops.sparse_recall_at_top_k(
labels=labels,
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int32),
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
self.assertTrue(math.isnan(update.eval()))
self.assertTrue(math.isnan(metric.eval()))
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def test_one_label_at_k1_nan(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
# Classes 0,1 have 0 labels, 0 predictions, classes -1 and 4 are out of
# range.
for labels in (sparse_labels, dense_labels):
for class_id in (-1, 0, 1, 4):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_one_label_at_k1_no_predictions(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 0 predictions.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.0, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0, class_id=2)
def test_one_label_at_k1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, class_id=3)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2)
def test_one_label_at_k1_weighted(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
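      # Only the first example contains label 3, so its weight alone decides
      # the class-3 recall; a zero weight there makes the denominator 0 and
      # yields NaN.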
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=3, weights=(0.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=3, weights=(0.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0,))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(2.0,))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(2.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 0.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=3,
weights=(0.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 1.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=3,
weights=(0.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 0.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 1.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=2.0 / 2,
class_id=3,
weights=(2.0, 3.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2,
class_id=3,
weights=(2.0, 3.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=3.0 / 3,
class_id=3,
weights=(3.0, 2.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=3.0 / 3,
class_id=3,
weights=(3.0, 2.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=0.3 / 0.3,
class_id=3,
weights=(0.3, 0.6))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.3 / 0.3,
class_id=3,
weights=(0.3, 0.6))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=0.6 / 0.6,
class_id=3,
weights=(0.6, 0.3))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.6 / 0.6,
class_id=3,
weights=(0.6, 0.3))
# All classes: 2 labels, 2 predictions, 1 correct.
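      # Weighted recall = sum(weight * true positives) / sum(weight * labels).
      # E.g. with weights (2.0, 3.0): the hit in example 1 contributes 2.0 and
      # each example has one label, so recall = 2.0 / (2.0 + 3.0).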
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, weights=(0.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=(0.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(1.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(2.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(2.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0, 1.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(1.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=3.0 / 5, weights=(3.0, 2.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=3.0 / 5, weights=(3.0, 2.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.3 / 0.9, weights=(0.3, 0.6))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.3 / 0.9, weights=(0.3, 0.6))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.6 / 0.9, weights=(0.6, 0.3))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.6 / 0.9, weights=(0.6, 0.3))
def test_three_labels_at_k5_nan(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 8: 1 label, no predictions.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=8)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, class_id=8)
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 2, class_id=2)
      # Class 5: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, class_id=7)
# All classes: 6 labels, 3 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=3.0 / 6)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=3.0 / 6)
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) count in denominator."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sp_labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2],
[1, 3]],
# values -1 and 10 are outside the [0, n_classes) range.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=2.0 / 2,
class_id=2)
self._test_sparse_recall_at_top_k(
sp_labels,
top_k_predictions,
expected=2.0 / 2,
class_id=2)
    # Class 5: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=1.0 / 1,
class_id=5)
self._test_sparse_recall_at_top_k(
sp_labels,
top_k_predictions,
expected=1.0 / 1,
class_id=5)
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=0.0 / 1,
class_id=7)
self._test_sparse_recall_at_top_k(
sp_labels,
top_k_predictions,
expected=0.0 / 1,
class_id=7)
# All classes: 8 labels, 3 correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions, labels=sp_labels, k=5, expected=3.0 / 8)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=3.0 / 8)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
sparse_labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])
    dense_labels = np.array(
        [[[2, 7, 8], [1, 2, 5]],
         [[1, 2, 5], [2, 7, 8]]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_3d_no_predictions(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
sparse_labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])
    dense_labels = np.array(
        [[[2, 7, 8], [1, 2, 5]],
         [[1, 2, 5], [2, 7, 8]]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,8 have 0 predictions, >=1 label.
for class_id in (1, 8):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 labels, all correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=4.0 / 4, class_id=2)
# Class 5: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 2, class_id=5)
# Class 7: 2 labels, 1 incorrect.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, class_id=7)
# All classes: 12 labels, 7 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=7.0 / 12)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=7.0 / 12)
def test_3d_ignore_all(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
for class_id in xrange(10):
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 2 labels, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[1, 0], [1, 0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 2.0,
class_id=7,
weights=[[1, 0], [1, 0]])
# Class 7: No labels.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[0, 1], [0, 1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=7,
weights=[[0, 1], [0, 1]])
def test_sparse_tensor_value(self):
predictions = [[0.1, 0.3, 0.2, 0.4],
[0.1, 0.2, 0.3, 0.4]]
labels = [[0, 0, 1, 0],
[0, 0, 0, 1]]
expected_recall = 0.5
with self.test_session():
_, recall = metrics.streaming_sparse_recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=_binary_2d_label_to_sparse_value(labels),
k=1)
variables.variables_initializer(variables.local_variables()).run()
self.assertEqual(expected_recall, recall.eval())
class StreamingMeanAbsoluteErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_absolute_error/count:0', 'mean_absolute_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_absolute_error(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
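    # Absolute errors are (1, 1, 4, 5); only the 2nd and 4th carry weight, so
    # the weighted mean is (1 + 5) / 2 = 3.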
error, update_op = metrics.streaming_mean_absolute_error(predictions,
labels, weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(3, sess.run(update_op))
self.assertEqual(3, error.eval())
class StreamingMeanRelativeErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_relative_error/count:0', 'mean_relative_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
normalizer = random_ops.random_normal((10, 3), seed=3)
error, update_op = metrics.streaming_mean_relative_error(predictions,
labels, normalizer)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateNormalizedByLabels(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
np_labels = np.asarray([1, 3, 2, 3], dtype=np.float32)
expected_error = np.mean(
np.divide(np.absolute(np_predictions - np_labels), np_labels))
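    # Element-wise relative errors are (1/1, 1/3, 4/2, 5/3), whose mean is 1.25.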
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(np_labels, shape=(1, 4))
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer=labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(expected_error, sess.run(update_op))
self.assertEqual(expected_error, error.eval())
def testSingleUpdateNormalizedByZeros(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer=array_ops.zeros_like(labels))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.0, sess.run(update_op))
self.assertEqual(0.0, error.eval())
class StreamingMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self, ('mean_squared_error/count:0', 'mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
predictions = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
labels = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError(self):
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
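    # Squared errors are (1, 1, 16); their mean is 18 / 3 = 6.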
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(6, sess.run(update_op))
self.assertEqual(6, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
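    # Squared errors are (1, 1, 16, 25); with weights (0, 1, 0, 1) the
    # weighted mean is (1 + 25) / 2 = 13.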
error, update_op = metrics.streaming_mean_squared_error(predictions, labels,
weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(13, sess.run(update_op))
self.assertEqual(13, error.eval())
def testMultipleBatchesOfSizeOne(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
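      # Batch 1 squared errors sum to 81 + 25 + 16 = 122 and batch 2 to
      # 36 + 1 + 49 = 86, so the running MSE is (122 + 86) / 6 after both
      # updates.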
error, update_op = metrics.streaming_mean_squared_error(predictions,
labels)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(208.0 / 6, sess.run(update_op), 5)
self.assertAlmostEqual(208.0 / 6, error.eval(), 5)
def testMetricsComputedConcurrently(self):
with self.test_session() as sess:
# Create the queue that populates one set of predictions.
preds_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue0, [10, 8, 6])
_enqueue_vector(sess, preds_queue0, [-4, 3, -1])
predictions0 = preds_queue0.dequeue()
      # Create the queue that populates another set of predictions.
preds_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue1, [0, 1, 1])
_enqueue_vector(sess, preds_queue1, [1, 1, 0])
predictions1 = preds_queue1.dequeue()
# Create the queue that populates one set of labels.
labels_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue0, [1, 3, 2])
_enqueue_vector(sess, labels_queue0, [2, 4, 6])
labels0 = labels_queue0.dequeue()
# Create the queue that populates another set of labels.
labels_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue1, [-5, -3, -1])
_enqueue_vector(sess, labels_queue1, [5, 4, 3])
labels1 = labels_queue1.dequeue()
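      # Stream 0 reproduces the 208 / 6 MSE above; stream 1's squared errors
      # sum to 45 + 34 = 79 over 6 elements.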
mse0, update_op0 = metrics.streaming_mean_squared_error(
predictions0, labels0, name='msd0')
mse1, update_op1 = metrics.streaming_mean_squared_error(
predictions1, labels1, name='msd1')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1])
sess.run([update_op0, update_op1])
mse0, mse1 = sess.run([mse0, mse1])
self.assertAlmostEqual(208.0 / 6, mse0, 5)
self.assertAlmostEqual(79.0 / 6, mse1, 5)
def testMultipleMetricsOnMultipleBatchesOfSizeOne(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
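      # Absolute errors sum to 18 + 14 = 32 and squared errors to 122 + 86 =
      # 208 across the 6 elements, matching the assertions below.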
mae, ma_update_op = metrics.streaming_mean_absolute_error(predictions,
labels)
mse, ms_update_op = metrics.streaming_mean_squared_error(predictions,
labels)
sess.run(variables.local_variables_initializer())
sess.run([ma_update_op, ms_update_op])
sess.run([ma_update_op, ms_update_op])
self.assertAlmostEqual(32.0 / 6, mae.eval(), 5)
self.assertAlmostEqual(208.0 / 6, mse.eval(), 5)
class StreamingRootMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_metric_variables(
self,
('root_mean_squared_error/count:0', 'root_mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_root_mean_squared_error(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
with self.test_session() as sess:
predictions = constant_op.constant(
0.0, shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(0.0, shape=(1, 3), dtype=dtypes_lib.float32)
rmse, update_op = metrics.streaming_root_mean_squared_error(predictions,
labels)
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, rmse.eval())
def testSingleUpdateWithError(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
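      # Squared errors are (1, 1, 16) with mean 6, so RMSE is sqrt(6).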
rmse, update_op = metrics.streaming_root_mean_squared_error(predictions,
labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(6), update_op.eval(), 5)
self.assertAlmostEqual(math.sqrt(6), rmse.eval(), 5)
def testSingleUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
rmse, update_op = metrics.streaming_root_mean_squared_error(predictions,
labels,
weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(13), sess.run(update_op))
self.assertAlmostEqual(math.sqrt(13), rmse.eval(), 5)
class StreamingCovarianceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]))
_assert_metric_variables(self, (
'covariance/comoment:0',
'covariance/count:0',
'covariance/mean_label:0',
'covariance/mean_prediction:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
cov, _ = metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [cov])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
labels = random_ops.random_normal((10, 3), seed=2)
predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
cov, update_op = metrics.streaming_covariance(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_cov = cov.eval()
for _ in range(10):
self.assertEqual(initial_cov, cov.eval())
def testSingleUpdateIdentical(self):
with self.test_session() as sess:
predictions = math_ops.to_float(math_ops.range(10))
labels = math_ops.to_float(math_ops.range(10))
cov, update_op = metrics.streaming_covariance(predictions, labels)
expected_cov = np.cov(np.arange(10), np.arange(10))[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, sess.run(update_op), 5)
self.assertAlmostEqual(expected_cov, cov.eval(), 5)
def testSingleUpdateNonIdentical(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(predictions, labels)
expected_cov = np.cov([2, 4, 6], [1, 3, 2])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, update_op.eval())
self.assertAlmostEqual(expected_cov, cov.eval())
def testSingleUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 7], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 3, 1], shape=(1, 4), dtype=dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(
predictions, labels, weights=weights)
expected_cov = np.cov([2, 4, 6, 8],
[1, 3, 2, 7],
fweights=[0, 1, 3, 1])[0, 1]
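      # The metric's weights act as frequency weights here, so the expected
      # value is numpy's frequency-weighted sample covariance of the two
      # vectors.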
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, sess.run(update_op))
self.assertAlmostEqual(expected_cov, cov.eval())
def testMultiUpdateWithErrorNoWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
cov, update_op = metrics.streaming_covariance(predictions_t, labels_t)
sess.run(variables.local_variables_initializer())
prev_expected_cov = NAN
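      # Before each update the metric should still report the covariance of
      # the batches seen so far (NaN before any data); after the update it
      # should match np.cov over all samples fed in so far.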
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)]
}
self.assertEqual(np.isnan(prev_expected_cov),
np.isnan(sess.run(cov, feed_dict=feed_dict)))
if not np.isnan(prev_expected_cov):
self.assertAlmostEqual(
prev_expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
expected_cov = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(
expected_cov, sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(
expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
prev_expected_cov = expected_cov
def testMultiUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
weights = np.tile(np.arange(n // 10), n // 10)
np.random.shuffle(weights)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
cov, update_op = metrics.streaming_covariance(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
prev_expected_cov = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
self.assertEqual(np.isnan(prev_expected_cov),
np.isnan(sess.run(cov, feed_dict=feed_dict)))
if not np.isnan(prev_expected_cov):
self.assertAlmostEqual(
prev_expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
expected_cov = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(
expected_cov, sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(
expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
prev_expected_cov = expected_cov
class StreamingPearsonRTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]))
_assert_metric_variables(self, (
'pearson_r/covariance/comoment:0',
'pearson_r/covariance/count:0',
'pearson_r/covariance/mean_label:0',
'pearson_r/covariance/mean_prediction:0',
'pearson_r/variance_labels/count:0',
'pearson_r/variance_labels/comoment:0',
'pearson_r/variance_labels/mean_label:0',
'pearson_r/variance_labels/mean_prediction:0',
'pearson_r/variance_predictions/comoment:0',
'pearson_r/variance_predictions/count:0',
'pearson_r/variance_predictions/mean_label:0',
'pearson_r/variance_predictions/mean_prediction:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
pearson_r, _ = metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [pearson_r])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
labels = random_ops.random_normal((10, 3), seed=2)
predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
pearson_r, update_op = metrics.streaming_pearson_correlation(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_r = pearson_r.eval()
for _ in range(10):
self.assertEqual(initial_r, pearson_r.eval())
def testSingleUpdateIdentical(self):
with self.test_session() as sess:
predictions = math_ops.to_float(math_ops.range(10))
labels = math_ops.to_float(math_ops.range(10))
pearson_r, update_op = metrics.streaming_pearson_correlation(predictions,
labels)
expected_r = np.corrcoef(np.arange(10), np.arange(10))[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, sess.run(update_op), 5)
self.assertAlmostEqual(expected_r, pearson_r.eval(), 5)
def testSingleUpdateNonIdentical(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(predictions,
labels)
expected_r = np.corrcoef([2, 4, 6], [1, 3, 2])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, update_op.eval())
self.assertAlmostEqual(expected_r, pearson_r.eval())
def testSingleUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
predictions = np.array([2, 4, 6, 8])
labels = np.array([1, 3, 2, 7])
weights = np.array([0, 1, 3, 1])
predictions_t = constant_op.constant(
predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels_t = constant_op.constant(
labels, shape=(1, 4), dtype=dtypes_lib.float32)
weights_t = constant_op.constant(
weights, shape=(1, 4), dtype=dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
cmat = np.cov(predictions, labels, fweights=weights)
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
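      # For reference: cmat is the 2x2 weighted covariance matrix, so the
      # weighted Pearson correlation is cov(x, y) / sqrt(var(x) * var(y)),
      # i.e. cmat[0, 1] / sqrt(cmat[0, 0] * cmat[1, 1]).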
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, sess.run(update_op))
self.assertAlmostEqual(expected_r, pearson_r.eval())
def testMultiUpdateWithErrorNoWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t)
sess.run(variables.local_variables_initializer())
prev_expected_r = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)]
}
self.assertEqual(np.isnan(prev_expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(prev_expected_r):
self.assertAlmostEqual(
prev_expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
expected_r = np.corrcoef(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(
expected_r, sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(
expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
prev_expected_r = expected_r
def testMultiUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
weights = np.tile(np.arange(n // 10), n // 10)
np.random.shuffle(weights)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
prev_expected_r = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
self.assertEqual(np.isnan(prev_expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(prev_expected_r):
self.assertAlmostEqual(
prev_expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
cmat = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
self.assertAlmostEqual(
expected_r, sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(
expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
prev_expected_r = expected_r
def testMultiUpdateWithErrorAndSingletonBatches(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
weights = (np.arange(n).reshape(n//stride, stride) % stride == 0)
for row in weights:
np.random.shuffle(row)
# Now, weights is one-hot by row - one item per batch has non-zero weight.
weights = weights.reshape((n,))
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
cmat = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
actual_r = sess.run(update_op, feed_dict=feed_dict)
self.assertEqual(np.isnan(expected_r), np.isnan(actual_r))
self.assertEqual(np.isnan(expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(expected_r):
self.assertAlmostEqual(
expected_r, actual_r, 5)
self.assertAlmostEqual(
expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
class StreamingMeanCosineDistanceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1)
_assert_metric_variables(self, (
'mean_cosine_distance/count:0',
'mean_cosine_distance/total:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=1)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError1(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 5)
self.assertAlmostEqual(1, error.eval(), 5)
def testSingleUpdateWithError2(self):
np_predictions = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'-0.665139432070255 -0.739487441769973 -0.103671883216994;'
'0.707106781186548 -0.707106781186548 0'))
np_labels = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'0.665139432070255 0.739487441769973 0.103671883216994;'
'0.707106781186548 0.707106781186548 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.0, error.eval(), 5)
def testSingleUpdateWithErrorAndWeights1(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0], shape=(3, 1, 1), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithErrorAndWeights2(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 1], shape=(3, 1, 1), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.5, update_op.eval())
self.assertEqual(1.5, error.eval())
class PcntBelowThreshTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_percentage_less(values=array_ops.ones((10,)), threshold=2)
_assert_metric_variables(self, (
'percentage_below_threshold/count:0',
'percentage_below_threshold/total:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_percentage_less(
values=array_ops.ones((10,)),
threshold=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_percentage_less(
values=array_ops.ones((10,)),
threshold=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testOneUpdate(self):
with self.test_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.streaming_percentage_less(
values, 100, name='high')
pcnt1, update_op1 = metrics.streaming_percentage_less(
values, 7, name='medium')
pcnt2, update_op2 = metrics.streaming_percentage_less(
values, 1, name='low')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1, update_op2])
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.75, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
def testSomePresentOneUpdate(self):
with self.test_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0, 1], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.streaming_percentage_less(
values, 100, weights=weights, name='high')
pcnt1, update_op1 = metrics.streaming_percentage_less(
values, 7, weights=weights, name='medium')
pcnt2, update_op2 = metrics.streaming_percentage_less(
values, 1, weights=weights, name='low')
sess.run(variables.local_variables_initializer())
self.assertListEqual([1.0, 0.5, 0.0],
sess.run([update_op0, update_op1, update_op2]))
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.5, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
class StreamingMeanIOUTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2)
_assert_metric_variables(self, ('mean_iou/total_confusion_matrix:0',))
def testMetricsCollections(self):
my_collection_name = '__metrics__'
mean_iou, _ = metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean_iou])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10, 3])
labels = array_ops.ones([10, 4])
with self.assertRaises(ValueError):
metrics.streaming_mean_iou(predictions, labels, num_classes=2)
def testLabelsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10])
labels = array_ops.ones([10])
weights = array_ops.zeros([9])
with self.assertRaises(ValueError):
metrics.streaming_mean_iou(
predictions, labels, num_classes=2, weights=weights)
def testValueTensorIsIdempotent(self):
num_classes = 3
predictions = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=2)
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes=num_classes)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_miou = miou.eval()
for _ in range(10):
self.assertEqual(initial_miou, miou.eval())
def testMultipleUpdates(self):
num_classes = 3
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
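      # Rough hand check: with rows = labels and columns = predictions the
      # accumulated confusion matrix is [[1, 0, 0], [1, 1, 1], [0, 1, 0]], and
      # per-class IOU = diag / (row_sum + col_sum - diag) = [1/2, 1/4, 0].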
desired_output = np.mean([1.0 / 2.0, 1.0 / 4.0, 0.])
self.assertEqual(desired_output, miou.eval())
def testMultipleUpdatesWithWeights(self):
num_classes = 2
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
weights = weights_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
for _ in range(6):
sess.run(update_op)
desired_output = np.mean([2.0 / 3.0, 1.0 / 2.0])
self.assertAlmostEqual(desired_output, miou.eval())
def testMultipleUpdatesWithMissingClass(self):
    # Test the case where there are no predictions and labels for
# one class, and thus there is one row and one column with
# zero entries in the confusion matrix.
num_classes = 3
with self.test_session() as sess:
# Create the queue that populates the predictions.
# There is no prediction for class 2.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
      # There is no label for class 2.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
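      # Rough hand check: class 2 never occurs in either predictions or
      # labels, so its row and column in the confusion matrix are all zero and
      # it is excluded from the mean; only IOU(class 0) = 1/3 and
      # IOU(class 1) = 2/4 enter the average below.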
desired_output = np.mean([1.0 / 3.0, 2.0 / 4.0])
self.assertAlmostEqual(desired_output, miou.eval())
def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
predictions = array_ops.concat(
[
constant_op.constant(
0, shape=[5]), constant_op.constant(
1, shape=[5])
],
0)
labels = array_ops.concat(
[
constant_op.constant(
0, shape=[3]), constant_op.constant(
1, shape=[7])
],
0)
num_classes = 2
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
confusion_matrix = update_op.eval()
self.assertAllEqual([[3, 0], [2, 5]], confusion_matrix)
desired_miou = np.mean([3. / 5., 5. / 7.])
self.assertAlmostEqual(desired_miou, miou.eval())
def testAllCorrect(self):
predictions = array_ops.zeros([40])
labels = array_ops.zeros([40])
num_classes = 1
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertEqual(40, update_op.eval()[0])
self.assertEqual(1.0, miou.eval())
def testAllWrong(self):
predictions = array_ops.zeros([40])
labels = array_ops.ones([40])
num_classes = 2
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[0, 0], [40, 0]], update_op.eval())
self.assertEqual(0., miou.eval())
def testResultsWithSomeMissing(self):
predictions = array_ops.concat(
[
constant_op.constant(
0, shape=[5]), constant_op.constant(
1, shape=[5])
],
0)
labels = array_ops.concat(
[
constant_op.constant(
0, shape=[3]), constant_op.constant(
1, shape=[7])
],
0)
num_classes = 2
weights = array_ops.concat(
[
constant_op.constant(
0, shape=[1]), constant_op.constant(
1, shape=[8]), constant_op.constant(
0, shape=[1])
],
0)
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[2, 0], [2, 4]], update_op.eval())
desired_miou = np.mean([2. / 4., 4. / 6.])
self.assertAlmostEqual(desired_miou, miou.eval())
def testMissingClassInLabels(self):
labels = constant_op.constant([
[[0, 0, 1, 1, 0, 0],
[1, 0, 0, 0, 0, 1]],
[[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0]]])
predictions = constant_op.constant([
[[0, 0, 2, 1, 1, 0],
[0, 1, 2, 2, 0, 1]],
[[0, 0, 2, 1, 1, 1],
[1, 1, 2, 0, 0, 0]]])
num_classes = 3
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[7, 4, 3], [3, 5, 2], [0, 0, 0]], update_op.eval())
self.assertAlmostEqual(
1 / 3 * (7 / (7 + 3 + 7) + 5 / (5 + 4 + 5) + 0 / (0 + 5 + 0)),
miou.eval())
def testMissingClassOverallSmall(self):
labels = constant_op.constant([0])
predictions = constant_op.constant([0])
num_classes = 2
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[1, 0], [0, 0]], update_op.eval())
self.assertAlmostEqual(1, miou.eval())
def testMissingClassOverallLarge(self):
labels = constant_op.constant([
[[0, 0, 1, 1, 0, 0],
[1, 0, 0, 0, 0, 1]],
[[1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0]]])
predictions = constant_op.constant([
[[0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 1, 1]],
[[0, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0]]])
num_classes = 3
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[9, 5, 0], [3, 7, 0], [0, 0, 0]], update_op.eval())
self.assertAlmostEqual(
1 / 2 * (9 / (9 + 3 + 5) + 7 / (7 + 5 + 3)), miou.eval())
class StreamingConcatTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_concat(values=array_ops.ones((10,)))
_assert_metric_variables(self, (
'streaming_concat/array:0',
'streaming_concat/size:0',
))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
value, _ = metrics.streaming_concat(
values=array_ops.ones((10,)), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [value])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_concat(
values=array_ops.ones((10,)), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testNextArraySize(self):
next_array_size = metric_ops._next_array_size # pylint: disable=protected-access
with self.test_session():
self.assertEqual(next_array_size(2, growth_factor=2).eval(), 2)
self.assertEqual(next_array_size(3, growth_factor=2).eval(), 4)
self.assertEqual(next_array_size(4, growth_factor=2).eval(), 4)
self.assertEqual(next_array_size(5, growth_factor=2).eval(), 8)
self.assertEqual(next_array_size(6, growth_factor=2).eval(), 8)
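      # As the assertions above suggest, _next_array_size appears to return
      # the smallest power of growth_factor that is >= the requested size,
      # which is how streaming_concat grows its internal buffer geometrically.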
def testStreamingConcat(self):
with self.test_session() as sess:
values = array_ops.placeholder(dtypes_lib.int32, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: [0, 1, 2]})
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run([update_op], feed_dict={values: [3, 4]})
self.assertAllEqual([0, 1, 2, 3, 4], concatenated.eval())
sess.run([update_op], feed_dict={values: [5, 6, 7, 8, 9]})
self.assertAllEqual(np.arange(10), concatenated.eval())
def testStreamingConcatStringValues(self):
with self.test_session() as sess:
values = array_ops.placeholder(dtypes_lib.string, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertItemsEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: ['a', 'b', 'c']})
self.assertItemsEqual([b'a', b'b', b'c'], concatenated.eval())
sess.run([update_op], feed_dict={values: ['d', 'e']})
self.assertItemsEqual([b'a', b'b', b'c', b'd', b'e'], concatenated.eval())
sess.run([update_op], feed_dict={values: ['f', 'g', 'h', 'i', 'j']})
self.assertItemsEqual(
[b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j'],
concatenated.eval())
def testStreamingConcatMaxSize(self):
with self.test_session() as sess:
values = math_ops.range(3)
concatenated, update_op = metrics.streaming_concat(values, max_size=5)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())
def testStreamingConcat2D(self):
with self.test_session() as sess:
values = array_ops.reshape(math_ops.range(3), (3, 1))
concatenated, update_op = metrics.streaming_concat(values, axis=-1)
sess.run(variables.local_variables_initializer())
for _ in range(10):
sess.run([update_op])
self.assertAllEqual([[0] * 10, [1] * 10, [2] * 10], concatenated.eval())
def testStreamingConcatErrors(self):
with self.assertRaises(ValueError):
metrics.streaming_concat(array_ops.placeholder(dtypes_lib.float32))
values = array_ops.zeros((2, 3))
with self.assertRaises(ValueError):
metrics.streaming_concat(values, axis=-3, max_size=3)
with self.assertRaises(ValueError):
metrics.streaming_concat(values, axis=2, max_size=3)
with self.assertRaises(ValueError):
metrics.streaming_concat(
array_ops.placeholder(dtypes_lib.float32, [None, None]))
def testStreamingConcatReset(self):
with self.test_session() as sess:
values = array_ops.placeholder(dtypes_lib.int32, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: [0, 1, 2]})
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run(variables.local_variables_initializer())
sess.run([update_op], feed_dict={values: [3, 4]})
self.assertAllEqual([3, 4], concatenated.eval())
class AggregateMetricsTest(test.TestCase):
def testAggregateNoMetricsRaisesValueError(self):
with self.assertRaises(ValueError):
metrics.aggregate_metrics()
def testAggregateSingleMetricReturnsOneItemLists(self):
values = array_ops.ones((10, 4))
value_tensors, update_ops = metrics.aggregate_metrics(
metrics.streaming_mean(values))
self.assertEqual(len(value_tensors), 1)
self.assertEqual(len(update_ops), 1)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, update_ops[0].eval())
self.assertEqual(1, value_tensors[0].eval())
def testAggregateMultipleMetricsReturnsListsInOrder(self):
predictions = array_ops.ones((10, 4))
labels = array_ops.ones((10, 4)) * 3
value_tensors, update_ops = metrics.aggregate_metrics(
metrics.streaming_mean_absolute_error(predictions, labels),
metrics.streaming_mean_squared_error(predictions, labels))
self.assertEqual(len(value_tensors), 2)
self.assertEqual(len(update_ops), 2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(2, update_ops[0].eval())
self.assertEqual(4, update_ops[1].eval())
self.assertEqual(2, value_tensors[0].eval())
self.assertEqual(4, value_tensors[1].eval())
class AggregateMetricMapTest(test.TestCase):
def testAggregateMultipleMetricsReturnsListsInOrder(self):
predictions = array_ops.ones((10, 4))
labels = array_ops.ones((10, 4)) * 3
names_to_values, names_to_updates = metrics.aggregate_metric_map({
'm1': metrics.streaming_mean_absolute_error(predictions, labels),
'm2': metrics.streaming_mean_squared_error(predictions, labels),
})
self.assertEqual(2, len(names_to_values))
self.assertEqual(2, len(names_to_updates))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(2, names_to_updates['m1'].eval())
self.assertEqual(4, names_to_updates['m2'].eval())
self.assertEqual(2, names_to_values['m1'].eval())
self.assertEqual(4, names_to_values['m2'].eval())
class CountTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.count(array_ops.ones([4, 3]))
_assert_metric_variables(self, ['count/count:0'])
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.count(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.count(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
result, update_op = metrics.count(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAlmostEqual(8.0, sess.run(result), 5)
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
result, update_op = metrics.count(values)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(2.0, sess.run(update_op), 5)
self.assertAlmostEqual(4.0, sess.run(update_op), 5)
self.assertAlmostEqual(6.0, sess.run(update_op), 5)
self.assertAlmostEqual(8.0, sess.run(update_op), 5)
self.assertAlmostEqual(8.0, sess.run(result), 5)
def test1dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [0.5])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [1.2])
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
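      # Rough hand check: each scalar weight broadcasts across the two values
      # in its batch, so the weighted count is (0.5 + 0 + 0 + 1.2) * 2 = 3.4.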
self.assertAlmostEqual(3.4, result.eval(), 5)
def test1dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1,))
_enqueue_vector(sess, weights_queue, 0.5, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 1.2, shape=(1,))
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual(3.4, result.eval(), 5)
def test2dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1.1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual(4.1, result.eval(), 5)
def test2dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(2,))
_enqueue_vector(sess, weights_queue, [1.1, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))
weights = weights_queue.dequeue()
result, update_op = metrics.count(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual(4.1, result.eval(), 5)
class CohenKappaTest(test.TestCase):
def _confusion_matrix_to_samples(self, confusion_matrix):
x, y = confusion_matrix.shape
pairs = []
for label in range(x):
for feature in range(y):
pairs += [label, feature] * confusion_matrix[label, feature]
pairs = np.array(pairs).reshape((-1, 2))
return pairs[:, 0], pairs[:, 1]
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.cohen_kappa(
predictions_idx=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
num_classes=2)
_assert_metric_variables(self, (
'cohen_kappa/po:0',
'cohen_kappa/pe_row:0',
'cohen_kappa/pe_col:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
kappa, _ = metrics.cohen_kappa(
predictions_idx=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
num_classes=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [kappa])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.cohen_kappa(
predictions_idx=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
num_classes=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 1), maxval=3, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 1), maxval=3, dtype=dtypes_lib.int64, seed=2)
kappa, update_op = metrics.cohen_kappa(labels, predictions, 3)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_kappa = kappa.eval()
for _ in range(10):
self.assertAlmostEqual(initial_kappa, kappa.eval(), 5)
def testBasic(self):
confusion_matrix = np.array([
[9, 3, 1],
[4, 8, 2],
[2, 1, 6]])
# overall total = 36
# po = [9, 8, 6], sum(po) = 23
# pe_row = [15, 12, 9], pe_col = [13, 14, 9], so pe = [5.42, 4.67, 2.25]
# finally, kappa = (sum(po) - sum(pe)) / (N - sum(pe))
# = (23 - 12.34) / (36 - 12.34)
# = 0.45
# see: http://psych.unl.edu/psycrs/handcomp/hckappa.PDF
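    # A quick NumPy cross-check of the same arithmetic (illustrative only):
    #   cm = np.array([[9., 3., 1.], [4., 8., 2.], [2., 1., 6.]])
    #   po = np.trace(cm) / cm.sum()                      # observed agreement
    #   pe = (cm.sum(0) * cm.sum(1)).sum() / cm.sum()**2  # chance agreement
    #   kappa = (po - pe) / (1 - pe)                      # ~0.45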
expect = 0.45
labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)
dtypes = [dtypes_lib.int16, dtypes_lib.int32, dtypes_lib.int64]
shapes = [(len(labels,)), # 1-dim
(len(labels), 1)] # 2-dim
weights = [None, np.ones_like(labels)]
for dtype in dtypes:
for shape in shapes:
for weight in weights:
with self.test_session() as sess:
predictions_tensor = constant_op.constant(
np.reshape(predictions, shape), dtype=dtype)
labels_tensor = constant_op.constant(
np.reshape(labels, shape), dtype=dtype)
kappa, update_op = metrics.cohen_kappa(
labels_tensor, predictions_tensor, 3, weights=weight)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 2)
self.assertAlmostEqual(expect, kappa.eval(), 2)
def testAllCorrect(self):
inputs = np.arange(0, 100) % 4
# confusion matrix
# [[25, 0, 0],
# [0, 25, 0],
# [0, 0, 25]]
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(inputs, inputs)
expect = 1.0
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
kappa, update_op = metrics.cohen_kappa(labels, predictions, 4)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 5)
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testAllIncorrect(self):
labels = np.arange(0, 100) % 4
predictions = (labels + 1) % 4
# confusion matrix
# [[0, 25, 0],
# [0, 0, 25],
# [25, 0, 0]]
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(labels, predictions)
expect = -0.333333333333
with self.test_session() as sess:
predictions = constant_op.constant(predictions, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels)
kappa, update_op = metrics.cohen_kappa(labels, predictions, 4)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 5)
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testWeighted(self):
confusion_matrix = np.array([
[9, 3, 1],
[4, 8, 2],
[2, 1, 6]])
labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)
num_samples = np.sum(confusion_matrix, dtype=np.int32)
weights = (np.arange(0, num_samples) % 5) / 5.0
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(
# labels, predictions, sample_weight=weights)
expect = 0.453466583385
with self.test_session() as sess:
predictions = constant_op.constant(predictions, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels)
kappa, update_op = metrics.cohen_kappa(labels, predictions, 4,
weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expect, sess.run(update_op), 5)
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testWithMultipleUpdates(self):
confusion_matrix = np.array([
[90, 30, 10, 20],
[40, 80, 20, 30],
[20, 10, 60, 35],
[15, 25, 30, 25]])
labels, predictions = self._confusion_matrix_to_samples(confusion_matrix)
num_samples = np.sum(confusion_matrix, dtype=np.int32)
weights = (np.arange(0, num_samples) % 5) / 5.0
num_classes = confusion_matrix.shape[0]
batch_size = num_samples // 10
predictions_t = array_ops.placeholder(dtypes_lib.float32,
shape=(batch_size,))
labels_t = array_ops.placeholder(dtypes_lib.int32,
shape=(batch_size,))
weights_t = array_ops.placeholder(dtypes_lib.float32,
shape=(batch_size,))
kappa, update_op = metrics.cohen_kappa(
labels_t, predictions_t, num_classes, weights=weights_t)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
for idx in range(0, num_samples, batch_size):
batch_start, batch_end = idx, idx + batch_size
sess.run(update_op,
feed_dict={labels_t: labels[batch_start:batch_end],
predictions_t: predictions[batch_start:batch_end],
weights_t: weights[batch_start:batch_end]})
# Calculated by v0.19: sklearn.metrics.cohen_kappa_score(
# labels_np, predictions_np, sample_weight=weights_np)
expect = 0.289965397924
self.assertAlmostEqual(expect, kappa.eval(), 5)
def testInvalidNumClasses(self):
predictions = array_ops.placeholder(dtypes_lib.float32, shape=(4, 1))
labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 1))
with self.assertRaisesRegexp(ValueError, 'num_classes'):
metrics.cohen_kappa(labels, predictions, 1)
def testInvalidDimension(self):
predictions = array_ops.placeholder(dtypes_lib.float32, shape=(4, 1))
invalid_labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 2))
with self.assertRaises(ValueError):
metrics.cohen_kappa(invalid_labels, predictions, 3)
invalid_predictions = array_ops.placeholder(dtypes_lib.float32, shape=(4, 2))
labels = array_ops.placeholder(dtypes_lib.int32, shape=(4, 1))
with self.assertRaises(ValueError):
metrics.cohen_kappa(labels, invalid_predictions, 3)
if __name__ == '__main__':
test.main()
|
apache-2.0
|
perrette/pyglacier
|
setup.py
|
1
|
2781
|
#!/usr/bin/env python
"""
"""
#from distutils.core import setup
import os, sys, re
from distutils.core import setup
import warnings
with open('README.md') as file:
long_description = file.read()
#
# Track version after pandas' setup.py
#
MAJOR = 0
MINOR = 0
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
QUALIFIER = ''
FULLVERSION = VERSION
write_version = True
if not ISRELEASED:
import subprocess
FULLVERSION += '.dev'
pipe = None
for cmd in ['git','git.cmd']:
try:
pipe = subprocess.Popen([cmd, "describe", "--always", "--match", "v[0-9]*"],
stdout=subprocess.PIPE)
(so,serr) = pipe.communicate()
if pipe.returncode == 0:
break
except:
pass
if pipe is None or pipe.returncode != 0:
# no git, or not in git dir
if os.path.exists('pyglacier/version.py'):
warnings.warn("WARNING: Couldn't get git revision, using existing pyglacier/version.py")
write_version = False
else:
warnings.warn("WARNING: Couldn't get git revision, using generic version string")
else:
# have git, in git dir, but may have used a shallow clone (travis does this)
rev = so.strip()
# makes distutils blow up on Python 2.7
if sys.version_info[0] >= 3:
rev = rev.decode('ascii')
if not rev.startswith('v') and re.match("[a-zA-Z0-9]{7,9}",rev):
# partial clone, manually construct version string
# this is the format before we started using git-describe
# to get an ordering on dev version strings.
rev ="v%s.dev-%s" % (VERSION, rev)
        # Strip leading v from tags format "vx.y.z" to get the version string
FULLVERSION = rev.lstrip('v')
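        # e.g. a tag-based describe such as 'v0.1.0-5-gabc1234' would yield
        # the version string '0.1.0-5-gabc1234' here (illustrative example).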
else:
FULLVERSION += QUALIFIER
def write_version_py(filename=None):
cnt = """\
version = '%s'
short_version = '%s'
"""
if not filename:
#filename = os.path.join(
# os.path.dirname(__file__), 'dimarray', 'version.py')
filename = os.path.join('pyglacier', 'version.py')
with open(filename, 'w') as a:
a.write(cnt % (FULLVERSION, VERSION))
# Write version.py to pyglacier
if write_version:
write_version_py()
#
# Actually important part
#
setup(name='pyglacier',
version=FULLVERSION,
author='Mahe Perrette',
author_email='mahe.perrette@pik-potsdam.de',
description='Wrapper and helper methods to run the fortran glacier model',
keywords=('fortran','glacier','wrapper'),
# basic stuff here
packages = ['pyglacier'],
scripts = ['scripts/view_glacier', 'scripts/run_glacier'],
long_description=long_description,
license = "MIT",
)
|
mit
|
alekz112/statsmodels
|
statsmodels/datasets/spector/data.py
|
25
|
2000
|
"""Spector and Mazzeo (1980) - Program Effectiveness Data"""
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission of the original author, who
retains all rights. """
TITLE = __doc__
SOURCE = """
http://pages.stern.nyu.edu/~wgreene/Text/econometricanalysis.htm
The raw data was downloaded from Bill Greene's Econometric Analysis web site,
though permission was obtained from the original researcher, Dr. Lee Spector,
Professor of Economics, Ball State University."""
DESCRSHORT = """Experimental data on the effectiveness of the personalized
system of instruction (PSI) program"""
DESCRLONG = DESCRSHORT
NOTE = """::
Number of Observations - 32
Number of Variables - 4
Variable name definitions::
Grade - binary variable indicating whether or not a student's grade
improved. 1 indicates an improvement.
TUCE - Test score on economics test
PSI - participation in program
GPA - Student's grade point average
"""
import numpy as np
from statsmodels.datasets import utils as du
from os.path import dirname, abspath
def load():
"""
    Load the Spector dataset and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray(data, endog_idx=3, dtype=float)
def load_pandas():
"""
    Load the Spector dataset and return a Dataset class instance.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_recarray_pandas(data, endog_idx=3, dtype=float)
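# A minimal usage sketch (hypothetical, assuming the package re-exports
# load_pandas from this module):
#   from statsmodels.datasets.spector import load_pandas
#   data = load_pandas()
#   data.endog  # Grade
#   data.exog   # the three regressors (GPA, TUCE, PSI)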
def _get_data():
filepath = dirname(abspath(__file__))
##### EDIT THE FOLLOWING TO POINT TO DatasetName.csv #####
data = np.recfromtxt(open(filepath + '/spector.csv',"rb"), delimiter=" ",
names=True, dtype=float, usecols=(1,2,3,4))
return data
|
bsd-3-clause
|
annahs/atmos_research
|
AL_1h_timeseries_HG.py
|
1
|
5362
|
import sys
import os
import numpy as np
from pprint import pprint
from datetime import datetime
from datetime import timedelta
import mysql.connector
import math
import calendar
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib import dates
SP2_ID = 44
saved_1_of_every = 8
start = datetime(2013,7,15)
end = datetime(2013,8,5)
timestep = 1 #hours
sample_min = 117
sample_max = 123
yag_min = 1.5
yag_max = 7
pkht_max = 50000
UNIX_start = calendar.timegm(start.utctimetuple())
UNIX_end = calendar.timegm(end.utctimetuple())
#database connection
cnx = mysql.connector.connect(user='root', password='Suresh15', host='localhost', database='black_carbon')
cursor = cnx.cursor()
file_data = []
while start <= end:
print start
UNIX_start = calendar.timegm(start.utctimetuple())
UNIX_end = UNIX_start + timestep*3600
start_t = datetime.now()
cursor.execute('''(SELECT
mn.UNIX_UTC_ts_int_start,
mn.UNIX_UTC_ts_int_end,
mn.BB_incand_HG,
hk.sample_flow
FROM alert_mass_number_data_2013 mn
FORCE INDEX (time_binning)
JOIN alert_hk_data hk on mn.hk_id = hk.id
WHERE
mn.UNIX_UTC_ts_int_start >= %s
AND mn.UNIX_UTC_ts_int_end < %s
AND mn.BB_incand_HG < %s
AND hk.sample_flow >= %s
AND hk.sample_flow < %s
AND hk.yag_power >= %s
AND hk.yag_power < %s)''',
(UNIX_start,UNIX_end,pkht_max,sample_min,sample_max,yag_min,yag_max))
masses={
'rBC_mass_fg_HG_low':[],
'rBC_mass_fg_HG_high':[],
}
mass_uncertainties={
'rBC_mass_fg_HG_low':[],
'rBC_mass_fg_HG_high':[],
}
ind_data = cursor.fetchall()
ind_mass_tot = 0
ind_mass_uncer_tot = 0
ind_number_tot = 0
total_sample_vol = 0
for row in ind_data:
ind_start_time = row[0]
ind_end_time = row[1]
bbhg_incand_pk_amp = row[2]
sample_flow = row[3] #in vccm
if sample_flow == None:
print 'no flow'
continue
sample_vol = (sample_flow*(ind_end_time-ind_start_time)/60) #/60 b/c sccm and time in secs 0.87 = STP corr?????
total_sample_vol = total_sample_vol + sample_vol
#calculate masses and uncertainties
#HG
bbhg_mass_uncorr = 0.18821 + 1.36864E-4*bbhg_incand_pk_amp + 5.82331E-10*bbhg_incand_pk_amp*bbhg_incand_pk_amp #SP244
bbhg_mass_uncertainty_uncorr = 0.05827 + 7.26841E-6*bbhg_incand_pk_amp + 1.43898E-10*bbhg_incand_pk_amp*bbhg_incand_pk_amp #SP244
bbhg_mass_corr = bbhg_mass_uncorr/0.7 #AD correction factor is 0.7 +- 0.05
bbhg_mass_only_rel_err = bbhg_mass_uncertainty_uncorr/bbhg_mass_uncorr
bbhg_ADcorr_rel_err = (0.05/0.7)
bbhg_mass_abs_uncertainty_corr = (bbhg_ADcorr_rel_err + bbhg_mass_only_rel_err) * bbhg_mass_corr
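  # e.g. for a peak amplitude of 10000 the calibration above gives roughly
  # 1.6 fg uncorrected, or about 2.3 fg after the 0.7 AD correction
  # (illustrative numbers only)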
masses['rBC_mass_fg_HG_low'].append(bbhg_mass_corr)
mass_uncertainties['rBC_mass_fg_HG_low'].append(bbhg_mass_abs_uncertainty_corr)
loop_end = datetime.now()
print 'loop end', (loop_end-start_t)
tot_rBC_mass_fg_HG_low = sum(masses['rBC_mass_fg_HG_low'])
tot_rBC_mass_uncer_HG_low = sum(mass_uncertainties['rBC_mass_fg_HG_low'])
rBC_number_HG_low = len(masses['rBC_mass_fg_HG_low'])
ind_mass_tot = ind_mass_tot + tot_rBC_mass_fg_HG_low
ind_mass_uncer_tot = ind_mass_uncer_tot + tot_rBC_mass_uncer_HG_low
ind_number_tot = ind_number_tot + rBC_number_HG_low
if total_sample_vol == 0:
file_data.append([start,start + timedelta(hours = timestep), np.nan, np.nan, np.nan,total_sample_vol])
else:
file_data.append([start,start + timedelta(hours = timestep), ind_mass_tot*saved_1_of_every/total_sample_vol,ind_mass_uncer_tot*saved_1_of_every/total_sample_vol,ind_number_tot*saved_1_of_every/total_sample_vol,total_sample_vol])
next_hour = start + timedelta(hours = timestep)
#if this is the last hour of the day write to file
if next_hour.day != start.day:
if start.month <10:
month_prefix = '0'
else:
month_prefix = ''
if start.day < 10:
file_name = str(start.year) + month_prefix + str(start.month) + '0' + str(start.day) + ' - hourly mass and number concentration'
else:
file_name = str(start.year) + month_prefix + str(start.month) + str(start.day) + ' - hourly mass and number concentration'
file = open('C:/Users/Sarah Hanna/Documents/Data/Alert Data/Alert 1h mass and number concentrations/2011-2013 - mass concentrations/' + file_name +'.txt', 'w')
file.write('mass and number concentration for SP2#' + str(SP2_ID)+ ' at Alert \n')
file.write('all concentrations have been corrected for sampling filter set at 1 of Every ' + str(saved_1_of_every) + ' particles saved \n')
file.write('interval_start(UTC) \t interval_end(UTC) \t rBC_mass_concentration(ng/m3) \t rBC_mass_concentration_uncertainty(ng/m3) \t rBC_number_concentration(#/cm3) \t sampling_volume(cc) \n')
for row in file_data:
line = '\t'.join(str(x) for x in row)
file.write(line + '\n')
file.close()
file_data = []
start += timedelta(hours = timestep)
cnx.close()
#ind_dates = [row[0] for row in file_data]
#ind_mass_tots = [row[2] for row in file_data]
#ind_mass_uncer_tots = [row[3] for row in file_data]
#ind_number_tots = [row[4] for row in file_data]
#
#
#hfmt = dates.DateFormatter('%Y%m%d %H:%M')
#
#fig = plt.figure()
#ax1 = fig.add_subplot(211)
#ax1.xaxis.set_major_formatter(hfmt)
#ax1.plot(ind_dates,ind_number_tots,'-ro')
#
#
#ax2 = fig.add_subplot(212)
#ax2.xaxis.set_major_formatter(hfmt)
#ax2.errorbar(ind_dates,ind_mass_tots,yerr = ind_mass_uncer_tots,color = 'r', marker = 'o')
#
#
#
#plt.show()
|
mit
|
RayMick/scikit-learn
|
examples/gaussian_process/plot_gp_probabilistic_classification_after_regression.py
|
252
|
3490
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
==============================================================================
Gaussian Processes classification example: exploiting the probabilistic output
==============================================================================
A two-dimensional regression exercise with a post-processing allowing for
probabilistic classification thanks to the Gaussian property of the prediction.
The figure illustrates the probability that the prediction is negative with
respect to the remaining uncertainty in the prediction. The red and blue lines
correspond to the 95% confidence interval on the prediction of the zero level
set.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
import numpy as np
from scipy import stats
from sklearn.gaussian_process import GaussianProcess
from matplotlib import pyplot as pl
from matplotlib import cm
# Standard normal distribution functions
phi = stats.distributions.norm().pdf
PHI = stats.distributions.norm().cdf
PHIinv = stats.distributions.norm().ppf
# A few constants
lim = 8
def g(x):
"""The function to predict (classification will then consist in predicting
whether g(x) <= 0 or not)"""
return 5. - x[:, 1] - .5 * x[:, 0] ** 2.
# Design of experiments
X = np.array([[-4.61611719, -6.00099547],
[4.10469096, 5.32782448],
[0.00000000, -0.50000000],
[-6.17289014, -4.6984743],
[1.3109306, -6.93271427],
[-5.03823144, 3.10584743],
[-2.87600388, 6.74310541],
[5.21301203, 4.26386883]])
# Observations
y = g(X)
# Instantiate and fit Gaussian Process Model
gp = GaussianProcess(theta0=5e-1)
# Don't perform MLE or you'll get a perfect prediction for this simple example!
gp.fit(X, y)
# Evaluate real function, the prediction and its MSE on a grid
res = 50
x1, x2 = np.meshgrid(np.linspace(- lim, lim, res),
np.linspace(- lim, lim, res))
xx = np.vstack([x1.reshape(x1.size), x2.reshape(x2.size)]).T
y_true = g(xx)
y_pred, MSE = gp.predict(xx, eval_MSE=True)
sigma = np.sqrt(MSE)
y_true = y_true.reshape((res, res))
y_pred = y_pred.reshape((res, res))
sigma = sigma.reshape((res, res))
k = PHIinv(.975)
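# For reference: PHIinv(.975) is roughly 1.96, the two-sided 95% quantile of
# the standard normal; PHI(-y_pred / sigma) below is the Gaussian probability
# that the latent prediction is <= 0 at each grid point.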
# Plot the probabilistic classification iso-values using the Gaussian property
# of the prediction
fig = pl.figure(1)
ax = fig.add_subplot(111)
ax.axes.set_aspect('equal')
pl.xticks([])
pl.yticks([])
ax.set_xticklabels([])
ax.set_yticklabels([])
pl.xlabel('$x_1$')
pl.ylabel('$x_2$')
cax = pl.imshow(np.flipud(PHI(- y_pred / sigma)), cmap=cm.gray_r, alpha=0.8,
extent=(- lim, lim, - lim, lim))
norm = pl.matplotlib.colors.Normalize(vmin=0., vmax=0.9)
cb = pl.colorbar(cax, ticks=[0., 0.2, 0.4, 0.6, 0.8, 1.], norm=norm)
cb.set_label('${\\rm \mathbb{P}}\left[\widehat{G}(\mathbf{x}) \leq 0\\right]$')
pl.plot(X[y <= 0, 0], X[y <= 0, 1], 'r.', markersize=12)
pl.plot(X[y > 0, 0], X[y > 0, 1], 'b.', markersize=12)
cs = pl.contour(x1, x2, y_true, [0.], colors='k', linestyles='dashdot')
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.025], colors='b',
linestyles='solid')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.5], colors='k',
linestyles='dashed')
pl.clabel(cs, fontsize=11)
cs = pl.contour(x1, x2, PHI(- y_pred / sigma), [0.975], colors='r',
linestyles='solid')
pl.clabel(cs, fontsize=11)
pl.show()
|
bsd-3-clause
|
ambikeshwar1991/gnuradio
|
gr-utils/src/python/plot_psd_base.py
|
75
|
12725
|
#!/usr/bin/env python
#
# Copyright 2007,2008,2010,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
try:
import scipy
from scipy import fftpack
except ImportError:
print "Please install SciPy to run this script (http://www.scipy.org/)"
raise SystemExit, 1
try:
from pylab import *
except ImportError:
print "Please install Matplotlib to run this script (http://matplotlib.sourceforge.net/)"
raise SystemExit, 1
from optparse import OptionParser
from scipy import log10
from gnuradio.eng_option import eng_option
class plot_psd_base:
def __init__(self, datatype, filename, options):
self.hfile = open(filename, "r")
self.block_length = options.block
self.start = options.start
self.sample_rate = options.sample_rate
self.psdfftsize = options.psd_size
self.specfftsize = options.spec_size
self.dospec = options.enable_spec # if we want to plot the spectrogram
self.datatype = getattr(scipy, datatype) #scipy.complex64
self.sizeof_data = self.datatype().nbytes # number of bytes per sample in file
self.axis_font_size = 16
self.label_font_size = 18
self.title_font_size = 20
self.text_size = 22
# Setup PLOT
self.fig = figure(1, figsize=(16, 12), facecolor='w')
rcParams['xtick.labelsize'] = self.axis_font_size
rcParams['ytick.labelsize'] = self.axis_font_size
self.text_file = figtext(0.10, 0.95, ("File: %s" % filename),
weight="heavy", size=self.text_size)
self.text_file_pos = figtext(0.10, 0.92, "File Position: ",
weight="heavy", size=self.text_size)
self.text_block = figtext(0.35, 0.92, ("Block Size: %d" % self.block_length),
weight="heavy", size=self.text_size)
self.text_sr = figtext(0.60, 0.915, ("Sample Rate: %.2f" % self.sample_rate),
weight="heavy", size=self.text_size)
self.make_plots()
self.button_left_axes = self.fig.add_axes([0.45, 0.01, 0.05, 0.05], frameon=True)
self.button_left = Button(self.button_left_axes, "<")
self.button_left_callback = self.button_left.on_clicked(self.button_left_click)
self.button_right_axes = self.fig.add_axes([0.50, 0.01, 0.05, 0.05], frameon=True)
self.button_right = Button(self.button_right_axes, ">")
self.button_right_callback = self.button_right.on_clicked(self.button_right_click)
self.xlim = scipy.array(self.sp_iq.get_xlim())
self.manager = get_current_fig_manager()
connect('draw_event', self.zoom)
connect('key_press_event', self.click)
show()
def get_data(self):
self.position = self.hfile.tell()/self.sizeof_data
self.text_file_pos.set_text("File Position: %d" % self.position)
try:
self.iq = scipy.fromfile(self.hfile, dtype=self.datatype, count=self.block_length)
except MemoryError:
print "End of File"
return False
else:
# retesting length here as newer versions of scipy do not throw a MemoryError,
# just return a zero-length array
if(len(self.iq) > 0):
tstep = 1.0 / self.sample_rate
#self.time = scipy.array([tstep*(self.position + i) for i in xrange(len(self.iq))])
self.time = scipy.array([tstep*(i) for i in xrange(len(self.iq))])
self.iq_psd, self.freq = self.dopsd(self.iq)
return True
else:
print "End of File"
return False
def dopsd(self, iq):
''' Need to do this here and plot later so we can do the fftshift '''
overlap = self.psdfftsize/4
winfunc = scipy.blackman
psd,freq = mlab.psd(iq, self.psdfftsize, self.sample_rate,
window = lambda d: d*winfunc(self.psdfftsize),
noverlap = overlap)
psd = 10.0*log10(abs(psd))
return (psd, freq)
def make_plots(self):
# if specified on the command-line, set file pointer
self.hfile.seek(self.sizeof_data*self.start, 1)
iqdims = [[0.075, 0.2, 0.4, 0.6], [0.075, 0.55, 0.4, 0.3]]
psddims = [[0.575, 0.2, 0.4, 0.6], [0.575, 0.55, 0.4, 0.3]]
specdims = [0.2, 0.125, 0.6, 0.3]
# Subplot for real and imaginary parts of signal
self.sp_iq = self.fig.add_subplot(2,2,1, position=iqdims[self.dospec])
self.sp_iq.set_title(("I&Q"), fontsize=self.title_font_size, fontweight="bold")
self.sp_iq.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_iq.set_ylabel("Amplitude (V)", fontsize=self.label_font_size, fontweight="bold")
# Subplot for PSD plot
self.sp_psd = self.fig.add_subplot(2,2,2, position=psddims[self.dospec])
self.sp_psd.set_title(("PSD"), fontsize=self.title_font_size, fontweight="bold")
self.sp_psd.set_xlabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold")
self.sp_psd.set_ylabel("Power Spectrum (dBm)", fontsize=self.label_font_size, fontweight="bold")
r = self.get_data()
self.plot_iq = self.sp_iq.plot([], 'bo-') # make plot for reals
self.plot_iq += self.sp_iq.plot([], 'ro-') # make plot for imags
self.draw_time(self.time, self.iq) # draw the plot
self.plot_psd = self.sp_psd.plot([], 'b') # make plot for PSD
self.draw_psd(self.freq, self.iq_psd) # draw the plot
if self.dospec:
# Subplot for spectrogram plot
self.sp_spec = self.fig.add_subplot(2,2,3, position=specdims)
self.sp_spec.set_title(("Spectrogram"), fontsize=self.title_font_size, fontweight="bold")
self.sp_spec.set_xlabel("Time (s)", fontsize=self.label_font_size, fontweight="bold")
self.sp_spec.set_ylabel("Frequency (Hz)", fontsize=self.label_font_size, fontweight="bold")
self.draw_spec(self.time, self.iq)
draw()
def draw_time(self, t, iq):
reals = iq.real
imags = iq.imag
self.plot_iq[0].set_data([t, reals])
self.plot_iq[1].set_data([t, imags])
self.sp_iq.set_xlim(t.min(), t.max())
self.sp_iq.set_ylim([1.5*min([reals.min(), imags.min()]),
1.5*max([reals.max(), imags.max()])])
def draw_psd(self, f, p):
self.plot_psd[0].set_data([f, p])
self.sp_psd.set_ylim([p.min()-10, p.max()+10])
self.sp_psd.set_xlim([f.min(), f.max()])
def draw_spec(self, t, s):
overlap = self.specfftsize/4
winfunc = scipy.blackman
self.sp_spec.clear()
self.sp_spec.specgram(s, self.specfftsize, self.sample_rate,
window = lambda d: d*winfunc(self.specfftsize),
noverlap = overlap, xextent=[t.min(), t.max()])
def update_plots(self):
self.draw_time(self.time, self.iq)
self.draw_psd(self.freq, self.iq_psd)
if self.dospec:
self.draw_spec(self.time, self.iq)
self.xlim = scipy.array(self.sp_iq.get_xlim()) # so zoom doesn't get called
draw()
def zoom(self, event):
newxlim = scipy.array(self.sp_iq.get_xlim())
curxlim = scipy.array(self.xlim)
if(newxlim[0] != curxlim[0] or newxlim[1] != curxlim[1]):
#xmin = max(0, int(ceil(self.sample_rate*(newxlim[0] - self.position))))
#xmax = min(int(ceil(self.sample_rate*(newxlim[1] - self.position))), len(self.iq))
xmin = max(0, int(ceil(self.sample_rate*(newxlim[0]))))
xmax = min(int(ceil(self.sample_rate*(newxlim[1]))), len(self.iq))
iq = scipy.array(self.iq[xmin : xmax])
time = scipy.array(self.time[xmin : xmax])
iq_psd, freq = self.dopsd(iq)
self.draw_psd(freq, iq_psd)
self.xlim = scipy.array(self.sp_iq.get_xlim())
draw()
def click(self, event):
forward_valid_keys = [" ", "down", "right"]
backward_valid_keys = ["up", "left"]
if(find(event.key, forward_valid_keys)):
self.step_forward()
elif(find(event.key, backward_valid_keys)):
self.step_backward()
def button_left_click(self, event):
self.step_backward()
def button_right_click(self, event):
self.step_forward()
def step_forward(self):
r = self.get_data()
if(r):
self.update_plots()
def step_backward(self):
# Step back in file position
if(self.hfile.tell() >= 2*self.sizeof_data*self.block_length ):
self.hfile.seek(-2*self.sizeof_data*self.block_length, 1)
else:
self.hfile.seek(-self.hfile.tell(),1)
r = self.get_data()
if(r):
self.update_plots()
@staticmethod
def setup_options():
usage="%prog: [options] input_filename"
description = "Takes a GNU Radio binary file (with specified data type using --data-type) and displays the I&Q data versus time as well as the power spectral density (PSD) plot. The y-axis values are plotted assuming volts as the amplitude of the I&Q streams and converted into dBm in the frequency domain (the 1/N power adjustment out of the FFT is performed internally). The script plots a certain block of data at a time, specified on the command line as -B or --block. The start position in the file can be set by specifying -s or --start and defaults to 0 (the start of the file). By default, the system assumes a sample rate of 1, so in time, each sample is plotted versus the sample number. To set a true time and frequency axis, set the sample rate (-R or --sample-rate) to the sample rate used when capturing the samples. Finally, the size of the FFT to use for the PSD and spectrogram plots can be set independently with --psd-size and --spec-size, respectively. The spectrogram plot does not display by default and is turned on with -S or --enable-spec."
parser = OptionParser(option_class=eng_option, conflict_handler="resolve",
usage=usage, description=description)
parser.add_option("-d", "--data-type", type="string", default="complex64",
help="Specify the data type (complex64, float32, (u)int32, (u)int16, (u)int8) [default=%default]")
parser.add_option("-B", "--block", type="int", default=8192,
help="Specify the block size [default=%default]")
parser.add_option("-s", "--start", type="int", default=0,
help="Specify where to start in the file [default=%default]")
parser.add_option("-R", "--sample-rate", type="eng_float", default=1.0,
help="Set the sampler rate of the data [default=%default]")
parser.add_option("", "--psd-size", type="int", default=1024,
help="Set the size of the PSD FFT [default=%default]")
parser.add_option("", "--spec-size", type="int", default=256,
help="Set the size of the spectrogram FFT [default=%default]")
parser.add_option("-S", "--enable-spec", action="store_true", default=False,
help="Turn on plotting the spectrogram [default=%default]")
return parser
def find(item_in, list_search):
try:
return list_search.index(item_in) != None
except ValueError:
return False
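# Illustrative sketch (not part of the original script): dopsd() above windows
# each block with a Blackman window, overlaps blocks by a quarter of the FFT
# size and converts the power to dB.  A minimal standalone version of that
# computation on a synthetic complex tone, assuming matplotlib's mlab.psd
# (pulled in above via pylab) is available:
def _psd_sketch(sample_rate=1e6, fftsize=1024):
    import numpy
    from matplotlib import mlab
    t = numpy.arange(8 * fftsize) / float(sample_rate)
    tone = numpy.exp(2j * numpy.pi * 100e3 * t)            # 100 kHz complex tone
    win = numpy.blackman(fftsize)
    psd, freq = mlab.psd(tone, fftsize, sample_rate,
                         window=lambda d: d * win,
                         noverlap=fftsize // 4)
    return 10.0 * numpy.log10(numpy.abs(psd)), freq        # PSD in dB, freq axis
# An assumed example invocation of the script itself (options defined above):
#   plot_psd_base.py -d complex64 -B 8192 -R 1e6 capture.dat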
def main():
parser = plot_psd_base.setup_options()
(options, args) = parser.parse_args ()
if len(args) != 1:
parser.print_help()
raise SystemExit, 1
filename = args[0]
dc = plot_psd_base(options.data_type, filename, options)
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
gpl-3.0
|
opikalo/pyfire
|
demo/level1.py
|
1
|
6926
|
#see http://stackoverflow.com/questions/10391123/how-to-run-two-python-blocking-functions-matplotlib-show-and-twisted-reactor-r
if __name__ == '__main__':
from level1 import main
raise SystemExit(main())
from matplotlib import use
use('GTK')
from matplotlib import pyplot
from matplotlib.backends import backend_gtk
from twisted.internet import gtk2reactor
gtk2reactor.install()
#OK, we are done with weird stuff here, the rest is vanilla
from twisted.internet import reactor, task
from steering.twisted_steering import press
import os
import numpy as np
import time
from math import pi
from pylab import get_current_fig_manager
import networkx as nx
from planning.astar.global_map import (plot_map, GlobalMap,
MIN_UNCONTRAINED_PENALTY)
from screen_capture.localize_map import LocalizeMap
from screen_capture.capture import Capture, find_best_image
from planning.astar.local_graph import plan_path
from smoothing.gd import smooth_graph, graph_to_path
from control.robot_control import particles, robot
from utils import root
class LocalizationDisplay(object):
def __init__(self):
self.fig, self.ax = plot_map()
#position window properly
thismanager = get_current_fig_manager()
try:
thismanager.window.wm_geometry("+700+0")
except AttributeError:
self.fig.canvas.manager.window.move(700,0)
self.ax.set_aspect('equal')
self.ax.set_xlim(0,700)
self.ax.set_ylim(0,500)
self.ax.hold(True)
self.fig.canvas.draw()
def update(self, map_box):
(x0, y0, x1, y1) = map_box
self.ax.set_xlim([x0, x1])
self.ax.set_ylim([y1, y0])
#new_position = (max_loc[0] + w/2, max_loc[1] + h/2)
pyplot.scatter( [(x0 + x1)/2],
[(y0 + y1)/2])
self.fig.canvas.draw()
class LocalizationMapper(object):
def __init__(self):
map_filename = os.path.join(root, 'flash', 'fft2', 'processed', 'aligned_localization_data_map.png')
self.mapper = LocalizeMap(map_filename)
filename = os.path.join(root, 'flash', 'fft2', 'processed', 'level1_start.png')
self.c = Capture(filename)
#default starting value
self.start_pos = [2650, 2650]
self.goal_pos = [1900, 400]
#from twiddle
weight_data = 1.1
weight_smooth = 0.2
self.p_gain = 2.0
self.d_gain = 6.0
self.steering_noise = 0.01
self.distance_noise = 0.05
self.measurement_noise = 0.05
self.speed = 2
#planning
print "planning..."
graph_path = plan_path(self.start_pos, self.goal_pos)
#extract points from graph
path_pos = nx.get_node_attributes(graph_path, 'pos')
#smooth
print "smoothing..."
sg = smooth_graph(graph_path, self.start_pos, self.goal_pos, True,
weight_data, weight_smooth)
#extract points from the smoothed graph
sg_pos = nx.get_node_attributes(sg, 'pos')
#convert graph to spath
self.spath = graph_to_path(sg)
#plot smoothed path on a graph
nx.draw(sg, sg_pos, node_size=5, edge_color='r')
def run(self):
prev_map_box = None
mg = nx.DiGraph()
myrobot = robot()
template = self.c.snap_gray()
map_box = self.mapper.localize(template, None)
(x0, y0, x1, y1) = map_box
#this is approximate sensor measurement
ax = (x0 + x1)/2
ay = (y0 + y1)/2
self.start_pos = (ax, ay)
return (None, None, None)
myrobot.set(self.start_pos[0], self.start_pos[1], -pi/2)
mg.add_node(0, pos=(myrobot.x, myrobot.y))
myrobot.set_noise(self.steering_noise,
self.distance_noise,
self.measurement_noise)
pfilter = particles(myrobot.x, myrobot.y, myrobot.orientation,
self.steering_noise,
self.distance_noise,
self.measurement_noise)
cte = 0.0
err = 0.0
N = 0
index = 0 # index into the path
if not myrobot.check_goal(self.goal_pos):
start_time = time.time()
diff_cte = -cte
# ----------------------------------------
# compute the CTE
estimate = pfilter.get_position()
### ENTER CODE HERE
x, y, theta = estimate
#find the right spath segment
while True:
x1, y1 = self.spath[index]
Rx = x - x1
Ry = y - y1
x2, y2 = self.spath[index + 1]
dx = x2 - x1
dy = y2 - y1
u = abs(Rx*dx + Ry*dy)/(dx*dx + dy*dy)
if u > 1 and index < (len(self.spath) - 2):
index +=1
else:
break
cte = (Ry * dx - Rx * dy) / (dx * dx + dy * dy)
diff_cte += cte
steer = - self.p_gain * cte - self.d_gain * diff_cte
myrobot = myrobot.move(steer, self.speed, real=True)
pfilter.move(steer, self.speed)
#sense
template = self.c.snap_gray()
map_box = self.mapper.localize(template, prev_map_box)
prev_map_box = map_box
(x0, y0, x1, y1) = map_box
#this is approximate sensor measurement
ax = (x0 + x1)/2
ay = (y0 + y1)/2
Z = (ax, ay)
pfilter.sense(Z)
err += (cte ** 2)
N += 1
robot_pos = (myrobot.x, myrobot.y)
#mg.add_node(N, pos=(myrobot.x, myrobot.y))
#mg.add_edge(N-1, N)
#send update to matplotlib
time_pos = (time.time(), map_box, robot_pos)
#self.connec.send(time_pos)
end_time = time.time()
#fps
fps = 1/(end_time-start_time)
print "%2d frames per sec\r" % fps,
return time_pos
display = LocalizationDisplay()
mapper = LocalizationMapper()
#replaced the call to pyplot.show() with a call to my own Show subclass with a mainloop
class TwistedShow(backend_gtk.Show):
running = False
def mainloop(self):
if not self.running:
self.running = True
reactor.run()
def main():
def proof():
global display, mapper
start_time = time.time()
(t, map_box, robot) = mapper.run()
#display.update(map_box)
end_time = time.time()
fps = 1/(end_time-start_time)
print "%2d frames per sec\r" % fps,
task.LoopingCall(proof).start(0)
TwistedShow()()
|
mit
|
scikit-learn-contrib/project-template
|
skltemplate/_template.py
|
1
|
6474
|
"""
This is a module to be used as a reference for building other modules
"""
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin, TransformerMixin
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.utils.multiclass import unique_labels
from sklearn.metrics import euclidean_distances
class TemplateEstimator(BaseEstimator):
""" A template estimator to be used as a reference implementation.
For more information regarding how to build your own estimator, read more
in the :ref:`User Guide <user_guide>`.
Parameters
----------
demo_param : str, default='demo_param'
A parameter used for demonstration of how to pass and store parameters.
Examples
--------
>>> from skltemplate import TemplateEstimator
>>> import numpy as np
>>> X = np.arange(100).reshape(100, 1)
>>> y = np.zeros((100, ))
>>> estimator = TemplateEstimator()
>>> estimator.fit(X, y)
TemplateEstimator()
"""
def __init__(self, demo_param='demo_param'):
self.demo_param = demo_param
def fit(self, X, y):
"""A reference implementation of a fitting function.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,) or (n_samples, n_outputs)
The target values (class labels in classification, real numbers in
regression).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, accept_sparse=True)
self.is_fitted_ = True
# `fit` should always return `self`
return self
def predict(self, X):
""" A reference implementation of a predicting function.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input samples.
Returns
-------
y : ndarray, shape (n_samples,)
Returns an array of ones.
"""
X = check_array(X, accept_sparse=True)
check_is_fitted(self, 'is_fitted_')
return np.ones(X.shape[0], dtype=np.int64)
class TemplateClassifier(ClassifierMixin, BaseEstimator):
""" An example classifier which implements a 1-NN algorithm.
For more information regarding how to build your own classifier, read more
in the :ref:`User Guide <user_guide>`.
Parameters
----------
demo_param : str, default='demo'
A parameter used for demonstration of how to pass and store parameters.
Attributes
----------
X_ : ndarray, shape (n_samples, n_features)
The input passed during :meth:`fit`.
y_ : ndarray, shape (n_samples,)
The labels passed during :meth:`fit`.
classes_ : ndarray, shape (n_classes,)
The classes seen at :meth:`fit`.
"""
def __init__(self, demo_param='demo'):
self.demo_param = demo_param
def fit(self, X, y):
"""A reference implementation of a fitting function for a classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The training input samples.
y : array-like, shape (n_samples,)
The target values. An array of int.
Returns
-------
self : object
Returns self.
"""
# Check that X and y have correct shape
X, y = check_X_y(X, y)
# Store the classes seen during fit
self.classes_ = unique_labels(y)
self.X_ = X
self.y_ = y
# Return the classifier
return self
def predict(self, X):
""" A reference implementation of a prediction for a classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray, shape (n_samples,)
The label for each sample is the label of the closest sample
seen during fit.
"""
# Check is fit had been called
check_is_fitted(self, ['X_', 'y_'])
# Input validation
X = check_array(X)
closest = np.argmin(euclidean_distances(X, self.X_), axis=1)
return self.y_[closest]
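# Illustrative usage sketch (not part of the template): the classifier above
# memorises the training set and predicts, for each query, the label of the
# closest training sample (1-NN), as the argmin over euclidean_distances shows.
def _demo_template_classifier():
    X_train = np.array([[0.0], [1.0], [10.0], [11.0]])
    y_train = np.array([0, 0, 1, 1])
    clf = TemplateClassifier().fit(X_train, y_train)
    return clf.predict(np.array([[0.4], [10.6]]))          # -> array([0, 1])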
class TemplateTransformer(TransformerMixin, BaseEstimator):
""" An example transformer that returns the element-wise square root.
For more information regarding how to build your own transformer, read more
in the :ref:`User Guide <user_guide>`.
Parameters
----------
demo_param : str, default='demo'
A parameter used for demonstration of how to pass and store parameters.
Attributes
----------
n_features_ : int
The number of features of the data passed to :meth:`fit`.
"""
def __init__(self, demo_param='demo'):
self.demo_param = demo_param
def fit(self, X, y=None):
"""A reference implementation of a fitting function for a transformer.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
The training input samples.
y : None
There is no need of a target in a transformer, yet the pipeline API
requires this parameter.
Returns
-------
self : object
Returns self.
"""
X = check_array(X, accept_sparse=True)
self.n_features_ = X.shape[1]
# Return the transformer
return self
def transform(self, X):
""" A reference implementation of a transform function.
Parameters
----------
X : {array-like, sparse-matrix}, shape (n_samples, n_features)
The input samples.
Returns
-------
X_transformed : array, shape (n_samples, n_features)
The array containing the element-wise square roots of the values
in ``X``.
"""
# Check is fit had been called
check_is_fitted(self, 'n_features_')
# Input validation
X = check_array(X, accept_sparse=True)
# Check that the input is of the same shape as the one passed
# during fit.
if X.shape[1] != self.n_features_:
raise ValueError('Shape of input is different from what was seen'
'in `fit`')
return np.sqrt(X)
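# Illustrative usage sketch (not part of the template): fit() only records the
# number of input features, and transform() returns the element-wise square root.
def _demo_template_transformer():
    X = np.array([[1.0, 4.0], [9.0, 16.0]])
    return TemplateTransformer().fit(X).transform(X)       # -> [[1., 2.], [3., 4.]]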
|
bsd-3-clause
|
mjgrav2001/scikit-learn
|
examples/linear_model/plot_ols.py
|
220
|
1940
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
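# Illustrative check (not part of the original example): because ordinary least
# squares minimises the residual sum of squares, the same intercept and slope
# can be recovered in closed form from the normal equations with a design
# matrix A = [1, x], i.e. beta = pinv(A) . y
A_ols = np.hstack([np.ones_like(diabetes_X_train), diabetes_X_train])
beta_ols = np.linalg.pinv(A_ols).dot(diabetes_y_train)
assert np.allclose(beta_ols, [regr.intercept_, regr.coef_[0]])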
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
|
bsd-3-clause
|
stkubr/zipline
|
zipline/finance/risk/cumulative.py
|
3
|
18852
|
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logbook
import math
import numpy as np
from zipline.finance import trading
import zipline.utils.math_utils as zp_math
import pandas as pd
from pandas.tseries.tools import normalize_date
from six import iteritems
from . risk import (
alpha,
check_entry,
choose_treasury,
downside_risk,
sharpe_ratio,
sortino_ratio,
)
from zipline.utils.serialization_utils import (
VERSION_LABEL
)
log = logbook.Logger('Risk Cumulative')
choose_treasury = functools.partial(choose_treasury, lambda *args: '10year',
compound=False)
def information_ratio(algo_volatility, algorithm_return, benchmark_return):
"""
http://en.wikipedia.org/wiki/Information_ratio
Args:
algorithm_returns (np.array-like):
All returns during algorithm lifetime.
benchmark_returns (np.array-like):
All benchmark returns during algo lifetime.
Returns:
float. Information ratio.
"""
if zp_math.tolerant_equals(algo_volatility, 0):
return np.nan
# The square of the annualization factor is in the volatility,
# because the volatility is also annualized,
# i.e. the sqrt(annual factor) is in the volatility's numerator.
# So to have the correct annualization factor for the
# Sharpe value's numerator, which should be the sqrt(annual factor).
# The square of the sqrt of the annual factor, i.e. the annual factor
# itself, is needed in the numerator to factor out the division by
# its square root.
return (algorithm_return - benchmark_return) / algo_volatility
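# Illustrative worked example (not part of the original module): with an
# assumed annualised algorithm return of 12%, benchmark return of 8% and
# algorithm volatility of 10%, the information ratio is
# (0.12 - 0.08) / 0.10, i.e. roughly 0.4.
_information_ratio_example = information_ratio(0.10, 0.12, 0.08)   # ~ 0.4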
class RiskMetricsCumulative(object):
"""
:Usage:
Instantiate RiskMetricsCumulative once.
Call update() method on each dt to update the metrics.
"""
METRIC_NAMES = (
'alpha',
'beta',
'sharpe',
'algorithm_volatility',
'benchmark_volatility',
'downside_risk',
'sortino',
'information',
)
def __init__(self, sim_params,
returns_frequency=None,
create_first_day_stats=False,
account=None):
"""
- @returns_frequency allows for configuration of whether
the benchmark and algorithm returns are in units of minutes or days,
if `None` defaults to the `emission_rate` in `sim_params`.
"""
self.treasury_curves = trading.environment.treasury_curves
self.start_date = sim_params.period_start.replace(
hour=0, minute=0, second=0, microsecond=0
)
self.end_date = sim_params.period_end.replace(
hour=0, minute=0, second=0, microsecond=0
)
self.trading_days = trading.environment.days_in_range(
self.start_date,
self.end_date)
# Hold on to the trading day before the start,
# used for index of the zero return value when forcing returns
# on the first day.
self.day_before_start = self.start_date - \
trading.environment.trading_days.freq
last_day = normalize_date(sim_params.period_end)
if last_day not in self.trading_days:
last_day = pd.tseries.index.DatetimeIndex(
[last_day]
)
self.trading_days = self.trading_days.append(last_day)
self.sim_params = sim_params
self.create_first_day_stats = create_first_day_stats
if returns_frequency is None:
returns_frequency = self.sim_params.emission_rate
self.returns_frequency = returns_frequency
if returns_frequency == 'daily':
cont_index = self.get_daily_index()
elif returns_frequency == 'minute':
cont_index = self.get_minute_index(sim_params)
self.cont_index = cont_index
self.algorithm_returns_cont = pd.Series(index=cont_index)
self.benchmark_returns_cont = pd.Series(index=cont_index)
self.algorithm_cumulative_leverages_cont = pd.Series(index=cont_index)
self.mean_returns_cont = pd.Series(index=cont_index)
self.annualized_mean_returns_cont = pd.Series(index=cont_index)
self.mean_benchmark_returns_cont = pd.Series(index=cont_index)
self.annualized_mean_benchmark_returns_cont = pd.Series(
index=cont_index)
# The returns at a given time are read and reset from the respective
# returns container.
self.algorithm_returns = None
self.benchmark_returns = None
self.mean_returns = None
self.annualized_mean_returns = None
self.mean_benchmark_returns = None
self.annualized_mean_benchmark_returns = None
self.algorithm_cumulative_returns = pd.Series(index=cont_index)
self.benchmark_cumulative_returns = pd.Series(index=cont_index)
self.algorithm_cumulative_leverages = pd.Series(index=cont_index)
self.excess_returns = pd.Series(index=cont_index)
self.latest_dt = cont_index[0]
self.metrics = pd.DataFrame(index=cont_index,
columns=self.METRIC_NAMES,
dtype=float)
self.drawdowns = pd.Series(index=cont_index)
self.max_drawdowns = pd.Series(index=cont_index)
self.max_drawdown = 0
self.max_leverages = pd.Series(index=cont_index)
self.max_leverage = 0
self.current_max = -np.inf
self.daily_treasury = pd.Series(index=self.trading_days)
self.treasury_period_return = np.nan
self.num_trading_days = 0
def get_minute_index(self, sim_params):
"""
Stitches together multiple days' worth of business minutes into
one continuous index.
"""
trading_minutes = None
for day in self.trading_days:
minutes_for_day = trading.environment.market_minutes_for_day(day)
if trading_minutes is None:
# Create container for all minutes on first iteration
trading_minutes = minutes_for_day
else:
trading_minutes = trading_minutes.union(minutes_for_day)
return trading_minutes
def get_daily_index(self):
return self.trading_days
def update(self, dt, algorithm_returns, benchmark_returns, account):
# Keep track of latest dt for use in to_dict and other methods
# that report current state.
self.latest_dt = dt
self.algorithm_returns_cont[dt] = algorithm_returns
self.algorithm_returns = self.algorithm_returns_cont[:dt]
self.num_trading_days = len(self.algorithm_returns)
if self.create_first_day_stats:
if len(self.algorithm_returns) == 1:
self.algorithm_returns = pd.Series(
{self.day_before_start: 0.0}).append(
self.algorithm_returns)
self.algorithm_cumulative_returns[dt] = \
self.calculate_cumulative_returns(self.algorithm_returns)
algo_cumulative_returns_to_date = \
self.algorithm_cumulative_returns[:dt]
self.mean_returns_cont[dt] = \
algo_cumulative_returns_to_date[dt] / self.num_trading_days
self.mean_returns = self.mean_returns_cont[:dt]
self.annualized_mean_returns_cont[dt] = \
self.mean_returns_cont[dt] * 252
self.annualized_mean_returns = self.annualized_mean_returns_cont[:dt]
if self.create_first_day_stats:
if len(self.mean_returns) == 1:
self.mean_returns = pd.Series(
{self.day_before_start: 0.0}).append(self.mean_returns)
self.annualized_mean_returns = pd.Series(
{self.day_before_start: 0.0}).append(
self.annualized_mean_returns)
self.benchmark_returns_cont[dt] = benchmark_returns
self.benchmark_returns = self.benchmark_returns_cont[:dt]
if self.create_first_day_stats:
if len(self.benchmark_returns) == 1:
self.benchmark_returns = pd.Series(
{self.day_before_start: 0.0}).append(
self.benchmark_returns)
self.benchmark_cumulative_returns[dt] = \
self.calculate_cumulative_returns(self.benchmark_returns)
benchmark_cumulative_returns_to_date = \
self.benchmark_cumulative_returns[:dt]
self.mean_benchmark_returns_cont[dt] = \
benchmark_cumulative_returns_to_date[dt] / self.num_trading_days
self.mean_benchmark_returns = self.mean_benchmark_returns_cont[:dt]
self.annualized_mean_benchmark_returns_cont[dt] = \
self.mean_benchmark_returns_cont[dt] * 252
self.annualized_mean_benchmark_returns = \
self.annualized_mean_benchmark_returns_cont[:dt]
self.algorithm_cumulative_leverages_cont[dt] = account['leverage']
self.algorithm_cumulative_leverages = \
self.algorithm_cumulative_leverages_cont[:dt]
if self.create_first_day_stats:
if len(self.algorithm_cumulative_leverages) == 1:
self.algorithm_cumulative_leverages = pd.Series(
{self.day_before_start: 0.0}).append(
self.algorithm_cumulative_leverages)
if not self.algorithm_returns.index.equals(
self.benchmark_returns.index
):
message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end} on {dt}"
message = message.format(
bm_count=len(self.benchmark_returns),
algo_count=len(self.algorithm_returns),
start=self.start_date,
end=self.end_date,
dt=dt
)
raise Exception(message)
self.update_current_max()
self.metrics.benchmark_volatility[dt] = \
self.calculate_volatility(self.benchmark_returns)
self.metrics.algorithm_volatility[dt] = \
self.calculate_volatility(self.algorithm_returns)
# caching the treasury rates for the minutely case is a
# big speedup, because it avoids searching the treasury
# curves on every minute.
# In both minutely and daily, the daily curve is always used.
treasury_end = dt.replace(hour=0, minute=0)
if np.isnan(self.daily_treasury[treasury_end]):
treasury_period_return = choose_treasury(
self.treasury_curves,
self.start_date,
treasury_end
)
self.daily_treasury[treasury_end] = treasury_period_return
self.treasury_period_return = self.daily_treasury[treasury_end]
self.excess_returns[self.latest_dt] = (
self.algorithm_cumulative_returns[self.latest_dt] -
self.treasury_period_return)
self.metrics.beta[dt] = self.calculate_beta()
self.metrics.alpha[dt] = self.calculate_alpha()
self.metrics.sharpe[dt] = self.calculate_sharpe()
self.metrics.downside_risk[dt] = self.calculate_downside_risk()
self.metrics.sortino[dt] = self.calculate_sortino()
self.metrics.information[dt] = self.calculate_information()
self.max_drawdown = self.calculate_max_drawdown()
self.max_drawdowns[dt] = self.max_drawdown
self.max_leverage = self.calculate_max_leverage()
self.max_leverages[dt] = self.max_leverage
def to_dict(self):
"""
Creates a dictionary representing the state of the risk report.
Returns a dict object of the form:
"""
dt = self.latest_dt
period_label = dt.strftime("%Y-%m")
rval = {
'trading_days': self.num_trading_days,
'benchmark_volatility': self.metrics.benchmark_volatility[dt],
'algo_volatility': self.metrics.algorithm_volatility[dt],
'treasury_period_return': self.treasury_period_return,
# Though the two following keys say period return,
# they would be more accurately called the cumulative return.
# However, the keys need to stay the same, for now, for backwards
# compatibility with existing consumers.
'algorithm_period_return': self.algorithm_cumulative_returns[dt],
'benchmark_period_return': self.benchmark_cumulative_returns[dt],
'beta': self.metrics.beta[dt],
'alpha': self.metrics.alpha[dt],
'sharpe': self.metrics.sharpe[dt],
'sortino': self.metrics.sortino[dt],
'information': self.metrics.information[dt],
'excess_return': self.excess_returns[dt],
'max_drawdown': self.max_drawdown,
'max_leverage': self.max_leverage,
'period_label': period_label
}
return {k: (None if check_entry(k, v) else v)
for k, v in iteritems(rval)}
def __repr__(self):
statements = []
for metric in self.METRIC_NAMES:
value = getattr(self.metrics, metric)[-1]
if isinstance(value, list):
if len(value) == 0:
value = np.nan
else:
value = value[-1]
statements.append("{m}:{v}".format(m=metric, v=value))
return '\n'.join(statements)
def calculate_cumulative_returns(self, returns):
return (1. + returns).prod() - 1
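# Worked example (illustrative, not part of the original module): two periods
# returning +10% and then -5% compound to (1 + 0.10) * (1 - 0.05) - 1 = 0.045,
# i.e. a 4.5% cumulative return -- which is exactly what the expression above
# yields for returns = pd.Series([0.10, -0.05]).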
def update_current_max(self):
if len(self.algorithm_cumulative_returns) == 0:
return
current_cumulative_return = \
self.algorithm_cumulative_returns[self.latest_dt]
if self.current_max < current_cumulative_return:
self.current_max = current_cumulative_return
def calculate_max_drawdown(self):
if len(self.algorithm_cumulative_returns) == 0:
return self.max_drawdown
# The drawdown is defined as: (high - low) / high
# The above factors out to: 1.0 - (low / high)
#
# Instead of explicitly always using the low, use the current total
# return value, and test that against the max drawdown, which will
# exceed the previous max_drawdown iff the current return is lower than
# the previous low in the current drawdown window.
cur_drawdown = 1.0 - (
(1.0 + self.algorithm_cumulative_returns[self.latest_dt]) /
(1.0 + self.current_max))
self.drawdowns[self.latest_dt] = cur_drawdown
if self.max_drawdown < cur_drawdown:
return cur_drawdown
else:
return self.max_drawdown
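# Illustrative sketch (not part of the original module): for cumulative returns
# that climb to +20% and then fall back to +5%, the running maximum is 0.20 and
# the drawdown at the low is 1 - (1.05 / 1.20) = 0.125, i.e. 12.5%, matching the
# (high - low) / high definition in the comment above.
def _max_drawdown_example():
    cumulative = np.array([0.00, 0.10, 0.20, 0.05])
    wealth = 1.0 + cumulative
    running_max = np.maximum.accumulate(wealth)
    return (1.0 - wealth / running_max).max()              # -> 0.125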
def calculate_max_leverage(self):
# The leverage is defined as: the gross_exposure/net_liquidation
# gross_exposure = long_exposure + abs(short_exposure)
# net_liquidation = ending_cash + long_exposure + short_exposure
cur_leverage = self.algorithm_cumulative_leverages[self.latest_dt]
return max(cur_leverage, self.max_leverage)
def calculate_sharpe(self):
"""
http://en.wikipedia.org/wiki/Sharpe_ratio
"""
return sharpe_ratio(self.metrics.algorithm_volatility[self.latest_dt],
self.annualized_mean_returns[self.latest_dt],
self.daily_treasury[self.latest_dt.date()])
def calculate_sortino(self):
"""
http://en.wikipedia.org/wiki/Sortino_ratio
"""
return sortino_ratio(self.annualized_mean_returns[self.latest_dt],
self.daily_treasury[self.latest_dt.date()],
self.metrics.downside_risk[self.latest_dt])
def calculate_information(self):
"""
http://en.wikipedia.org/wiki/Information_ratio
"""
return information_ratio(
self.metrics.algorithm_volatility[self.latest_dt],
self.annualized_mean_returns[self.latest_dt],
self.annualized_mean_benchmark_returns[self.latest_dt])
def calculate_alpha(self):
"""
http://en.wikipedia.org/wiki/Alpha_(investment)
"""
return alpha(self.annualized_mean_returns[self.latest_dt],
self.treasury_period_return,
self.annualized_mean_benchmark_returns[self.latest_dt],
self.metrics.beta[self.latest_dt])
def calculate_volatility(self, daily_returns):
if len(daily_returns) <= 1:
return 0.0
return np.std(daily_returns, ddof=1) * math.sqrt(252)
def calculate_downside_risk(self):
return downside_risk(self.algorithm_returns.values,
self.mean_returns.values,
252)
def calculate_beta(self):
"""
.. math::
\\beta_a = \\frac{\mathrm{Cov}(r_a,r_p)}{\mathrm{Var}(r_p)}
http://en.wikipedia.org/wiki/Beta_(finance)
"""
# it doesn't make much sense to calculate beta for less than two
# values, so return 0.0 instead.
if len(self.algorithm_returns) < 2:
return 0.0
returns_matrix = np.vstack([self.algorithm_returns,
self.benchmark_returns])
C = np.cov(returns_matrix, ddof=1)
algorithm_covariance = C[0][1]
benchmark_variance = C[1][1]
beta = algorithm_covariance / benchmark_variance
return beta
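# Illustrative sketch (not part of the original module): beta is the covariance
# of the algorithm and benchmark returns divided by the benchmark variance.
# With assumed toy series where the algorithm moves exactly twice as much as
# the benchmark, beta comes out as 2.
def _beta_example():
    benchmark = np.array([0.01, -0.02, 0.015, 0.005])
    algorithm = 2.0 * benchmark
    C = np.cov(np.vstack([algorithm, benchmark]), ddof=1)
    return C[0][1] / C[1][1]                               # -> 2.0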
def __getstate__(self):
state_dict = \
{k: v for k, v in iteritems(self.__dict__) if
(not k.startswith('_') and not k == 'treasury_curves')}
STATE_VERSION = 2
state_dict[VERSION_LABEL] = STATE_VERSION
return state_dict
def __setstate__(self, state):
OLDEST_SUPPORTED_STATE = 2
version = state.pop(VERSION_LABEL)
if version < OLDEST_SUPPORTED_STATE:
raise BaseException("RiskMetricsCumulative \
saved state is too old.")
self.__dict__.update(state)
# These are big and we don't need to serialize them,
# so pop them back in now
self.treasury_curves = trading.environment.treasury_curves
|
apache-2.0
|
GaZ3ll3/scikit-image
|
doc/examples/plot_gabor.py
|
11
|
4450
|
"""
=============================================
Gabor filter banks for texture classification
=============================================
In this example, we will see how to classify textures based on Gabor filter
banks. Frequency and orientation representations of the Gabor filter are similar
to those of the human visual system.
The images are filtered using the real parts of various different Gabor filter
kernels. The mean and variance of the filtered images are then used as features
for classification, which is based on the least squared error for simplicity.
"""
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage as ndi
from skimage import data
from skimage.util import img_as_float
from skimage.filters import gabor_kernel
def compute_feats(image, kernels):
feats = np.zeros((len(kernels), 2), dtype=np.double)
for k, kernel in enumerate(kernels):
filtered = ndi.convolve(image, kernel, mode='wrap')
feats[k, 0] = filtered.mean()
feats[k, 1] = filtered.var()
return feats
def match(feats, ref_feats):
min_error = np.inf
min_i = None
for i in range(ref_feats.shape[0]):
error = np.sum((feats - ref_feats[i, :])**2)
if error < min_error:
min_error = error
min_i = i
return min_i
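# Illustrative sketch (not part of the original example): match() is a
# nearest-reference classifier in (mean, variance) feature space -- the
# candidate's features are compared against each reference and the index with
# the smallest squared error wins.  Toy feature arrays, assumed for illustration:
_toy_feats = np.array([[0.55, 0.11]])                      # one kernel: (mean, var)
_toy_refs = np.array([[[0.5, 0.1]], [[2.0, 0.9]], [[-1.0, 0.3]]])
assert match(_toy_feats, _toy_refs) == 0                   # closest to reference 0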
# prepare filter bank kernels
kernels = []
for theta in range(4):
theta = theta / 4. * np.pi
for sigma in (1, 3):
for frequency in (0.05, 0.25):
kernel = np.real(gabor_kernel(frequency, theta=theta,
sigma_x=sigma, sigma_y=sigma))
kernels.append(kernel)
shrink = (slice(0, None, 3), slice(0, None, 3))
brick = img_as_float(data.load('brick.png'))[shrink]
grass = img_as_float(data.load('grass.png'))[shrink]
wall = img_as_float(data.load('rough-wall.png'))[shrink]
image_names = ('brick', 'grass', 'wall')
images = (brick, grass, wall)
# prepare reference features
ref_feats = np.zeros((3, len(kernels), 2), dtype=np.double)
ref_feats[0, :, :] = compute_feats(brick, kernels)
ref_feats[1, :, :] = compute_feats(grass, kernels)
ref_feats[2, :, :] = compute_feats(wall, kernels)
print('Rotated images matched against references using Gabor filter banks:')
print('original: brick, rotated: 190deg, match result: ', end='')
feats = compute_feats(ndi.rotate(brick, angle=190, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
print('original: brick, rotated: 70deg, match result: ', end='')
feats = compute_feats(ndi.rotate(brick, angle=70, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
print('original: grass, rotated: 145deg, match result: ', end='')
feats = compute_feats(ndi.rotate(grass, angle=145, reshape=False), kernels)
print(image_names[match(feats, ref_feats)])
def power(image, kernel):
# Normalize images for better comparison.
image = (image - image.mean()) / image.std()
return np.sqrt(ndi.convolve(image, np.real(kernel), mode='wrap')**2 +
ndi.convolve(image, np.imag(kernel), mode='wrap')**2)
# Plot a selection of the filter bank kernels and their responses.
results = []
kernel_params = []
for theta in (0, 1):
theta = theta / 4. * np.pi
for frequency in (0.1, 0.4):
kernel = gabor_kernel(frequency, theta=theta)
params = 'theta=%d,\nfrequency=%.2f' % (theta * 180 / np.pi, frequency)
kernel_params.append(params)
# Save kernel and the power image for each image
results.append((kernel, [power(img, kernel) for img in images]))
fig, axes = plt.subplots(nrows=5, ncols=4, figsize=(5, 6))
plt.gray()
fig.suptitle('Image responses for Gabor filter kernels', fontsize=12)
axes[0][0].axis('off')
# Plot original images
for label, img, ax in zip(image_names, images, axes[0][1:]):
ax.imshow(img)
ax.set_title(label, fontsize=9)
ax.axis('off')
for label, (kernel, powers), ax_row in zip(kernel_params, results, axes[1:]):
# Plot Gabor kernel
ax = ax_row[0]
ax.imshow(np.real(kernel), interpolation='nearest')
ax.set_ylabel(label, fontsize=7)
ax.set_xticks([])
ax.set_yticks([])
# Plot Gabor responses with the contrast normalized for each filter
vmin = np.min(powers)
vmax = np.max(powers)
for patch, ax in zip(powers, ax_row[1:]):
ax.imshow(patch, vmin=vmin, vmax=vmax)
ax.axis('off')
plt.show()
|
bsd-3-clause
|
krez13/scikit-learn
|
sklearn/utils/estimator_checks.py
|
17
|
56282
|
from __future__ import print_function
import types
import warnings
import sys
import traceback
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.decomposition import NMF, ProjectedGradientNMF
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import DataConversionWarning
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.utils.fixes import signature
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'GaussianProcessRegressor',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
# Estimators with deprecated transform methods. Should be removed in 0.19 when
# _LearntSelectorMixin is removed.
DEPRECATED_TRANSFORM = [
"RandomForestClassifier", "RandomForestRegressor", "ExtraTreesClassifier",
"ExtraTreesRegressor", "DecisionTreeClassifier",
"DecisionTreeRegressor", "ExtraTreeClassifier", "ExtraTreeRegressor",
"LinearSVC", "SGDClassifier", "SGDRegressor", "Perceptron",
"LogisticRegression", "LogisticRegressionCV",
"GradientBoostingClassifier", "GradientBoostingRegressor"]
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
# Check that all estimators yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
# test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
yield check_classifiers_regression_target
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def check_supervised_y_no_nan(name, Estimator):
# Checks that the Estimator rejects NaN / infinite targets.
rng = np.random.RandomState(888)
X = rng.randn(10, 5)
y = np.ones(10) * np.inf
y = multioutput_estimator_convert_y_2d(name, y)
errmsg = "Input contains NaN, infinity or a value too large for " \
"dtype('float64')."
try:
Estimator().fit(X, y)
except ValueError as e:
if str(e) != errmsg:
raise ValueError("Estimator {0} raised warning as expected, but "
"does not match expected error message" \
.format(name))
else:
raise ValueError("Estimator {0} should have raised error on fitting "
"array y with NaN value.".format(name))
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
yield check_supervised_y_no_nan
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
if name != "GaussianProcessRegressor":
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
if name not in DEPRECATED_TRANSFORM:
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
yield check_fit2d_predict1d
yield check_fit2d_1sample
yield check_fit2d_1feature
yield check_fit1d_1feature
yield check_fit1d_1sample
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
This function will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check. Estimator is a class object (not an instance).
"""
name = Estimator.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
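# Illustrative usage sketch (not part of the original module), assuming the
# TemplateClassifier from scikit-learn-contrib's project-template (shown
# earlier in this collection) is installed and importable:
def _check_estimator_example():
    from skltemplate import TemplateClassifier             # assumed available
    check_estimator(TemplateClassifier)                    # raises on the first
                                                           # convention violation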
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_testing_parameters(estimator):
# set parameters to speed up some estimators and
# avoid deprecated behaviour
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
warnings.simplefilter("ignore", ConvergenceWarning)
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
# NMF
if estimator.__class__.__name__ == 'NMF':
estimator.set_params(max_iter=100)
# MLP
if estimator.__class__.__name__ in ['MLPClassifier', 'MLPRegressor']:
estimator.set_params(max_iter=100)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if "decision_function_shape" in params:
# SVC
estimator.set_params(decision_function_shape='ovo')
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the Johnson-Lindenstrauss lemma and often very few samples,
# the number of components of the random matrix projection will
# probably be greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more features than we have in most cases.
estimator.set_params(k=1)
if isinstance(estimator, NMF):
if not isinstance(estimator, ProjectedGradientNMF):
estimator.set_params(solver='cd')
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X_csr = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
for sparse_format in ['csr', 'csc', 'dok', 'lil', 'coo', 'dia', 'bsr']:
X = X_csr.asformat(sparse_format)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_testing_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
pred = estimator.predict(X)
assert_equal(pred.shape, (X.shape[0],))
if hasattr(estimator, 'predict_proba'):
probs = estimator.predict_proba(X)
assert_equal(probs.shape, (X.shape[0], 4))
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_testing_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if (hasattr(estimator, "transform") and
name not in DEPRECATED_TRANSFORM):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
@ignore_warnings
def check_fit2d_predict1d(name, Estimator):
# check by fitting a 2d array and predicting with a 1d array
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20, 3))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
estimator.fit(X, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
try:
assert_warns(DeprecationWarning,
getattr(estimator, method), X[0])
except ValueError:
pass
@ignore_warnings
def check_fit2d_1sample(name, Estimator):
    # check that fitting a 2d array with only one sample either works or raises ValueError
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(1, 10))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit2d_1feature(name, Estimator):
    # check that fitting a 2d array with only one feature either works or raises ValueError
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(10, 1))
y = X[:, 0].astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1feature(name, Estimator):
# check fitting 1d array with 1 feature
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = X.astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
@ignore_warnings
def check_fit1d_1sample(name, Estimator):
    # check fitting a 1d X array with a single-sample y
rnd = np.random.RandomState(0)
X = 3 * rnd.uniform(size=(20))
y = np.array([1])
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
if hasattr(estimator, "n_components"):
estimator.n_components = 1
if hasattr(estimator, "n_clusters"):
estimator.n_clusters = 1
set_random_state(estimator, 1)
try:
estimator.fit(X, y)
except ValueError:
pass
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_testing_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
# fit_transform method should work on non fitted estimator
transformer_clone = clone(transformer)
X_pred = transformer_clone.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
        # on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
if name in DEPRECATED_TRANSFORM:
funcs = ["score"]
else:
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
if name in DEPRECATED_TRANSFORM:
funcs = ["fit", "score", "partial_fit", "fit_predict"]
else:
funcs = [
"fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = [p.name for p in signature(func).parameters.values()]
assert_true(args[1] in ["y", "Y"],
"Expected y or Y as second argument for method "
"%s of %s. Got arguments: %r."
% (func_name, Estimator.__name__, args))
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
if name in DEPRECATED_TRANSFORM:
methods = ["predict", "decision_function", "predict_proba"]
else:
methods = [
"predict", "transform", "decision_function", "predict_proba"]
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in methods:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_testing_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature\(s\) \(shape=\(3, 0\)\) while a minimum of \d* is required."
assert_raises_regex(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
    # Checks that the Estimator fails gracefully when X contains NaN or inf.
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if (hasattr(estimator, "transform") and
name not in DEPRECATED_TRANSFORM):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
@ignore_warnings
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
if name in DEPRECATED_TRANSFORM:
check_methods = ["predict", "decision_function", "predict_proba"]
else:
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_testing_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
    # check that an error is raised if the number of features
    # changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
if not hasattr(alg, 'partial_fit'):
# check again as for mlp this depends on algorithm
return
set_testing_parameters(alg)
try:
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
except NotImplementedError:
return
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_testing_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_testing_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
@ignore_warnings # Warnings are raised by decision function
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_testing_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
                if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
                        and not isinstance(classifier, BaseLibSVM)):
                    # the one-vs-one decision function of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_testing_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_testing_parameters(regressor_1)
set_testing_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y.reshape(-1, 1)) # X is already scaled
y = y.ravel()
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_testing_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
    # check that regressors do not expose decision_function or predict_proba,
    # or that, if present, these raise a DeprecationWarning
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_testing_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
            # This is a very small dataset; the default n_iter is likely
            # to stop before convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_testing_parameters(estimator)
set_random_state(estimator)
    # Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_testing_parameters(estimator_1)
set_testing_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LinearDiscriminantAnalysis()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
def param_filter(p):
"""Identify hyper parameters of an estimator"""
return (p.name != 'self'
and p.kind != p.VAR_KEYWORD
and p.kind != p.VAR_POSITIONAL)
init_params = [p for p in signature(init).parameters.values()
if param_filter(p)]
except (TypeError, ValueError):
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they can need a non-default argument
init_params = init_params[1:]
for init_param in init_params:
assert_not_equal(init_param.default, init_param.empty,
"parameter %s for %s has no default value"
% (init_param.name, type(estimator).__name__))
assert_in(type(init_param.default),
[str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if init_param.name not in params.keys():
# deprecated parameter, not in get_params
assert_true(init_param.default is None)
continue
param_value = params[init_param.name]
if isinstance(param_value, np.ndarray):
assert_array_equal(param_value, init_param.default)
else:
assert_equal(param_value, init_param.default)
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D.
    # Convert y into 2-D for those estimators.
if "MultiTask" in name:
return np.reshape(y, (-1, 1))
return y
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
    # Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = np.reshape(y_, (-1, 1))
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
elif name in ('GridSearchCV', 'RandomizedSearchCV', 'SelectFromModel'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
def check_classifiers_regression_target(name, Estimator):
# Check if classifier throws an exception when fed regression targets
boston = load_boston()
X, y = boston.data, boston.target
e = Estimator()
msg = 'Unknown label type: '
assert_raises_regex(ValueError, msg, e.fit, X, y)
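# --- Illustrative usage sketch (not part of the original module) ---
# The checks above are normally driven by the common-test machinery, but each
# one can also be invoked directly with an estimator name and class.
# LogisticRegression is used here only as a familiar, assumed example; the
# exact checks that pass depend on the scikit-learn version.
if __name__ == '__main__':
    from sklearn.linear_model import LogisticRegression
    check_fit_score_takes_y('LogisticRegression', LogisticRegression)
    check_estimators_unfitted('LogisticRegression', LogisticRegression)
    check_classifiers_regression_target('LogisticRegression', LogisticRegression)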
|
bsd-3-clause
|
amauboussin/arxiv-twitterbot
|
preprocessing.py
|
1
|
4531
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.feature_extraction.text import TfidfVectorizer
import spacy
from constants import PICKLE_PATH, TWEET_CSV_PATH
# python -m spacy download en
def load_arxiv_and_tweets():
arxiv = pd.read_pickle(PICKLE_PATH)
arxiv['link'] = arxiv.link.apply(clean_arxiv_api_link)
miles_links = pd.read_csv(TWEET_CSV_PATH)
miles_links['time'] = miles_links.time.apply(pd.Timestamp)
miles_links['link'] = miles_links['link'].apply(clean_miles_link)
df = miles_links.set_index('link').join(arxiv.set_index('link'), how='right')
df = df.reset_index().groupby('link').apply(group_tweeted_multiple).reset_index(drop=True)
df = df.assign(tweeted=(~df.time.isnull()).astype(int))
    # papers published after the day of the last tweet are filtered out by callers
return df
def get_sklearn_data():
"""Get data for training an sklearn model"""
df = load_arxiv_and_tweets().sort_values('published')
max_date = df[df.tweeted == 1].published.max()
return get_features_matrix(df[df.published < max_date])
def get_tokenized_list_of_dicts():
"""Get data as a list of dictionaries with spacy docs + labels for training the conv net"""
df = load_arxiv_and_tweets()
max_date = df[df.tweeted == 1].published.max()
data_dicts = arxiv_df_to_list_of_dicts(df[df.published < max_date])
tokenized_data = parse_content_serial(data_dicts)
return tokenized_data
def get_features_matrix(df, min_author_freq=3, min_term_freq=30, ngram_range=(1, 3)):
"""Return numpy array of data for sklearn models"""
text = [title + ' ' + summary for title, summary in zip(df.title.values, df.summary.values)]
vectorizer = TfidfVectorizer(min_df=min_term_freq, stop_words='english', ngram_range=ngram_range)
text_features = vectorizer.fit_transform(text).toarray()
author_counts = pd.Series([a for author_set in df.authors.values for a in author_set]).value_counts()
allowed_authors = author_counts[author_counts >= min_author_freq].index
filtered_authors = df.authors.apply(lambda authors: [a for a in authors if a in allowed_authors])
author_binarizer = MultiLabelBinarizer()
author_features = author_binarizer.fit_transform(filtered_authors.values)
category_dummies = pd.get_dummies(df.category)
category_features = category_dummies.values
all_features = [text_features, author_features, category_features]
x = np.concatenate(all_features, axis=1)
if 'tweeted' in df:
y = df.tweeted.astype(int).values
else:
y = None
feature_names = np.concatenate((vectorizer.get_feature_names(),
category_dummies.columns.values,
author_binarizer.classes_))
return x, y, feature_names
def get_spacy_parser():
return spacy.load('en')
def group_tweeted_multiple(df):
row = df.iloc[0]
if df.shape[0] > 1:
row[['rts', 'favorites']] = df.rts.sum(), df.favorites.sum()
return row
def arxiv_df_to_list_of_dicts(df):
def row_to_example(row):
def to_token(s):
"""Squash a string into one token by removing non-alpha characters"""
return ''.join([c for c in s if c.isalpha()])
category_token = to_token(row.category)
author_tokens = ' '.join([to_token(author) for author in row.authors])
to_concat = [row.title, row.summary, author_tokens, category_token]
text = ' '.join(to_concat).replace('\n', ' ')
return {
'label': row.tweeted,
'id': row['index'],
'content': text,
'link': row.link
}
return [row_to_example(row) for i, row in df.reset_index().iterrows()]
def clean_arxiv_api_link(link):
if not link[-1].isdigit():
return None
return link.replace('http://', '').replace('https://', '')[:-2]
def clean_miles_link(link):
if not link[-1].isdigit():
return None
return link.replace('http://', '').replace('https://', '')
def parse_content_serial(data):
"""Parse the content field of a list of dicts from unicode to a spacy doc"""
spacy_parser = get_spacy_parser()
for row in data:
row['content'] = spacy_parser(row['content'])
return data
def sorted_train_test_split(x, y, test_size):
train_size = 1. - test_size
train_end_index = int(len(x) * train_size)
return x[:train_end_index], x[train_end_index:], y[:train_end_index], y[train_end_index:]
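# --- Illustrative usage sketch (not part of the original module) ---
# Assuming the arXiv pickle and tweet CSV referenced by PICKLE_PATH and
# TWEET_CSV_PATH are present, the features built above can be fed to any
# scikit-learn classifier; LogisticRegression is used purely as an example.
if __name__ == '__main__':
    from sklearn.linear_model import LogisticRegression
    x, y, feature_names = get_sklearn_data()
    x_train, x_test, y_train, y_test = sorted_train_test_split(x, y, test_size=0.2)
    clf = LogisticRegression().fit(x_train, y_train)
    print('held-out accuracy: %.3f' % clf.score(x_test, y_test))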
|
mit
|
Mazecreator/tensorflow
|
tensorflow/python/estimator/inputs/pandas_io.py
|
86
|
4503
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.estimator.inputs.queues import feeding_functions
try:
# pylint: disable=g-import-not-at-top
# pylint: disable=unused-import
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def pandas_input_fn(x,
y=None,
batch_size=128,
num_epochs=1,
shuffle=None,
queue_capacity=1000,
num_threads=1,
target_column='target'):
"""Returns input function that would feed Pandas DataFrame into the model.
Note: `y`'s index must match `x`'s index.
Args:
x: pandas `DataFrame` object.
y: pandas `Series` object. `None` if absent.
batch_size: int, size of batches to return.
num_epochs: int, number of epochs to iterate over data. If not `None`,
read attempts that would exceed this value will raise `OutOfRangeError`.
shuffle: bool, whether to read the records in random order.
queue_capacity: int, size of the read queue. If `None`, it will be set
roughly to the size of `x`.
    num_threads: Integer, number of threads used for reading and enqueueing. In
      order to have a predictable and repeatable order of reading and enqueueing,
such as in prediction and evaluation mode, `num_threads` should be 1.
target_column: str, name to give the target column `y`.
Returns:
    Function that has signature of ()->(dict of `features`, `target`)
Raises:
ValueError: if `x` already contains a column with the same name as `y`, or
if the indexes of `x` and `y` don't match.
TypeError: `shuffle` is not bool.
"""
if not HAS_PANDAS:
raise TypeError(
'pandas_input_fn should not be called without pandas installed')
if not isinstance(shuffle, bool):
raise TypeError('shuffle must be explicitly set as boolean; '
'got {}'.format(shuffle))
x = x.copy()
if y is not None:
if target_column in x:
raise ValueError(
'Cannot use name %s for target column: DataFrame already has a '
'column with that name: %s' % (target_column, x.columns))
if not np.array_equal(x.index, y.index):
raise ValueError('Index for x and y are mismatched.\nIndex for x: %s\n'
'Index for y: %s\n' % (x.index, y.index))
x[target_column] = y
# TODO(mdan): These are memory copies. We probably don't need 4x slack space.
# The sizes below are consistent with what I've seen elsewhere.
if queue_capacity is None:
if shuffle:
queue_capacity = 4 * len(x)
else:
queue_capacity = len(x)
min_after_dequeue = max(queue_capacity / 4, 1)
def input_fn():
"""Pandas input function."""
queue = feeding_functions._enqueue_data( # pylint: disable=protected-access
x,
queue_capacity,
shuffle=shuffle,
min_after_dequeue=min_after_dequeue,
num_threads=num_threads,
enqueue_size=batch_size,
num_epochs=num_epochs)
if num_epochs is None:
features = queue.dequeue_many(batch_size)
else:
features = queue.dequeue_up_to(batch_size)
assert len(features) == len(x.columns) + 1, ('Features should have one '
'extra element for the index.')
features = features[1:]
features = dict(zip(list(x.columns), features))
if y is not None:
target = features.pop(target_column)
return features, target
return features
return input_fn
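# --- Illustrative usage sketch (not part of the original module) ---
# A minimal construction of `pandas_input_fn`; the column names and target
# values below are made up for the example. Calling the returned `_input_fn`
# inside a TensorFlow graph/session builds the feeding queue and yields
# (features_dict, target) tensors with `batch_size` rows each.
if __name__ == '__main__' and HAS_PANDAS:
  _x = pd.DataFrame({'a': [1., 2., 3., 4.], 'b': [5., 6., 7., 8.]})
  _y = pd.Series([0, 1, 0, 1])
  _input_fn = pandas_input_fn(_x, _y, batch_size=2, num_epochs=1, shuffle=False)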
|
apache-2.0
|
DGrady/pandas
|
pandas/tests/api/test_api.py
|
2
|
8024
|
# -*- coding: utf-8 -*-
from warnings import catch_warnings
import pytest
import pandas as pd
from pandas import api
from pandas.util import testing as tm
class Base(object):
def check(self, namespace, expected, ignored=None):
# see which names are in the namespace, minus optional
# ignored ones
# compare vs the expected
result = sorted([f for f in dir(namespace) if not f.startswith('_')])
if ignored is not None:
result = sorted(list(set(result) - set(ignored)))
expected = sorted(expected)
tm.assert_almost_equal(result, expected)
class TestPDApi(Base):
# these are optionally imported based on testing
# & need to be ignored
ignored = ['tests', 'locale', 'conftest']
# top-level sub-packages
lib = ['api', 'compat', 'core', 'errors', 'pandas',
'plotting', 'test', 'testing', 'tools', 'tseries',
'util', 'options', 'io']
# these are already deprecated; awaiting removal
deprecated_modules = ['stats', 'datetools', 'parser',
'json', 'lib', 'tslib']
# misc
misc = ['IndexSlice', 'NaT']
# top-level classes
classes = ['Categorical', 'CategoricalIndex', 'DataFrame', 'DateOffset',
'DatetimeIndex', 'ExcelFile', 'ExcelWriter', 'Float64Index',
'Grouper', 'HDFStore', 'Index', 'Int64Index', 'MultiIndex',
'Period', 'PeriodIndex', 'RangeIndex', 'UInt64Index',
'Series', 'SparseArray', 'SparseDataFrame',
'SparseSeries', 'TimeGrouper', 'Timedelta',
'TimedeltaIndex', 'Timestamp', 'Interval', 'IntervalIndex']
# these are already deprecated; awaiting removal
deprecated_classes = ['WidePanel', 'Panel4D',
'SparseList', 'Expr', 'Term']
# these should be deprecated in the future
deprecated_classes_in_future = ['Panel']
# external modules exposed in pandas namespace
modules = ['np', 'datetime']
# top-level functions
funcs = ['bdate_range', 'concat', 'crosstab', 'cut',
'date_range', 'interval_range', 'eval',
'factorize', 'get_dummies',
'infer_freq', 'isna', 'isnull', 'lreshape',
'melt', 'notna', 'notnull', 'offsets',
'merge', 'merge_ordered', 'merge_asof',
'period_range',
'pivot', 'pivot_table', 'qcut',
'show_versions', 'timedelta_range', 'unique',
'value_counts', 'wide_to_long']
# top-level option funcs
funcs_option = ['reset_option', 'describe_option', 'get_option',
'option_context', 'set_option',
'set_eng_float_format']
# top-level read_* funcs
funcs_read = ['read_clipboard', 'read_csv', 'read_excel', 'read_fwf',
'read_gbq', 'read_hdf', 'read_html', 'read_json',
'read_msgpack', 'read_pickle', 'read_sas', 'read_sql',
'read_sql_query', 'read_sql_table', 'read_stata',
'read_table', 'read_feather', 'read_parquet']
# top-level to_* funcs
funcs_to = ['to_datetime', 'to_msgpack',
'to_numeric', 'to_pickle', 'to_timedelta']
# top-level to deprecate in the future
deprecated_funcs_in_future = []
# these are already deprecated; awaiting removal
deprecated_funcs = ['ewma', 'ewmcorr', 'ewmcov', 'ewmstd', 'ewmvar',
'ewmvol', 'expanding_apply', 'expanding_corr',
'expanding_count', 'expanding_cov', 'expanding_kurt',
'expanding_max', 'expanding_mean', 'expanding_median',
'expanding_min', 'expanding_quantile',
'expanding_skew', 'expanding_std', 'expanding_sum',
'expanding_var', 'rolling_apply',
'rolling_corr', 'rolling_count', 'rolling_cov',
'rolling_kurt', 'rolling_max', 'rolling_mean',
'rolling_median', 'rolling_min', 'rolling_quantile',
'rolling_skew', 'rolling_std', 'rolling_sum',
'rolling_var', 'rolling_window', 'ordered_merge',
'pnow', 'match', 'groupby', 'get_store',
'plot_params', 'scatter_matrix']
def test_api(self):
self.check(pd,
self.lib + self.misc +
self.modules + self.deprecated_modules +
self.classes + self.deprecated_classes +
self.deprecated_classes_in_future +
self.funcs + self.funcs_option +
self.funcs_read + self.funcs_to +
self.deprecated_funcs_in_future +
self.deprecated_funcs,
self.ignored)
class TestApi(Base):
allowed = ['types']
def test_api(self):
self.check(api, self.allowed)
class TestTesting(Base):
funcs = ['assert_frame_equal', 'assert_series_equal',
'assert_index_equal']
def test_testing(self):
from pandas import testing
self.check(testing, self.funcs)
class TestDatetoolsDeprecation(object):
def test_deprecation_access_func(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.datetools.to_datetime('2016-01-01')
def test_deprecation_access_obj(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.datetools.monthEnd
class TestTopLevelDeprecations(object):
# top-level API deprecations
# GH 13790
def test_pnow(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.pnow(freq='M')
def test_term(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.Term('index>=date')
def test_expr(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.Expr('2>1')
def test_match(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.match([1, 2, 3], [1])
def test_groupby(self):
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
pd.groupby(pd.Series([1, 2, 3]), [1, 1, 1])
# GH 15940
def test_get_store(self):
pytest.importorskip('tables')
with tm.ensure_clean() as path:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
s = pd.get_store(path)
s.close()
class TestJson(object):
def test_deprecation_access_func(self):
with catch_warnings(record=True):
pd.json.dumps([])
class TestParser(object):
def test_deprecation_access_func(self):
with catch_warnings(record=True):
pd.parser.na_values
class TestLib(object):
def test_deprecation_access_func(self):
with catch_warnings(record=True):
pd.lib.infer_dtype('foo')
class TestTSLib(object):
def test_deprecation_access_func(self):
with catch_warnings(record=True):
pd.tslib.Timestamp('20160101')
class TestTypes(object):
def test_deprecation_access_func(self):
with tm.assert_produces_warning(
FutureWarning, check_stacklevel=False):
from pandas.types.concat import union_categoricals
c1 = pd.Categorical(list('aabc'))
c2 = pd.Categorical(list('abcd'))
union_categoricals(
[c1, c2],
sort_categories=True,
ignore_order=True)
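# --- Illustrative note (not part of the original module) ---
# `Base.check` simply diffs the public names of a namespace against an expected
# list, so the helper can also be exercised standalone; this mirrors what
# TestApi.test_api asserts for the pandas version these tests target.
if __name__ == '__main__':
    Base().check(api, ['types'])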
|
bsd-3-clause
|
tdhopper/scikit-learn
|
sklearn/utils/tests/test_multiclass.py
|
128
|
12853
|
from __future__ import division
import numpy as np
import scipy.sparse as sp
from itertools import product
from sklearn.externals.six.moves import xrange
from sklearn.externals.six import iteritems
from scipy.sparse import issparse
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.multiclass import is_multilabel
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.multiclass import class_distribution
class NotAnArray(object):
"""An object that is convertable to an array. This is useful to
simulate a Pandas timeseries."""
def __init__(self, data):
self.data = data
def __array__(self):
return self.data
EXAMPLES = {
'multilabel-indicator': [
        # valid when the data is formatted as sparse or dense, identified
# by CSR format when the testing takes place
csr_matrix(np.random.RandomState(42).randint(2, size=(10, 10))),
csr_matrix(np.array([[0, 1], [1, 0]])),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.bool)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.int8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.uint8)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float)),
csr_matrix(np.array([[0, 1], [1, 0]], dtype=np.float32)),
csr_matrix(np.array([[0, 0], [0, 0]])),
csr_matrix(np.array([[0, 1]])),
# Only valid when data is dense
np.array([[-1, 1], [1, -1]]),
np.array([[-3, 3], [3, -3]]),
NotAnArray(np.array([[-3, 3], [3, -3]])),
],
'multiclass': [
[1, 0, 2, 2, 1, 4, 2, 4, 4, 4],
np.array([1, 0, 2]),
np.array([1, 0, 2], dtype=np.int8),
np.array([1, 0, 2], dtype=np.uint8),
np.array([1, 0, 2], dtype=np.float),
np.array([1, 0, 2], dtype=np.float32),
np.array([[1], [0], [2]]),
NotAnArray(np.array([1, 0, 2])),
[0, 1, 2],
['a', 'b', 'c'],
np.array([u'a', u'b', u'c']),
np.array([u'a', u'b', u'c'], dtype=object),
np.array(['a', 'b', 'c'], dtype=object),
],
'multiclass-multioutput': [
np.array([[1, 0, 2, 2], [1, 4, 2, 4]]),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.int8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.uint8),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float),
np.array([[1, 0, 2, 2], [1, 4, 2, 4]], dtype=np.float32),
np.array([['a', 'b'], ['c', 'd']]),
np.array([[u'a', u'b'], [u'c', u'd']]),
np.array([[u'a', u'b'], [u'c', u'd']], dtype=object),
np.array([[1, 0, 2]]),
NotAnArray(np.array([[1, 0, 2]])),
],
'binary': [
[0, 1],
[1, 1],
[],
[0],
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1]),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.bool),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.int8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.uint8),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float),
np.array([0, 1, 1, 1, 0, 0, 0, 1, 1, 1], dtype=np.float32),
np.array([[0], [1]]),
NotAnArray(np.array([[0], [1]])),
[1, -1],
[3, 5],
['a'],
['a', 'b'],
['abc', 'def'],
np.array(['abc', 'def']),
[u'a', u'b'],
np.array(['abc', 'def'], dtype=object),
],
'continuous': [
[1e-5],
[0, .5],
np.array([[0], [.5]]),
np.array([[0], [.5]], dtype=np.float32),
],
'continuous-multioutput': [
np.array([[0, .5], [.5, 0]]),
np.array([[0, .5], [.5, 0]], dtype=np.float32),
np.array([[0, .5]]),
],
'unknown': [
[[]],
[()],
        # sequences of sequences that weren't supported even before deprecation
np.array([np.array([]), np.array([1, 2, 3])], dtype=object),
[np.array([]), np.array([1, 2, 3])],
[set([1, 2, 3]), set([1, 2])],
[frozenset([1, 2, 3]), frozenset([1, 2])],
# and also confusable as sequences of sequences
[{0: 'a', 1: 'b'}, {0: 'a'}],
# empty second dimension
np.array([[], []]),
# 3d
np.array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]),
]
}
NON_ARRAY_LIKE_EXAMPLES = [
set([1, 2, 3]),
{0: 'a', 1: 'b'},
{0: [5], 1: [5]},
'abc',
frozenset([1, 2, 3]),
None,
]
MULTILABEL_SEQUENCES = [
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
np.array([[], [1, 2]], dtype='object'),
NotAnArray(np.array([[], [1, 2]], dtype='object'))
]
def test_unique_labels():
# Empty iterable
assert_raises(ValueError, unique_labels)
# Multiclass problem
assert_array_equal(unique_labels(xrange(10)), np.arange(10))
assert_array_equal(unique_labels(np.arange(10)), np.arange(10))
assert_array_equal(unique_labels([4, 0, 2]), np.array([0, 2, 4]))
# Multilabel indicator
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[1, 0, 1],
[0, 0, 0]])),
np.arange(3))
assert_array_equal(unique_labels(np.array([[0, 0, 1],
[0, 0, 0]])),
np.arange(3))
# Several arrays passed
assert_array_equal(unique_labels([4, 0, 2], xrange(5)),
np.arange(5))
assert_array_equal(unique_labels((0, 1, 2), (0,), (2, 1)),
np.arange(3))
    # Borderline case with binary indicator matrix
assert_raises(ValueError, unique_labels, [4, 0, 2], np.ones((5, 5)))
assert_raises(ValueError, unique_labels, np.ones((5, 4)), np.ones((5, 5)))
assert_array_equal(unique_labels(np.ones((4, 5)), np.ones((5, 5))),
np.arange(5))
def test_unique_labels_non_specific():
# Test unique_labels with a variety of collected examples
    # Smoke test for all supported formats
for format in ["binary", "multiclass", "multilabel-indicator"]:
for y in EXAMPLES[format]:
unique_labels(y)
    # We don't support those formats at the moment
for example in NON_ARRAY_LIKE_EXAMPLES:
assert_raises(ValueError, unique_labels, example)
for y_type in ["unknown", "continuous", 'continuous-multioutput',
'multiclass-multioutput']:
for example in EXAMPLES[y_type]:
assert_raises(ValueError, unique_labels, example)
def test_unique_labels_mixed_types():
# Mix with binary or multiclass and multilabel
mix_clf_format = product(EXAMPLES["multilabel-indicator"],
EXAMPLES["multiclass"] +
EXAMPLES["binary"])
for y_multilabel, y_multiclass in mix_clf_format:
assert_raises(ValueError, unique_labels, y_multiclass, y_multilabel)
assert_raises(ValueError, unique_labels, y_multilabel, y_multiclass)
assert_raises(ValueError, unique_labels, [[1, 2]], [["a", "d"]])
assert_raises(ValueError, unique_labels, ["1", 2])
assert_raises(ValueError, unique_labels, [["1", 2], [1, 3]])
assert_raises(ValueError, unique_labels, [["1", "2"], [2, 3]])
def test_is_multilabel():
for group, group_examples in iteritems(EXAMPLES):
if group in ['multilabel-indicator']:
dense_assert_, dense_exp = assert_true, 'True'
else:
dense_assert_, dense_exp = assert_false, 'False'
for example in group_examples:
# Only mark explicitly defined sparse examples as valid sparse
# multilabel-indicators
if group == 'multilabel-indicator' and issparse(example):
sparse_assert_, sparse_exp = assert_true, 'True'
else:
sparse_assert_, sparse_exp = assert_false, 'False'
if (issparse(example) or
(hasattr(example, '__array__') and
np.asarray(example).ndim == 2 and
np.asarray(example).dtype.kind in 'biuf' and
np.asarray(example).shape[1] > 0)):
examples_sparse = [sparse_matrix(example)
for sparse_matrix in [coo_matrix,
csc_matrix,
csr_matrix,
dok_matrix,
lil_matrix]]
for exmpl_sparse in examples_sparse:
sparse_assert_(is_multilabel(exmpl_sparse),
msg=('is_multilabel(%r)'
' should be %s')
% (exmpl_sparse, sparse_exp))
# Densify sparse examples before testing
if issparse(example):
example = example.toarray()
dense_assert_(is_multilabel(example),
msg='is_multilabel(%r) should be %s'
% (example, dense_exp))
def test_type_of_target():
for group, group_examples in iteritems(EXAMPLES):
for example in group_examples:
assert_equal(type_of_target(example), group,
msg=('type_of_target(%r) should be %r, got %r'
% (example, group, type_of_target(example))))
for example in NON_ARRAY_LIKE_EXAMPLES:
        msg_regex = r'Expected array-like \(array or non-string sequence\).*'
assert_raises_regex(ValueError, msg_regex, type_of_target, example)
for example in MULTILABEL_SEQUENCES:
msg = ('You appear to be using a legacy multi-label data '
'representation. Sequence of sequences are no longer supported;'
' use a binary array or sparse matrix instead.')
assert_raises_regex(ValueError, msg, type_of_target, example)
def test_class_distribution():
y = np.array([[1, 0, 0, 1],
[2, 2, 0, 1],
[1, 3, 0, 1],
[4, 2, 0, 1],
[2, 0, 0, 1],
[1, 3, 0, 1]])
# Define the sparse matrix with a mix of implicit and explicit zeros
data = np.array([1, 2, 1, 4, 2, 1, 0, 2, 3, 2, 3, 1, 1, 1, 1, 1, 1])
indices = np.array([0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 5, 0, 1, 2, 3, 4, 5])
indptr = np.array([0, 6, 11, 11, 17])
y_sp = sp.csc_matrix((data, indices, indptr), shape=(6, 4))
classes, n_classes, class_prior = class_distribution(y)
classes_sp, n_classes_sp, class_prior_sp = class_distribution(y_sp)
classes_expected = [[1, 2, 4],
[0, 2, 3],
[0],
[1]]
n_classes_expected = [3, 3, 1, 1]
class_prior_expected = [[3/6, 2/6, 1/6],
[1/3, 1/3, 1/3],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
# Test again with explicit sample weights
(classes,
n_classes,
class_prior) = class_distribution(y, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
(classes_sp,
n_classes_sp,
     class_prior_sp) = class_distribution(y_sp, [1.0, 2.0, 1.0, 2.0, 1.0, 2.0])
class_prior_expected = [[4/9, 3/9, 2/9],
[2/9, 4/9, 3/9],
[1.0],
[1.0]]
for k in range(y.shape[1]):
assert_array_almost_equal(classes[k], classes_expected[k])
assert_array_almost_equal(n_classes[k], n_classes_expected[k])
assert_array_almost_equal(class_prior[k], class_prior_expected[k])
assert_array_almost_equal(classes_sp[k], classes_expected[k])
assert_array_almost_equal(n_classes_sp[k], n_classes_expected[k])
assert_array_almost_equal(class_prior_sp[k], class_prior_expected[k])
|
bsd-3-clause
|
xzh86/scikit-learn
|
examples/svm/plot_svm_regression.py
|
249
|
1451
|
"""
===================================================================
Support Vector Regression (SVR) using linear and non-linear kernels
===================================================================
Toy example of 1D regression using linear, polynomial and RBF kernels.
"""
print(__doc__)
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
###############################################################################
# Generate sample data
X = np.sort(5 * np.random.rand(40, 1), axis=0)
y = np.sin(X).ravel()
###############################################################################
# Add noise to targets
y[::5] += 3 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
svr_lin = SVR(kernel='linear', C=1e3)
svr_poly = SVR(kernel='poly', C=1e3, degree=2)
y_rbf = svr_rbf.fit(X, y).predict(X)
y_lin = svr_lin.fit(X, y).predict(X)
y_poly = svr_poly.fit(X, y).predict(X)
###############################################################################
# look at the results
plt.scatter(X, y, c='k', label='data')
# plt.hold('on') is unnecessary (and removed in modern matplotlib); overlaying is the default
plt.plot(X, y_rbf, c='g', label='RBF model')
plt.plot(X, y_lin, c='r', label='Linear model')
plt.plot(X, y_poly, c='b', label='Polynomial model')
plt.xlabel('data')
plt.ylabel('target')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
|
bsd-3-clause
|
azjps/bokeh
|
bokeh/charts/attributes.py
|
6
|
14622
|
from __future__ import absolute_import
from copy import copy
from itertools import cycle
import pandas as pd
from bokeh.charts import DEFAULT_PALETTE
from bokeh.charts.properties import ColumnLabel
from bokeh.charts.utils import marker_types
from bokeh.charts.data_source import ChartDataSource
from bokeh.charts.stats import Bins
from bokeh.core.enums import DashPattern
from bokeh.models.sources import ColumnDataSource
from bokeh.core.properties import (HasProps, String, List, Instance, Either, Any, Dict,
Bool, Override)
class AttrSpec(HasProps):
"""A container for assigning attributes to values and retrieving them as needed.
    A special feature this provides is automatic handling of cases where the provided
    iterable is too short compared to the number of distinct values in the data.
Once created as attr_spec, you can do attr_spec[data_label], where data_label must
be a one dimensional tuple of values, representing the unique group in the data.
See the :meth:`AttrSpec.setup` method for the primary way to provide an existing
AttrSpec with data and column values and update all derived property values.
"""
data = Instance(ColumnDataSource)
iterable = List(Any, default=None)
attrname = String(help='Name of the attribute the spec provides.')
columns = Either(ColumnLabel, List(ColumnLabel), help="""
The label or list of column labels that correspond to the columns that will be
used to find all distinct values (single column) or combination of values (
multiple columns) to then assign a unique attribute to. If not enough unique
attribute values are found, then the attribute values will be cycled.
""")
default = Any(default=None, help="""
The default value for the attribute, which is used if no column is assigned to
the attribute for plotting. If the default value is not provided, the first
value in the `iterable` property is used.
""")
attr_map = Dict(Any, Any, help="""
Created by the attribute specification when `iterable` and `data` are
available. The `attr_map` will include a mapping between the distinct value(s)
found in `columns` and the attribute value that has been assigned.
""")
items = Any(default=None, help="""
The attribute specification calculates this list of distinct values that are
found in `columns` of `data`.
""")
sort = Bool(default=True, help="""
A boolean flag to tell the attribute specification to sort `items`, when it is
calculated. This affects which value of `iterable` is assigned to each distinct
value in `items`.
""")
ascending = Bool(default=True, help="""
A boolean flag to tell the attribute specification how to sort `items` if the
`sort` property is set to `True`. The default setting for `ascending` is `True`.
""")
bins = Instance(Bins, help="""
If an attribute spec is binning data, so that we can map one value in the
`iterable` to one value in `items`, then this attribute will contain an instance
of the Bins stat. This is used to create unique labels for each bin, which is
then used for `items` instead of the actual unique values in `columns`.
""")
def __init__(self, columns=None, df=None, iterable=None, default=None,
items=None, **properties):
"""Create a lazy evaluated attribute specification.
Args:
columns: a list of column labels
df(:class:`~pandas.DataFrame`): the data source for the attribute spec.
iterable: an iterable of distinct attribute values
default: a value to use as the default attribute when no columns are passed
items: the distinct values in columns. If items is provided as input,
then the values provided are used instead of being calculated. This can
be used to force a specific order for assignment.
**properties: other properties to pass to parent :class:`HasProps`
"""
properties['columns'] = self._ensure_list(columns)
if df is not None:
properties['data'] = ColumnDataSource(df)
if default is None and iterable is not None:
default_iter = copy(iterable)
properties['default'] = next(iter(default_iter))
elif default is not None:
properties['default'] = default
if iterable is not None:
properties['iterable'] = iterable
if items is not None:
properties['items'] = items
super(AttrSpec, self).__init__(**properties)
if self.default is None and self.iterable is not None:
self.default = next(iter(copy(self.iterable)))
if self.data is not None and self.columns is not None:
if df is None:
df = self.data.to_df()
self._generate_items(df, columns=self.columns)
if self.items is not None and self.iterable is not None:
self.attr_map = self._create_attr_map()
@staticmethod
def _ensure_list(attr):
"""Always returns a list with the provided value. Returns the value if a list."""
if isinstance(attr, str):
return [attr]
elif isinstance(attr, tuple):
return list(attr)
else:
return attr
@staticmethod
def _ensure_tuple(attr):
"""Return tuple with the provided value. Returns the value if a tuple."""
if not isinstance(attr, tuple):
return (attr,)
else:
return attr
def _setup_default(self):
"""Stores the first value of iterable into `default` property."""
self.default = next(self._setup_iterable())
def _setup_iterable(self):
"""Default behavior is to copy and cycle the provided iterable."""
return cycle(copy(self.iterable))
def _generate_items(self, df, columns):
"""Produce list of unique tuples that identify each item."""
if self.sort:
# TODO (fpliger): this handles pandas API change so users do not experience
# the related annoying deprecation warning. This is probably worth
# removing when pandas deprecated version (0.16) is "old" enough
try:
df = df.sort_values(by=columns, ascending=self.ascending)
except AttributeError:
df = df.sort(columns=columns, ascending=self.ascending)
items = df[columns].drop_duplicates()
self.items = [tuple(x) for x in items.to_records(index=False)]
def _create_attr_map(self, df=None, columns=None):
"""Creates map between unique values and available attributes."""
if df is not None and columns is not None:
self._generate_items(df, columns)
iterable = self._setup_iterable()
return {item: next(iterable) for item in self._item_tuples()}
def _item_tuples(self):
return [self._ensure_tuple(item) for item in self.items]
def set_columns(self, columns):
"""Set columns property and update derived properties as needed."""
columns = self._ensure_list(columns)
if all([col in self.data.column_names for col in columns]):
self.columns = columns
else:
# we have input values other than columns
# assume this is now the iterable at this point
self.iterable = columns
self._setup_default()
def setup(self, data=None, columns=None):
"""Set the data and update derived properties as needed."""
if data is not None:
self.data = data
if columns is not None and self.data is not None:
self.set_columns(columns)
if self.columns is not None and self.data is not None:
self.attr_map = self._create_attr_map(self.data.to_df(), self.columns)
def update_data(self, data):
self.setup(data=data, columns=self.columns)
def __getitem__(self, item):
"""Lookup the attribute to use for the given unique group label."""
if not self.attr_map:
return self.default
elif self._ensure_tuple(item) not in self.attr_map.keys():
# make sure we have attr map
self.setup()
return self.attr_map[self._ensure_tuple(item)]
@property
def series(self):
if not self.attr_map:
return pd.Series()
else:
index = pd.MultiIndex.from_tuples(self._item_tuples(), names=self.columns)
return pd.Series(list(self.attr_map.values()), index=index)
class ColorAttr(AttrSpec):
"""An attribute specification for mapping unique data values to colors.
.. note::
Should be expanded to support more complex coloring options.
"""
attrname = Override(default='color')
iterable = Override(default=DEFAULT_PALETTE)
bin = Bool(default=False)
def __init__(self, **kwargs):
iterable = kwargs.pop('palette', None)
if iterable is not None:
kwargs['iterable'] = iterable
super(ColorAttr, self).__init__(**kwargs)
def _generate_items(self, df, columns):
"""Produce list of unique tuples that identify each item."""
if not self.bin:
super(ColorAttr, self)._generate_items(df, columns)
else:
if len(columns) == 1 and ChartDataSource.is_number(df[columns[0]]):
self.bins = Bins(source=ColumnDataSource(df), column=columns[0],
bins=len(self.iterable), aggregate=False)
if self.sort:
self.bins.sort(ascending=self.ascending)
self.items = [bin.label[0] for bin in self.bins]
else:
                raise ValueError('Binned colors can only be created for one '
                                 'column of numerical data.')
def add_bin_labels(self, data):
col = self.columns[0]
# save original values into new column
data._data[col + '_values'] = data._data[col]
for bin in self.bins:
# set all rows associated to each bin to the bin label being mapped to colors
data._data.ix[data._data[col + '_values'].isin(bin.values),
col] = bin.label[0]
data._data[col] = pd.Categorical(data._data[col], categories=list(self.items),
ordered=self.sort)
class MarkerAttr(AttrSpec):
"""An attribute specification for mapping unique data values to markers."""
attrname = Override(default='marker')
iterable = Override(default=list(marker_types.keys()))
def __init__(self, **kwargs):
iterable = kwargs.pop('markers', None)
if iterable is not None:
kwargs['iterable'] = iterable
super(MarkerAttr, self).__init__(**kwargs)
dashes = DashPattern._values
class DashAttr(AttrSpec):
"""An attribute specification for mapping unique data values to line dashes."""
attrname = Override(default='dash')
iterable = Override(default=dashes)
def __init__(self, **kwargs):
iterable = kwargs.pop('dash', None)
if iterable is not None:
kwargs['iterable'] = iterable
super(DashAttr, self).__init__(**kwargs)
class IdAttr(AttrSpec):
"""An attribute specification for mapping unique data values to line dashes."""
attrname = Override(default='id')
def _setup_iterable(self):
return iter(range(0, len(self.items)))
class CatAttr(AttrSpec):
"""An attribute specification for mapping unique data values to labels.
.. note::
this is a special attribute specification, which is used for defining which
labels are used for one aspect of a chart (grouping) vs another (stacking or
legend)
"""
attrname = Override(default='nest')
def __init__(self, **kwargs):
super(CatAttr, self).__init__(**kwargs)
def _setup_iterable(self):
return iter(self.items)
def get_levels(self, columns):
"""Provides a list of levels the attribute represents."""
if self.columns is not None:
levels = [columns.index(col) for col in self.columns]
return levels
else:
return []
""" Attribute Spec Functions
Convenience functions for producing attribute specifications. These would be
the interface used by end users when providing attribute specs as inputs
to the Chart.
"""
def color(columns=None, palette=None, bin=False, **kwargs):
"""Produces a ColorAttr specification for coloring groups of data based on columns.
Args:
columns (str or list(str), optional): a column or list of columns for coloring
palette (list(str), optional): a list of colors to use for assigning to unique
values in `columns`.
**kwargs: any keyword, arg supported by :class:`AttrSpec`
Returns:
a `ColorAttr` object
"""
if palette is not None:
kwargs['palette'] = palette
kwargs['columns'] = columns
kwargs['bin'] = bin
return ColorAttr(**kwargs)
def marker(columns=None, markers=None, **kwargs):
""" Specifies detailed configuration for a marker attribute.
Args:
columns (list or str):
markers (list(str) or str): a custom list of markers. Must exist within
:data:`marker_types`.
**kwargs: any keyword, arg supported by :class:`AttrSpec`
Returns:
a `MarkerAttr` object
"""
if markers is not None:
kwargs['markers'] = markers
kwargs['columns'] = columns
return MarkerAttr(**kwargs)
def cat(columns=None, cats=None, sort=True, ascending=True, **kwargs):
""" Specifies detailed configuration for a chart attribute that uses categoricals.
Args:
columns (list or str): the columns used to generate the categorical variable
cats (list, optional): overrides the values derived from columns
sort (bool, optional): whether to sort the categorical values (default=True)
        ascending (bool, optional): whether to sort in ascending order (default=True)
**kwargs: any keyword, arg supported by :class:`AttrSpec`
Returns:
a `CatAttr` object
"""
if cats is not None:
kwargs['cats'] = cats
kwargs['columns'] = columns
kwargs['sort'] = sort
kwargs['ascending'] = ascending
return CatAttr(**kwargs)
|
bsd-3-clause
|
datachand/h2o-3
|
py2/h2o_cmd.py
|
20
|
16497
|
import h2o_nodes
from h2o_test import dump_json, verboseprint
import h2o_util
import h2o_print as h2p
from h2o_test import OutputObj
#************************************************************************
def runStoreView(node=None, **kwargs):
print "FIX! disabling runStoreView for now"
return {}
if not node: node = h2o_nodes.nodes[0]
print "\nStoreView:"
# FIX! are there keys other than frames and models
a = node.frames(**kwargs)
# print "storeview frames:", dump_json(a)
frameList = [af['key']['name'] for af in a['frames']]
for f in frameList:
print "frame:", f
print "# of frames:", len(frameList)
b = node.models()
# print "storeview models:", dump_json(b)
modelList = [bm['key'] for bm in b['models']]
for m in modelList:
print "model:", m
print "# of models:", len(modelList)
return {'keys': frameList + modelList}
#************************************************************************
def runExec(node=None, **kwargs):
if not node: node = h2o_nodes.nodes[0]
a = node.rapids(**kwargs)
return a
def runInspect(node=None, key=None, verbose=False, **kwargs):
if not key: raise Exception('No key for Inspect')
if not node: node = h2o_nodes.nodes[0]
a = node.frames(key, **kwargs)
if verbose:
print "inspect of %s:" % key, dump_json(a)
return a
#************************************************************************
def infoFromParse(parse):
if not parse:
raise Exception("parse is empty for infoFromParse")
# assumes just one result from Frames
if 'frames' not in parse:
raise Exception("infoFromParse expects parse= param from parse result: %s" % parse)
if len(parse['frames'])!=1:
raise Exception("infoFromParse expects parse= param from parse result: %s " % parse['frames'])
    # is it index[0] or key '0' in a dictionary?
frame = parse['frames'][0]
# need more info about this dataset for debug
numCols = len(frame['columns'])
numRows = frame['rows']
key_name = frame['frame_id']['name']
return numRows, numCols, key_name
#************************************************************************
# make this be the basic way to get numRows, numCols
def infoFromInspect(inspect):
if not inspect:
raise Exception("inspect is empty for infoFromInspect")
# assumes just one result from Frames
if 'frames' not in inspect:
raise Exception("infoFromInspect expects inspect= param from Frames result (single): %s" % inspect)
if len(inspect['frames'])!=1:
raise Exception("infoFromInspect expects inspect= param from Frames result (single): %s " % inspect['frames'])
    # is it index[0] or key '0' in a dictionary?
frame = inspect['frames'][0]
# need more info about this dataset for debug
columns = frame['columns']
key_name = frame['frame_id']['name']
missingList = []
labelList = []
typeList = []
for i, colDict in enumerate(columns): # columns is a list
if 'missing_count' not in colDict:
# debug
print "\ncolDict"
for k in colDict:
print " key: %s" % k
# data
# domain
# string_data
# type
# label
# percentiles
# precision
# mins
# maxs
# mean
# histogram_base
# histogram_bins
# histogram_stride
# zero_count
# missing_count
# positive_infinity_count
# negative_infinity_count
# __meta
mins = colDict['mins']
maxs = colDict['maxs']
missing = colDict['missing_count']
label = colDict['label']
stype = colDict['type']
missingList.append(missing)
labelList.append(label)
typeList.append(stype)
if missing!=0:
print "%s: col: %s %s, missing: %d" % (key_name, i, label, missing)
print "inspect typeList:", typeList
# make missingList empty if all 0's
if sum(missingList)==0:
missingList = []
# no type per col in inspect2
numCols = len(frame['columns'])
numRows = frame['rows']
print "\n%s numRows: %s, numCols: %s" % (key_name, numRows, numCols)
return missingList, labelList, numRows, numCols
#************************************************************************
# does all columns unless you specify column index.
# only will return first or specified column
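# A hedged usage sketch (the frame key 'myframe.hex' and column 'C1' are
# hypothetical; expected is [min, 25th, 50th, 75th, max]):
#   co = runSummary(key='myframe.hex', column='C1',
#                   expected=[0.0, 2.5, 5.0, 7.5, 10.0], maxDelta=0.1)
#   print co.label, co.mean, co.maxs[0]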
def runSummary(node=None, key=None, column=None, expected=None, maxDelta=None, noPrint=False, **kwargs):
if not key: raise Exception('No key for Summary')
if not node: node = h2o_nodes.nodes[0]
# return node.summary(key, **kwargs)
i = InspectObj(key=key)
# just so I don't have to change names below
missingList = i.missingList
labelList = i.labelList
numRows = i.numRows
numCols = i.numCols
print "labelList:", labelList
assert labelList is not None
# doesn't take indices? only column labels?
# return first column, unless specified
if not (column is None or isinstance(column, (basestring, int))):
raise Exception("column param should be string or integer index or None %s %s" % (type(column), column))
    # either return the first col, or the col identified by label. the column identified could be string or index?
if column is None: # means the summary json when we ask for col 0, will be what we return (do all though)
colNameToDo = labelList
colIndexToDo = range(len(labelList))
elif isinstance(column, int):
colNameToDo = [labelList[column]]
colIndexToDo = [column]
elif isinstance(column, basestring):
colNameToDo = [column]
if column not in labelList:
raise Exception("% not in labellist: %s" % (column, labellist))
colIndexToDo = [labelList.index(column)]
else:
raise Exception("wrong type %s for column %s" % (type(column), column))
# we get the first column as result after walking across all, if no column parameter
desiredResult = None
for (colIndex, colName) in zip(colIndexToDo, colNameToDo):
print "doing summary on %s %s" % (colIndex, colName)
# ugly looking up the colIndex
co = SummaryObj(key=key, colIndex=colIndex, colName=colName)
if not desiredResult:
desiredResult = co
if not noPrint:
for k,v in co:
# only print [0] of mins and maxs because of the e308 values when they don't have dataset values
if k=='mins' or k=='maxs':
print "%s[0]" % k, v[0]
else:
print k, v
if expected is not None:
print "len(co.histogram_bins):", len(co.histogram_bins)
print "co.label:", co.label, "mean (2 places):", h2o_util.twoDecimals(co.mean)
# what is precision. -1?
print "co.label:", co.label, "std dev. (2 places):", h2o_util.twoDecimals(co.sigma)
# print "FIX! hacking the co.percentiles because it's short by two"
# if co.percentiles:
# percentiles = [0] + co.percentiles + [0]
# else:
# percentiles = None
percentiles = co.percentiles
assert len(co.percentiles) == len(co.default_percentiles)
# the thresholds h2o used, should match what we expected
# expected = [0] * 5
# Fix. doesn't check for expected = 0?
# max of one bin
if maxDelta is None:
maxDelta = (co.maxs[0] - co.mins[0])/1000
if expected[0]: h2o_util.assertApproxEqual(co.mins[0], expected[0], tol=maxDelta,
msg='min is not approx. expected')
if expected[1]: h2o_util.assertApproxEqual(percentiles[2], expected[1], tol=maxDelta,
msg='25th percentile is not approx. expected')
if expected[2]: h2o_util.assertApproxEqual(percentiles[4], expected[2], tol=maxDelta,
msg='50th percentile (median) is not approx. expected')
if expected[3]: h2o_util.assertApproxEqual(percentiles[6], expected[3], tol=maxDelta,
msg='75th percentile is not approx. expected')
if expected[4]: h2o_util.assertApproxEqual(co.maxs[0], expected[4], tol=maxDelta,
msg='max is not approx. expected')
# figure out the expected max error
# use this for comparing to sklearn/sort
MAX_QBINS = 1000
if expected[0] and expected[4]:
expectedRange = expected[4] - expected[0]
            # because of floor and ceil effects we potentially lose 2 bins (worst case)
# the extra bin for the max value, is an extra bin..ignore
expectedBin = expectedRange/(MAX_QBINS-2)
maxErr = expectedBin # should we have some fuzz for fp?
else:
print "Test won't calculate max expected error"
maxErr = 0
pt = h2o_util.twoDecimals(percentiles)
        # only look at [0] for now...big e308 numbers appear if unpopulated due to not enough unique values in dataset column
mx = h2o_util.twoDecimals(co.maxs[0])
mn = h2o_util.twoDecimals(co.mins[0])
print "co.label:", co.label, "co.percentiles (2 places):", pt
print "co.default_percentiles:", co.default_percentiles
print "co.label:", co.label, "co.maxs: (2 places):", mx
print "co.label:", co.label, "co.mins: (2 places):", mn
# FIX! why would percentiles be None? enums?
if pt is None:
compareActual = mn, [None] * 3, mx
else:
compareActual = mn, pt[2], pt[4], pt[6], mx
h2p.green_print("actual min/25/50/75/max co.label:", co.label, "(2 places):", compareActual)
h2p.green_print("expected min/25/50/75/max co.label:", co.label, "(2 places):", expected)
return desiredResult
# this parses the json object returned for one col from runSummary...returns an OutputObj object
# summaryResult = h2o_cmd.runSummary(key=hex_key, column=0)
# co = h2o_cmd.infoFromSummary(summaryResult)
# print co.label
# legacy
def infoFromSummary(summaryResult, column=None):
return SummaryObj(summaryResult, column=column)
class ParseObj(OutputObj):
# the most basic thing is that the data frame has the # of rows and cols we expected
# embed that checking here, so every test doesn't have to
def __init__(self, parseResult, expectedNumRows=None, expectedNumCols=None, noPrint=False, **kwargs):
super(ParseObj, self).__init__(parseResult['frames'][0], "Parse", noPrint=noPrint)
# add my stuff
self.numRows, self.numCols, self.parse_key = infoFromParse(parseResult)
# h2o_import.py does this for test support
if 'python_elapsed' in parseResult:
self.python_elapsed = parseResult['python_elapsed']
if expectedNumRows is not None:
assert self.numRows == expectedNumRows, "%s %s" % (self.numRows, expectedNumRows)
if expectedNumCols is not None:
assert self.numCols == expectedNumCols, "%s %s" % (self.numCols, expectedNumCols)
print "ParseObj created for:", self.parse_key # vars(self)
# Let's experiment with creating new objects that are an api I control for generic operations (Inspect)
class InspectObj(OutputObj):
# the most basic thing is that the data frame has the # of rows and cols we expected
# embed that checking here, so every test doesn't have to
def __init__(self, key,
expectedNumRows=None, expectedNumCols=None, expectedMissingList=None, expectedLabelList=None,
noPrint=False, **kwargs):
inspectResult = runInspect(key=key)
super(InspectObj, self).__init__(inspectResult['frames'][0], "Inspect", noPrint=noPrint)
# add my stuff
self.missingList, self.labelList, self.numRows, self.numCols = infoFromInspect(inspectResult)
if expectedNumRows is not None:
assert self.numRows == expectedNumRows, "%s %s" % (self.numRows, expectedNumRows)
if expectedNumCols is not None:
assert self.numCols == expectedNumCols, "%s %s" % (self.numCols, expectedNumCols)
if expectedMissingList is not None:
            assert self.missingList == expectedMissingList, "%s %s" % (self.missingList, expectedMissingList)
if expectedLabelList is not None:
assert self.labelList == expectedLabelList, "%s %s" % (self.labelList, expectedLabelList)
print "InspectObj created for:", key #, vars(self)
class SummaryObj(OutputObj):
    def check(self,
        expectedNumRows=None, expectedNumCols=None,
        expectedLabel=None, expectedType=None, expectedMissing=None, expectedDomain=None, expectedBinsSum=None,
        noPrint=False, **kwargs):
        # compare the column summary against whichever expected values were provided
        if expectedLabel is not None:
            assert self.label == expectedLabel, "%s %s" % (self.label, expectedLabel)
        if expectedType is not None:
            assert self.type == expectedType, "%s %s" % (self.type, expectedType)
        if expectedMissing is not None:
            assert self.missing_count == expectedMissing, "%s %s" % (self.missing_count, expectedMissing)
        if expectedDomain is not None:
            assert self.domain == expectedDomain, "%s %s" % (self.domain, expectedDomain)
        if expectedBinsSum is not None:
            assert self.binsSum == expectedBinsSum, "%s %s" % (self.binsSum, expectedBinsSum)
# column is column name?
def __init__(self, key, colIndex, colName,
expectedNumRows=None, expectedNumCols=None,
expectedLabel=None, expectedType=None, expectedMissing=None, expectedDomain=None, expectedBinsSum=None,
noPrint=False, timeoutSecs=30, **kwargs):
        # we need both colIndex and colName for doing Summary efficiently
# ugly.
assert colIndex is not None
assert colName is not None
summaryResult = h2o_nodes.nodes[0].summary(key=key, column=colName, timeoutSecs=timeoutSecs, **kwargs)
# this should be the same for all the cols? Or does the checksum change?
frame = summaryResult['frames'][0]
default_percentiles = frame['default_percentiles']
checksum = frame['checksum']
rows = frame['rows']
# assert colIndex < len(frame['columns']), "You're asking for colIndex %s but there are only %s. " % \
# (colIndex, len(frame['columns']))
# coJson = frame['columns'][colIndex]
# is it always 0 now? the one I asked for ?
coJson = frame['columns'][0]
assert checksum !=0 and checksum is not None
assert rows!=0 and rows is not None
# FIX! why is frame['key'] = None here?
# assert frame['key'] == key, "%s %s" % (frame['key'], key)
super(SummaryObj, self).__init__(coJson, "Summary for %s" % colName, noPrint=noPrint)
# how are enums binned. Stride of 1? (what about domain values)
# touch all
# print "vars", vars(self)
coList = [
len(self.data),
self.domain,
self.string_data,
self.type,
self.label,
self.percentiles,
self.precision,
self.mins,
self.maxs,
self.mean,
self.histogram_base,
len(self.histogram_bins),
self.histogram_stride,
self.zero_count,
self.missing_count,
self.positive_infinity_count,
self.negative_infinity_count,
]
assert self.label==colName, "%s You must have told me the wrong colName %s for the given colIndex %s" % \
(self.label, colName, colIndex)
print "you can look at this attributes in the returned object (which is OutputObj if you assigned to 'co')"
for k,v in self:
print "%s" % k,
# hack these into the column object from the full summary
self.default_percentiles = default_percentiles
self.checksum = checksum
self.rows = rows
print "\nSummaryObj for", key, "for colName", colName, "colIndex:", colIndex
print "SummaryObj created for:", key # vars(self)
# now do the assertion checks
self.check(expectedNumRows, expectedNumCols,
expectedLabel, expectedType, expectedMissing, expectedDomain, expectedBinsSum,
noPrint=noPrint, **kwargs)
|
apache-2.0
|
chanceraine/nupic
|
external/linux32/lib/python2.6/site-packages/matplotlib/delaunay/interpolate.py
|
73
|
7068
|
import numpy as np
from matplotlib._delaunay import compute_planes, linear_interpolate_grid, nn_interpolate_grid
from matplotlib._delaunay import nn_interpolate_unstructured
__all__ = ['LinearInterpolator', 'NNInterpolator']
def slice2gridspec(key):
"""Convert a 2-tuple of slices to start,stop,steps for x and y.
    key -- (slice(ystart, ystop, ystep), slice(xstart, xstop, xstep))
For now, the only accepted step values are imaginary integers (interpreted
in the same way numpy.mgrid, etc. do).
"""
if ((len(key) != 2) or
(not isinstance(key[0], slice)) or
(not isinstance(key[1], slice))):
raise ValueError("only 2-D slices, please")
x0 = key[1].start
x1 = key[1].stop
xstep = key[1].step
if not isinstance(xstep, complex) or int(xstep.real) != xstep.real:
raise ValueError("only the [start:stop:numsteps*1j] form supported")
xstep = int(xstep.imag)
y0 = key[0].start
y1 = key[0].stop
ystep = key[0].step
if not isinstance(ystep, complex) or int(ystep.real) != ystep.real:
raise ValueError("only the [start:stop:numsteps*1j] form supported")
ystep = int(ystep.imag)
return x0, x1, xstep, y0, y1, ystep
class LinearInterpolator(object):
"""Interpolate a function defined on the nodes of a triangulation by
using the planes defined by the three function values at each corner of
the triangles.
LinearInterpolator(triangulation, z, default_value=numpy.nan)
triangulation -- Triangulation instance
z -- the function values at each node of the triangulation
default_value -- a float giving the default value should the interpolating
point happen to fall outside of the convex hull of the triangulation
    At the moment, only regular rectangular grids are supported for
interpolation.
vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j]
vals would then be a (ysteps, xsteps) array containing the interpolated
values. These arguments are interpreted the same way as numpy.mgrid.
Attributes:
planes -- (ntriangles, 3) array of floats specifying the plane for each
triangle.
Linear Interpolation
--------------------
    Given the Delaunay triangulation (or indeed *any* complete triangulation) we
can interpolate values inside the convex hull by locating the enclosing
triangle of the interpolation point and returning the value at that point of
the plane defined by the three node values.
f = planes[tri,0]*x + planes[tri,1]*y + planes[tri,2]
The interpolated function is C0 continuous across the convex hull of the
input points. It is C1 continuous across the convex hull except for the
nodes and the edges of the triangulation.
"""
def __init__(self, triangulation, z, default_value=np.nan):
self.triangulation = triangulation
self.z = np.asarray(z, dtype=np.float64)
self.default_value = default_value
self.planes = compute_planes(triangulation.x, triangulation.y, self.z,
triangulation.triangle_nodes)
def __getitem__(self, key):
x0, x1, xstep, y0, y1, ystep = slice2gridspec(key)
grid = linear_interpolate_grid(x0, x1, xstep, y0, y1, ystep, self.default_value,
self.planes, self.triangulation.x, self.triangulation.y,
self.triangulation.triangle_nodes, self.triangulation.triangle_neighbors)
return grid
class NNInterpolator(object):
"""Interpolate a function defined on the nodes of a triangulation by
the natural neighbors method.
NNInterpolator(triangulation, z, default_value=numpy.nan)
triangulation -- Triangulation instance
z -- the function values at each node of the triangulation
default_value -- a float giving the default value should the interpolating
point happen to fall outside of the convex hull of the triangulation
    At the moment, only regular rectangular grids are supported for
interpolation.
vals = interp[ystart:ystop:ysteps*1j, xstart:xstop:xsteps*1j]
vals would then be a (ysteps, xsteps) array containing the interpolated
values. These arguments are interpreted the same way as numpy.mgrid.
Natural Neighbors Interpolation
-------------------------------
One feature of the Delaunay triangulation is that for each triangle, its
circumcircle contains no other point (although in degenerate cases, like
squares, other points may be *on* the circumcircle). One can also construct
what is called the Voronoi diagram from a Delaunay triangulation by
connecting the circumcenters of the triangles to those of their neighbors to
    form a tessellation of irregular polygons covering the plane and containing
only one node from the triangulation. Each point in one node's Voronoi
polygon is closer to that node than any other node.
To compute the Natural Neighbors interpolant, we consider adding the
interpolation point to the triangulation. We define the natural neighbors of
this point as the set of nodes participating in Delaunay triangles whose
circumcircles contain the point. To restore the Delaunay-ness of the
triangulation, one would only have to alter those triangles and Voronoi
    polygons. The new Voronoi diagram would have a polygon around the inserted
point. This polygon would "steal" area from the original Voronoi polygons.
For each node i in the natural neighbors set, we compute the area stolen
from its original Voronoi polygon, stolen[i]. We define the natural
neighbors coordinates
phi[i] = stolen[i] / sum(stolen,axis=0)
We then use these phi[i] to weight the corresponding function values from
the input data z to compute the interpolated value.
The interpolated surface is C1-continuous except at the nodes themselves
across the convex hull of the input points. One can find the set of points
that a given node will affect by computing the union of the areas covered by
the circumcircles of each Delaunay triangle that node participates in.
"""
def __init__(self, triangulation, z, default_value=np.nan):
self.triangulation = triangulation
self.z = np.asarray(z, dtype=np.float64)
self.default_value = default_value
def __getitem__(self, key):
x0, x1, xstep, y0, y1, ystep = slice2gridspec(key)
grid = nn_interpolate_grid(x0, x1, xstep, y0, y1, ystep, self.default_value,
self.triangulation.x, self.triangulation.y, self.z,
self.triangulation.circumcenters,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return grid
def __call__(self, intx, inty):
intz = nn_interpolate_unstructured(intx, inty, self.default_value,
self.triangulation.x, self.triangulation.y, self.z,
self.triangulation.circumcenters,
self.triangulation.triangle_nodes,
self.triangulation.triangle_neighbors)
return intz
|
agpl-3.0
|
eriol/pywt
|
util/refguide_check.py
|
2
|
27051
|
#!/usr/bin/env python
"""
refguide_check.py [OPTIONS] [-- ARGS]
Check for a PyWavelets submodule whether the objects in its __all__ dict
correspond to the objects included in the reference guide.
Example of usage::
$ python refguide_check.py optimize
Note that this is a helper script to be able to check if things are missing;
the output of this script does need to be checked manually. In some cases
objects are left out of the refguide for a good reason (it's an alias of
another function, or deprecated, or ...)
Another use of this helper script is to check validity of code samples
in docstrings. This is different from doctesting [we do not aim to have
scipy docstrings doctestable!], this is just to make sure that code in
docstrings is valid python::
$ python refguide_check.py --check_docs optimize
"""
from __future__ import print_function
import sys
import os
import re
import copy
import inspect
import warnings
import doctest
import tempfile
import io
import docutils.core
from docutils.parsers.rst import directives
import shutil
import glob
from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
from argparse import ArgumentParser
import numpy as np
# sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc',
# 'sphinxext'))
from numpydoc.docscrape_sphinx import get_doc_object
# Remove sphinx directives that don't run without Sphinx environment
directives._directives.pop('versionadded', None)
directives._directives.pop('versionchanged', None)
directives._directives.pop('moduleauthor', None)
directives._directives.pop('sectionauthor', None)
directives._directives.pop('codeauthor', None)
directives._directives.pop('toctree', None)
BASE_MODULE = "pywt"
PUBLIC_SUBMODULES = []
# Docs for these modules are included in the parent module
OTHER_MODULE_DOCS = {}
# these names are known to fail doctesting and we like to keep it that way
# e.g. sometimes pseudocode is acceptable etc
DOCTEST_SKIPLIST = set([])
# these names are not required to be present in ALL despite being in
# autosummary:: listing
REFGUIDE_ALL_SKIPLIST = []
HAVE_MATPLOTLIB = False
def short_path(path, cwd=None):
"""
Return relative or absolute path name, whichever is shortest.
"""
if not isinstance(path, str):
return path
if cwd is None:
cwd = os.getcwd()
abspath = os.path.abspath(path)
relpath = os.path.relpath(path, cwd)
if len(abspath) <= len(relpath):
return abspath
return relpath
def find_names(module, names_dict):
# Refguide entries:
#
# - 3 spaces followed by function name, and maybe some spaces, some
# dashes, and an explanation; only function names listed in
# refguide are formatted like this (mostly, there may be some false
# positives)
#
# - special directives, such as data and function
#
# - (scipy.constants only): quoted list
#
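    # For example, a hypothetical refguide line such as
    #   "   dwt   -- single-level discrete wavelet transform"
    # would match the first pattern below and register the name "dwt".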
patterns = [
r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
]
if module.__name__ == 'scipy.constants':
patterns += ["^``([a-z_0-9A-Z]+)``"]
patterns = [re.compile(pattern) for pattern in patterns]
module_name = module.__name__
for line in module.__doc__.splitlines():
res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
if res:
module_name = res.group(1)
continue
for pattern in patterns:
res = re.match(pattern, line)
if res is not None:
name = res.group(1)
entry = '.'.join([module_name, name])
names_dict.setdefault(module_name, set()).add(name)
break
def get_all_dict(module):
"""Return a copy of the __all__ dict with irrelevant items removed."""
if hasattr(module, "__all__"):
all_dict = copy.deepcopy(module.__all__)
else:
all_dict = copy.deepcopy(dir(module))
all_dict = [name for name in all_dict
if not name.startswith("_")]
for name in ['absolute_import', 'division', 'print_function']:
try:
all_dict.remove(name)
except ValueError:
pass
# Modules are almost always private; real submodules need a separate
# run of refguide_check.
all_dict = [name for name in all_dict
if not inspect.ismodule(getattr(module, name, None))]
deprecated = []
not_deprecated = []
for name in all_dict:
f = getattr(module, name, None)
if callable(f) and is_deprecated(f):
deprecated.append(name)
else:
not_deprecated.append(name)
others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
return not_deprecated, deprecated, others
def compare(all_dict, others, names, module_name):
"""Return sets of objects only in __all__, refguide, or completely missing."""
only_all = set()
for name in all_dict:
if name not in names:
only_all.add(name)
only_ref = set()
missing = set()
for name in names:
if name not in all_dict:
for pat in REFGUIDE_ALL_SKIPLIST:
if re.match(pat, module_name + '.' + name):
if name not in others:
missing.add(name)
break
else:
only_ref.add(name)
return only_all, only_ref, missing
def is_deprecated(f):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("error")
try:
f(**{"not a kwarg": None})
except DeprecationWarning:
return True
except:
pass
return False
def check_items(all_dict, names, deprecated, others, module_name, dots=True):
num_all = len(all_dict)
num_ref = len(names)
output = ""
output += "Non-deprecated objects in __all__: %i\n" % num_all
output += "Objects in refguide: %i\n\n" % num_ref
only_all, only_ref, missing = compare(all_dict, others, names, module_name)
dep_in_ref = set(only_ref).intersection(deprecated)
only_ref = set(only_ref).difference(deprecated)
if len(dep_in_ref) > 0:
output += "Deprecated objects in refguide::\n\n"
for name in sorted(deprecated):
output += " " + name + "\n"
if len(only_all) == len(only_ref) == len(missing) == 0:
if dots:
output_dot('.')
return [(None, True, output)]
else:
if len(only_all) > 0:
output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
for name in sorted(only_all):
output += " " + name + "\n"
if len(only_ref) > 0:
output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
for name in sorted(only_ref):
output += " " + name + "\n"
if len(missing) > 0:
output += "ERROR: missing objects::\n\n"
for name in sorted(missing):
output += " " + name + "\n"
if dots:
output_dot('F')
return [(None, False, output)]
def validate_rst_syntax(text, name, dots=True):
if text is None:
if dots:
output_dot('E')
return False, "ERROR: %s: no documentation" % (name,)
ok_unknown_items = set([
'mod', 'currentmodule', 'autosummary', 'data',
'obj', 'versionadded', 'versionchanged', 'module', 'class',
'ref', 'func', 'toctree', 'moduleauthor',
'sectionauthor', 'codeauthor', 'eq',
])
# Run through docutils
error_stream = io.StringIO()
def resolve(name, is_label=False):
return ("http://foo", name)
token = '<RST-VALIDATE-SYNTAX-CHECK>'
docutils.core.publish_doctree(
text, token,
settings_overrides = dict(halt_level=5,
traceback=True,
default_reference_context='title-reference',
default_role='emphasis',
link_base='',
resolve_name=resolve,
stylesheet_path='',
raw_enabled=0,
file_insertion_enabled=0,
warning_stream=error_stream))
# Print errors, disregarding unimportant ones
error_msg = error_stream.getvalue()
errors = error_msg.split(token)
success = True
output = ""
for error in errors:
lines = error.splitlines()
if not lines:
continue
m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
if m:
if m.group(1) in ok_unknown_items:
continue
m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
if m:
continue
output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n"
success = False
if not success:
output += " " + "-"*72 + "\n"
for lineno, line in enumerate(text.splitlines()):
output += " %-4d %s\n" % (lineno+1, line)
output += " " + "-"*72 + "\n\n"
if dots:
output_dot('.' if success else 'F')
return success, output
def output_dot(msg='.', stream=sys.stderr):
stream.write(msg)
stream.flush()
def check_rest(module, names, dots=True):
"""
Check reStructuredText formatting of docstrings
Returns: [(name, success_flag, output), ...]
"""
try:
skip_types = (dict, str, unicode, float, int)
except NameError:
# python 3
skip_types = (dict, str, float, int)
results = []
if module.__name__[6:] not in OTHER_MODULE_DOCS:
results += [(module.__name__,) +
validate_rst_syntax(inspect.getdoc(module),
module.__name__, dots=dots)]
for name in names:
full_name = module.__name__ + '.' + name
obj = getattr(module, name, None)
if obj is None:
results.append((full_name, False, "%s has no docstring" % (full_name,)))
continue
elif isinstance(obj, skip_types):
continue
if inspect.ismodule(obj):
text = inspect.getdoc(obj)
else:
try:
text = str(get_doc_object(obj))
except:
import traceback
results.append((full_name, False,
"Error in docstring format!\n" +
traceback.format_exc()))
continue
m = re.search("([\x00-\x09\x0b-\x1f])", text)
if m:
msg = ("Docstring contains a non-printable character %r! "
"Maybe forgot r\"\"\"?" % (m.group(1),))
results.append((full_name, False, msg))
continue
try:
src_file = short_path(inspect.getsourcefile(obj))
except TypeError:
src_file = None
if src_file:
file_full_name = src_file + ':' + full_name
else:
file_full_name = full_name
results.append((full_name,) +
validate_rst_syntax(text, file_full_name, dots=dots))
return results
### Doctest helpers ####
# the namespace to run examples in
DEFAULT_NAMESPACE = {'np': np}
# the namespace to do checks in
CHECK_NAMESPACE = {
'np': np,
'assert_allclose': np.testing.assert_allclose,
'assert_equal': np.testing.assert_equal,
# recognize numpy repr's
'array': np.array,
'matrix': np.matrix,
'int64': np.int64,
'uint64': np.uint64,
'int8': np.int8,
'int32': np.int32,
'float64': np.float64,
'dtype': np.dtype,
'nan': np.nan,
'NaN': np.nan,
'inf': np.inf,
'Inf': np.inf, }
class DTRunner(doctest.DocTestRunner):
DIVIDER = "\n"
def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
self._item_name = item_name
doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
optionflags=optionflags)
def _report_item_name(self, out, new_line=False):
if self._item_name is not None:
if new_line:
out("\n")
self._item_name = None
def report_start(self, out, test, example):
self._checker._source = example.source
return doctest.DocTestRunner.report_start(self, out, test, example)
def report_success(self, out, test, example, got):
if self._verbose:
self._report_item_name(out, new_line=True)
return doctest.DocTestRunner.report_success(
self, out, test, example, got)
def report_unexpected_exception(self, out, test, example, exc_info):
self._report_item_name(out)
return doctest.DocTestRunner.report_unexpected_exception(
self, out, test, example, exc_info)
def report_failure(self, out, test, example, got):
self._report_item_name(out)
return doctest.DocTestRunner.report_failure(self, out, test,
example, got)
class Checker(doctest.OutputChecker):
obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')
vanilla = doctest.OutputChecker()
rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary"}
stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
'set_title', 'imshow', 'plt.show', 'ax.axis', 'plt.plot(',
'.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim',
'set_xlim', '# reformatted'}
def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
self.parse_namedtuples = parse_namedtuples
self.atol, self.rtol = atol, rtol
if ns is None:
self.ns = dict(CHECK_NAMESPACE)
else:
self.ns = ns
def check_output(self, want, got, optionflags):
# cut it short if they are equal
if want == got:
return True
# skip stopwords in source
if any(word in self._source for word in self.stopwords):
return True
# skip random stuff
if any(word in want for word in self.rndm_markers):
return True
# skip function/object addresses
if self.obj_pattern.search(got):
return True
# ignore comments (e.g. signal.freqresp)
if want.lstrip().startswith("#"):
return True
# try the standard doctest
try:
if self.vanilla.check_output(want, got, optionflags):
return True
except Exception:
pass
# OK then, convert strings to objects
try:
a_want = eval(want, dict(self.ns))
a_got = eval(got, dict(self.ns))
except:
if not self.parse_namedtuples:
return False
# suppose that "want" is a tuple, and "got" is smth like
# MoodResult(statistic=10, pvalue=0.1).
# Then convert the latter to the tuple (10, 0.1),
# and then compare the tuples.
try:
num = len(a_want)
                regex = (r'[\w\d_]+\(' +
                         ', '.join([r'[\w\d_]+=(.+)']*num) +
                         r'\)')
grp = re.findall(regex, got.replace('\n', ' '))
if len(grp) > 1: # no more than one for now
return False
# fold it back to a tuple
got_again = '(' + ', '.join(grp[0]) + ')'
return self.check_output(want, got_again, optionflags)
except Exception:
return False
# ... and defer to numpy
try:
return self._do_check(a_want, a_got)
except Exception:
            # heterogeneous tuple, e.g. (1, np.array([1., 2.]))
try:
return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
except (TypeError, ValueError):
return False
def _do_check(self, want, got):
# This should be done exactly as written to correctly handle all of
        # numpy-comparable objects, strings, and heterogeneous tuples
try:
if want == got:
return True
except Exception:
pass
return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
def _run_doctests(tests, full_name, verbose, doctest_warnings):
"""Run modified doctests for the set of `tests`.
Returns: list of [(success_flag, output), ...]
"""
flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL
runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
verbose=verbose)
output = []
success = True
def out(msg):
output.append(msg)
class MyStderr(object):
"""Redirect stderr to the current stdout"""
def write(self, msg):
if doctest_warnings:
sys.stdout.write(msg)
else:
out(msg)
# Run tests, trying to restore global state afterward
old_printoptions = np.get_printoptions()
old_errstate = np.seterr()
old_stderr = sys.stderr
cwd = os.getcwd()
tmpdir = tempfile.mkdtemp()
sys.stderr = MyStderr()
try:
os.chdir(tmpdir)
# try to ensure random seed is NOT reproducible
np.random.seed(None)
for t in tests:
t.filename = short_path(t.filename, cwd)
fails, successes = runner.run(t, out=out)
if fails > 0:
success = False
finally:
sys.stderr = old_stderr
os.chdir(cwd)
shutil.rmtree(tmpdir)
np.set_printoptions(**old_printoptions)
np.seterr(**old_errstate)
return success, output
def check_doctests(module, verbose, ns=None,
dots=True, doctest_warnings=False):
"""Check code in docstrings of the module's public symbols.
Returns: list of [(item_name, success_flag, output), ...]
"""
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
# Loop over non-deprecated items
results = []
for name in get_all_dict(module)[0]:
full_name = module.__name__ + '.' + name
if full_name in DOCTEST_SKIPLIST:
continue
try:
obj = getattr(module, name)
except AttributeError:
import traceback
results.append((full_name, False,
"Missing item!\n" +
traceback.format_exc()))
continue
finder = doctest.DocTestFinder()
try:
tests = finder.find(obj, name, globs=dict(ns))
except:
import traceback
results.append((full_name, False,
"Failed to get doctests!\n" +
traceback.format_exc()))
continue
success, output = _run_doctests(tests, full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, "".join(output)))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def check_doctests_testfile(fname, verbose, ns=None,
dots=True, doctest_warnings=False):
"""Check code in a text file.
Mimic `check_doctests` above, differing mostly in test discovery.
(which is borrowed from stdlib's doctest.testfile here,
https://github.com/python-git/python/blob/master/Lib/doctest.py)
Returns: list of [(item_name, success_flag, output), ...]
Notes
-----
We also try to weed out pseudocode:
* We maintain a list of exceptions which signal pseudocode,
* We split the text file into "blocks" of code separated by empty lines
and/or intervening text.
* If a block contains a marker, the whole block is then assumed to be
pseudocode. It is then not being doctested.
The rationale is that typically, the text looks like this:
blah
<BLANKLINE>
>>> from numpy import some_module # pseudocode!
>>> func = some_module.some_function
>>> func(42) # still pseudocode
146
<BLANKLINE>
blah
<BLANKLINE>
>>> 2 + 3 # real code, doctest it
5
"""
results = []
if ns is None:
ns = dict(DEFAULT_NAMESPACE)
_, short_name = os.path.split(fname)
if short_name in DOCTEST_SKIPLIST:
return results
full_name = fname
text = open(fname).read()
PSEUDOCODE = set(['some_function', 'some_module', 'import example',
'ctypes.CDLL', # likely need compiling, skip it
                      'integrate.nquad(func,' # ctypes integrate tutorial
])
# split the text into "blocks" and try to detect and omit pseudocode blocks.
parser = doctest.DocTestParser()
good_parts = []
for part in text.split('\n\n'):
tests = parser.get_doctest(part, ns, fname, fname, 0)
if any(word in ex.source for word in PSEUDOCODE
for ex in tests.examples):
# omit it
pass
else:
            # `part` looks like good code, let's doctest it
good_parts += [part]
# Reassemble the good bits and doctest them:
good_text = '\n\n'.join(good_parts)
tests = parser.get_doctest(good_text, ns, fname, fname, 0)
success, output = _run_doctests([tests], full_name, verbose,
doctest_warnings)
if dots:
output_dot('.' if success else 'F')
results.append((full_name, success, "".join(output)))
if HAVE_MATPLOTLIB:
import matplotlib.pyplot as plt
plt.close('all')
return results
def init_matplotlib():
global HAVE_MATPLOTLIB
try:
import matplotlib
matplotlib.use('Agg')
HAVE_MATPLOTLIB = True
except ImportError:
HAVE_MATPLOTLIB = False
def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("module_names", metavar="SUBMODULES", default=[],
nargs='*',
help="Submodules to check (default: all public)")
parser.add_argument("--doctests", action="store_true",
help="Run also doctests")
parser.add_argument("-v", "--verbose", action="count", default=0)
parser.add_argument("--doctest-warnings", action="store_true",
help="Enforce warning checking for doctests")
parser.add_argument("--skip-examples", action="store_true",
help="Skip running doctests in the examples.")
args = parser.parse_args(argv)
modules = []
names_dict = {}
if args.module_names:
args.skip_examples = True
else:
args.module_names = list(PUBLIC_SUBMODULES)
os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
module_names = list(args.module_names)
for name in list(module_names):
if name in OTHER_MODULE_DOCS:
name = OTHER_MODULE_DOCS[name]
if name not in module_names:
module_names.append(name)
for submodule_name in module_names:
module_name = BASE_MODULE + '.' + submodule_name
__import__(module_name)
module = sys.modules[module_name]
if submodule_name not in OTHER_MODULE_DOCS:
find_names(module, names_dict)
if submodule_name in args.module_names:
modules.append(module)
dots = True
success = True
results = []
print("Running checks for %d modules:" % (len(modules),))
if args.doctests or not args.skip_examples:
init_matplotlib()
for module in modules:
if dots:
if module is not modules[0]:
sys.stderr.write(' ')
sys.stderr.write(module.__name__ + ' ')
sys.stderr.flush()
all_dict, deprecated, others = get_all_dict(module)
names = names_dict.get(module.__name__, set())
mod_results = []
mod_results += check_items(all_dict, names, deprecated, others, module.__name__)
mod_results += check_rest(module, set(names).difference(deprecated),
dots=dots)
if args.doctests:
mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
for v in mod_results:
assert isinstance(v, tuple), v
results.append((module, mod_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
if not args.skip_examples:
examples_path = os.path.join(
os.getcwd(), 'doc', 'source', 'regression', '*.rst')
print('\nChecking examples files at %s:' % examples_path)
for filename in sorted(glob.glob(examples_path)):
if dots:
sys.stderr.write('\n')
sys.stderr.write(os.path.split(filename)[1] + ' ')
sys.stderr.flush()
examples_results = check_doctests_testfile(
filename, (args.verbose >= 2), dots=dots,
doctest_warnings=args.doctest_warnings)
def scratch(): pass # stub out a "module", see below
scratch.__name__ = filename
results.append((scratch, examples_results))
if dots:
sys.stderr.write("\n")
sys.stderr.flush()
# Report results
all_success = True
for module, mod_results in results:
success = all(x[1] for x in mod_results)
all_success = all_success and success
if success and args.verbose == 0:
continue
print("")
print("=" * len(module.__name__))
print(module.__name__)
print("=" * len(module.__name__))
print("")
for name, success, output in mod_results:
if name is None:
if not success or args.verbose >= 1:
print(output.strip())
print("")
elif not success or (args.verbose >= 2 and output.strip()):
print(name)
print("-"*len(name))
print("")
print(output.strip())
print("")
if all_success:
print("\nOK: refguide and doctests checks passed!")
sys.exit(0)
else:
print("\nERROR: refguide or doctests have errors")
sys.exit(1)
if __name__ == '__main__':
main(argv=sys.argv[1:])
|
mit
|
ales-erjavec/scipy
|
scipy/interpolate/tests/test_rbf.py
|
41
|
4367
|
#!/usr/bin/env python
# Created by John Travers, Robert Hetland, 2007
""" Test functions for rbf module """
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_, assert_array_almost_equal,
assert_almost_equal, run_module_suite)
from numpy import linspace, sin, random, exp, allclose
from scipy.interpolate.rbf import Rbf
FUNCTIONS = ('multiquadric', 'inverse multiquadric', 'gaussian',
'cubic', 'quintic', 'thin-plate', 'linear')
def check_rbf1d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (1D)
x = linspace(0,10,9)
y = sin(x)
rbf = Rbf(x, y, function=function)
yi = rbf(x)
assert_array_almost_equal(y, yi)
assert_almost_equal(rbf(float(x[0])), y[0])
def check_rbf2d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (2D).
    x = random.rand(50, 1)*4 - 2
    y = random.rand(50, 1)*4 - 2
z = x*exp(-x**2-1j*y**2)
rbf = Rbf(x, y, z, epsilon=2, function=function)
zi = rbf(x, y)
zi.shape = x.shape
assert_array_almost_equal(z, zi)
def check_rbf3d_interpolation(function):
# Check that the Rbf function interpolates through the nodes (3D).
x = random.rand(50, 1)*4 - 2
y = random.rand(50, 1)*4 - 2
z = random.rand(50, 1)*4 - 2
d = x*exp(-x**2 - y**2)
rbf = Rbf(x, y, z, d, epsilon=2, function=function)
di = rbf(x, y, z)
di.shape = x.shape
assert_array_almost_equal(di, d)
def test_rbf_interpolation():
for function in FUNCTIONS:
yield check_rbf1d_interpolation, function
yield check_rbf2d_interpolation, function
yield check_rbf3d_interpolation, function
def check_rbf1d_regularity(function, atol):
# Check that the Rbf function approximates a smooth function well away
# from the nodes.
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, function=function)
xi = linspace(0, 10, 100)
yi = rbf(xi)
# import matplotlib.pyplot as plt
# plt.figure()
# plt.plot(x, y, 'o', xi, sin(xi), ':', xi, yi, '-')
# plt.plot(x, y, 'o', xi, yi-sin(xi), ':')
# plt.title(function)
# plt.show()
msg = "abs-diff: %f" % abs(yi - sin(xi)).max()
assert_(allclose(yi, sin(xi), atol=atol), msg)
def test_rbf_regularity():
tolerances = {
'multiquadric': 0.1,
'inverse multiquadric': 0.15,
'gaussian': 0.15,
'cubic': 0.15,
'quintic': 0.1,
'thin-plate': 0.1,
'linear': 0.2
}
for function in FUNCTIONS:
yield check_rbf1d_regularity, function, tolerances.get(function, 1e-2)
def check_rbf1d_stability(function):
# Check that the Rbf function with default epsilon is not subject
# to overshoot. Regression for issue #4523.
#
# Generate some data (fixed random seed hence deterministic)
np.random.seed(1234)
x = np.linspace(0, 10, 50)
z = x + 4.0 * np.random.randn(len(x))
rbf = Rbf(x, z, function=function)
xi = np.linspace(0, 10, 1000)
yi = rbf(xi)
    # subtract the linear trend and make sure there are no spikes
assert_(np.abs(yi-xi).max() / np.abs(z-x).max() < 1.1)
def test_rbf_stability():
for function in FUNCTIONS:
yield check_rbf1d_stability, function
def test_default_construction():
# Check that the Rbf class can be constructed with the default
# multiquadric basis function. Regression test for ticket #1228.
    x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_function_is_callable():
# Check that the Rbf class can be constructed with function=callable.
    x = linspace(0, 10, 9)
    y = sin(x)
    linfunc = lambda x: x
rbf = Rbf(x, y, function=linfunc)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_two_arg_function_is_callable():
# Check that the Rbf class can be constructed with a two argument
# function=callable.
def _func(self, r):
return self.epsilon + r
    x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, function=_func)
yi = rbf(x)
assert_array_almost_equal(y, yi)
def test_rbf_epsilon_none():
x = linspace(0, 10, 9)
y = sin(x)
rbf = Rbf(x, y, epsilon=None)
if __name__ == "__main__":
run_module_suite()
|
bsd-3-clause
|
Aasmi/scikit-learn
|
sklearn/decomposition/tests/test_fastica.py
|
272
|
7798
|
"""
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from nose.tools import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
    Parameters
    ----------
    x : ndarray
        Array with an axis of observations (statistical units) measured on
        random variables.
    axis : int, optional
        Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
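# Illustrative sketch (not part of the original tests): center_and_norm works
# in place through a rollaxis view, so the caller's array itself ends up with
# ~zero mean and ~unit variance along the observation axis. The helper name
# and the toy data below are our own.
def _example_center_and_norm():
    rng = np.random.RandomState(0)
    data = rng.randn(3, 100) * 5 + 2.0    # 3 variables, 100 observations each
    center_and_norm(data)                 # modifies `data` in place
    return data.mean(axis=-1), data.std(axis=-1)   # ~0 and ~1 per variable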
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
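    # (note: a callable passed as `fun` is expected to return both the value
    #  of the nonlinearity g(x) and the average of its derivative over the
    #  samples, which is exactly what g_test does above)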
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
|
bsd-3-clause
|
RegulatoryGenomicsUPF/pyicoteo
|
pyicoteolib/parser_old.py
|
1
|
45496
|
"""
Pyicoteo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import os
if sys.version_info < (2, 6):
print "Pyicoteo requires python 2.6 or greater (no Python 3 support yet, sorry)"
sys.exit(1)
from lib import argparse
import ConfigParser
from turbomix import Turbomix, OperationFailed
from defaults import *
VERSION = "1.2b"
__version__ = VERSION
class PicosParser:
def _big_warning(self, message):
print "\n**************************************WARNING*********************************WARNING**************************************************************"
print message
print "**************************************WARNING*********************************WARNING**************************************************************\n"
def _error_exit(self, message):
        print "\nERROR: %s" % message
sys.exit(1)
def config_section_map(self, section, config_file):
dict1 = {}
options = config_file.options(section)
for option in options:
try:
dict1[option] = config_file.get(section, option)
if dict1[option] == -1:
DebugPrint("skip: %s" % option)
except:
print("exception on %s!" % option)
dict1[option] = None
return dict1
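    # Illustrative sketch (an added helper, not part of the original parser):
    # config_section_map flattens a "[Pyicotrocol]" section of a protocol file
    # into a plain dict of option -> string value. The helper name and the
    # protocol_path argument are our own.
    def _example_read_protocol(self, protocol_path):
        config = ConfigParser.ConfigParser()
        config.read(protocol_path)
        return self.config_section_map("Pyicotrocol", config)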
def _file_exists(self, path):
if path:
if not os.path.exists(path):
print
print "Pyicoteo couldn't find the following file or directory: %s"%path
print
sys.exit(1)
def validate_operations(self, args, turbomix):
if MODFDR in turbomix.operations and args.output_format != SPK:
self._big_warning('You are using the ModFDR without a stranded format as output. This means that the strand information will be ignored. If you want to include the strand information, please consider using the --output-format bed_spk flag to get the output in bed spk stranded format')
if ENRICHMENT in turbomix.operations and args.experiment_format == BAM and args.sequential == True:
            self._big_warning('Please make sure that the region file is ordered in the same way your BAM files are ordered, or else the results will be incorrect!')
def _isratio(self, argument, name):
if argument < 0 or argument > 1:
self._error_exit("%s is a ratio, it should be between 0 and 1"%name)
def validate(self, args):
self._isratio(args.binsize, "--binsize")
self._isratio(args.binstep, "--binstep")
if args.poisson_test not in POISSON_OPTIONS:
self._error_exit("%s is not a valid Pyicoteo poisson test. Please use one of the following: %s"%(args.poisson_test, POISSON_OPTIONS))
if args.output_format == WIG and args.open_output == False:
            self._big_warning('You chose a closed WIG file as the output format. This will not be visible in the UCSC genome browser. Please consider adding the --open-output flag if you intend to use the UCSC browser with it.')
if not args.region and args.region_format == BED12:
self._error_exit("The autogenerated regions can only be calculated in BED format. Did you forget to specify the --region ?")
self._file_exists(args.experiment)
self._file_exists(args.experiment_b)
self._file_exists(args.region)
self._file_exists(args.control)
self._file_exists(args.replica)
def new_subparser(self, *args):
return argparse.ArgumentParser(add_help=False)
def create_parser(self):
read_formats = str(READ_FORMATS)
write_formats = str(WRITE_FORMATS)
parser = argparse.ArgumentParser(version=__version__, description="Pyicoteo is a collection of tools for mapped reads processing, peak calling and comparison (differential expression/splicing/whatever).")
#parent parsers
experiment = self.new_subparser()
experiment.add_argument('experiment', help='The experiment file or directory')
experiment_flags = self.new_subparser()
experiment_flags.add_argument('-o','--open-experiment', action='store_true', dest='open_experiment', default=OPEN_EXPERIMENT, help='Defines if the experiment is half-open or closed notation. [Default %(default)s]')
experiment_flags.add_argument( '-f','--experiment-format',default=EXPERIMENT_FORMAT, dest='experiment_format', help="""The format the experiment file is written as.
The options are %s. [Default pk]"""%read_formats)
exp_or_count = self.new_subparser()
mutexc = exp_or_count.add_mutually_exclusive_group(required=True)
mutexc.add_argument('-reads', nargs=2, dest='experiments', help='Compare two packages.', metavar=("experiment_a","experiment_b"))
mutexc.add_argument('-counts', dest='counts_file', help='Verify Content of package.')
experiment_b = self.new_subparser()
experiment_b.add_argument('experiment_b', help='The experiment file B')
optional_replica = self.new_subparser()
optional_replica.add_argument('--replica', help='Experiment A replica file')
replica = self.new_subparser()
replica.add_argument('replica', help='Experiment A replica file')
control = self.new_subparser()
control.add_argument('control', help='The control file or directory')
control_format = self.new_subparser()
control_format.add_argument('--control-format', default=CONTROL_FORMAT, help='The format the control file is written as. [default: The same as experiment format]')
optional_control = self.new_subparser()
optional_control.add_argument('--control', help='The control file or directory')
open_control = self.new_subparser()
open_control.add_argument('--open-control', action='store_true', default=OPEN_CONTROL, help='Define if the region file is half-open or closed notation. [Default closed]')
basic_parser = self.new_subparser()
basic_parser.add_argument('--debug', action='store_true', default=DEBUG)
        basic_parser.add_argument('--no-sort', action='store_true', default=NO_SORT, help='Force skipping the sorting step. WARNING: use only if you know what you are doing. Processing unsorted files while assuming they are sorted will produce erroneous results')
basic_parser.add_argument('--force-sort',action='store_true', default=False, help='Force the sorting step')
basic_parser.add_argument('--silent' ,action='store_false', default=VERBOSE, dest='verbose', help='Run without printing in screen')
        basic_parser.add_argument('--disable-cache', action='store_false', default=CACHED, dest='cached', help='Disable the internal reading cache. When clustering low-coverage files, this will increase speed and improve memory usage. With very read-dense files, the speed will decrease.')
basic_parser.add_argument('--keep-temp', action='store_true', default=KEEP_TEMP, help='Keep the temporary files')
basic_parser.add_argument('--postscript', action='store_true', default=POSTSCRIPT, help='get the output graphs in postscript format instead of .png')
        basic_parser.add_argument('--showplots', action='store_true', default=SHOWPLOTS, help='Show the plots as they are being calculated by matplotlib. Note that execution will pause until you close the pop-up window that appears')
basic_parser.add_argument('--label1', default=LABEL1, help="Manually define the first label of the graphs.")
basic_parser.add_argument('--label2', default=LABEL2, help="Manually define the second label of the graphs.")
basic_parser.add_argument('--tempdir', default=TEMPDIR, help="Manually define the temporary directory where Pyicoteo will write. By default Pyicoteo will use the temporary directory the system provides (For example, /tmp in unix systems)")
        basic_parser.add_argument('--samtools', default=USESAMTOOLS, action='store_true', help="Use samtools for reading BAM files [Default: Pyicoteo uses its own library] (reading BAM works without samtools for convert, extend, and other operations, but not for enrichment yet)")
basic_parser.add_argument('--skip-header', action='store_true', default=SKIP_HEADER, help="Skip writing the header for the output file. [Default %(default)s]")
#basic_parser.add_argument('--get-report', action='store_true', default=SKIP_HEADER, help=". [Default %(default)s]")
output = self.new_subparser()
output.add_argument('output', help='The output file or directory')
optional_output = self.new_subparser()
optional_output.add_argument('--output', help='The output file or directory')
output_flags = self.new_subparser()
output_flags.add_argument('-O','--open-output', action='store_true', default=OPEN_OUTPUT, help='Define if the output is half-open or closed notation. [Default closed]')
output_flags.add_argument('-F','--output-format',default=OUTPUT_FORMAT, help='Format desired for the output. You can choose between %s. WARNING, for some operations, some outputs are not valid. See operation help for more info. [default pk]'%write_formats)
blacklist_help = 'Reads a bed file with coordinates that you want to exclude from the analysis. Useful for discarding "noisy" probable artifactual regions like centromeres and repeat regions. [Default %(default)s]'
blacklist = self.new_subparser()
blacklist.add_argument('blacklist', default=BLACKLIST, help=blacklist_help)
optional_blacklist = self.new_subparser()
optional_blacklist.add_argument('--blacklist', default=BLACKLIST, help=blacklist_help)
region = self.new_subparser()
region.add_argument('region', help='The region file')
optional_region = self.new_subparser()
optional_region.add_argument('--region', help='The region file or directory. In the enrichment analysis, if its not specified it will be calculated automatically from the tags in both files and the distance of clustering specified in the --proximity flag')
region_format = self.new_subparser()
region_format.add_argument('--region-format',default=REGION_FORMAT, help='The format the region file is written as. [default %(default)s]')
region_format.add_argument('--open-region', action='store_true', default=OPEN_REGION, help='Define if the region file is half-open or closed notation. [Default closed]')
#enrichment flags
enrichment_flags = self.new_subparser()
enrichment_flags.add_argument('--stranded', action='store_true', default=STRANDED_ANALYSIS, help="Decide if the strand is taken into consideration for the analysis. This requires a region file in bed format with the strand information in its 6th column.")
enrichment_flags.add_argument('--proximity', default=PROXIMITY, type=int, help="Determines if two regions calculated automatically are close enough to be clustered. Default %(default)s nt")
        enrichment_flags.add_argument('--binsize', type=float, default=BINSIZE, help="The size of the bins to calculate the local sd and mean for the background model, as a ratio of the total number of regions. Regardless of the ratio selected, the minimum window size is 50 regions, since below that threshold the results will no longer be statistically meaningful. [Default %(default)s]")
enrichment_flags.add_argument('--sdfold', type=float, default=SDFOLD, help="The standard deviation fold used to generate the background model. [Default %(default)s]")
enrichment_flags.add_argument('--recalculate', action='store_true', default=RECALCULATE, help="Recalculate the z-score when plotting. Useful for doing different plots with 'Pyicoteo plot' [Default %(default)s]")
enrichment_flags.add_argument('--mintags', type=float, default=REGION_MINTAGS, help="Number of tags (of the union of the experiment and experiment_b datasets) for a region to qualify to be analyzed. [Default %(default)s]")
        enrichment_flags.add_argument('--binstep', type=float, default=WINDOW_STEP, help="Step of the sliding window for the calculation of the z-score, as a ratio of the window size selected. If you want maximum precision in the z-score calculation, you can set this value to 0 in order to use a sliding window that slides only 1 region at a time, but if you have many regions the calculation can get very slow. [Default %(default)s]")
enrichment_flags.add_argument('--skip-plot', action='store_true', default=SKIP_PLOT, help="Skip the plotting step. [Default %(default)s]")
enrichment_flags.add_argument('--n-norm', action='store_true', default=N_NORM, help="Divide the read counts by the total number of reads (units of million reads)")
enrichment_flags.add_argument('--len-norm', action='store_true', default=LEN_NORM, help="Divide the read counts by region (gene, transcript...) length (reads per kilobase units)")
#enrichment_flags.add_argument('--sequential', default=SEQUENTIAL, action='store_true', help="Iterate through the files in sequential order, instead of random access (for BAM reading). This is faster than random if you are using a lot of regions that overlap with each other") #TODO This flag doesn't work because if we order chr1 chr2 every file, instead of alphabetical, the SortedClusterReader classes will fail when changing chromosome, since the ALGORITHM depends on a sorted file
tmm_flag = self.new_subparser()
tmm_flag.add_argument('--tmm-norm', action='store_true', default=TMM_NORM, help="Trimming the extreme A and M to correct the dataset for the differences in read density between samples. [Default %(default)s]")
quant_flag = self.new_subparser()
quant_flag.add_argument('--quant-norm', action='store_true', default=QUANT_NORM, help="Full quantile normalization of the counts. This normalization method could be considered the most conservative of them all. [Default %(default)s]")
checkrep_flags = self.new_subparser()
checkrep_flags.add_argument('--experiment-label', default=EXPERIMENT_LABEL, help='The label that will identify the experiment file in the "check replicas" plot')
checkrep_flags.add_argument('--replica-label', default=REPLICA_LABEL, help='The label that will identify the experiment file in the "check replicas" plot')
checkrep_flags.add_argument('--title-label', default=REPLICA_LABEL, help='The label that will identify the experiment file in the "check replicas" plot')
checkrep_flags.add_argument('--count-filter', default=COUNT_FILTER, type=float, help='Filter the points that go below a threshold to better visualize the correlation between the replicas')
total_reads_flags = self.new_subparser()
total_reads_flags.add_argument('--total-reads-a', type=int, default=0, help="To manually set how many reads the dataset in 'experiment' has. If not used, it will be counted from the read or counts file. Default (automatically calculated from reads or counts files)")
total_reads_flags.add_argument('--total-reads-b', type=int, default=0, help="To manually set how many reads the dataset in 'experiment_b' has. If not used, it will be counted from the read or counts file. Default (automatically calculated from reads or counts files)")
total_reads_flags.add_argument('--total-reads-replica', type=int, default=0, help="To manually set how many reads the dataset in 'experiment_replica' has. If not used, it will be calculated from the read or the counts file. Default %(default)s (not used)")
total_reads_flags.add_argument('--a-trim', type=float, default=A_TRIM, help="Proportion of A values to be discarded when doing the TMM normalization. Only applied when combined with --tmm-norm. [Default %(default)s]")
total_reads_flags.add_argument('--m-trim', type=float, default=M_TRIM, help="Proportion of M values to be discarded when doing the TMM normalization. Only applied when combined with --tmm-norm. [Default %(default)s]")
pseudocount = self.new_subparser()
pseudocount.add_argument('--pseudocount', action='store_true', default=PSEUDOCOUNT, help="The usage of pseudocounts in the enrichment calculation allows the inclusion of regions that have n reads in one dataset and 0 reads in the other. [Default %(default)s]")
zscore = self.new_subparser()
zscore.add_argument('--zscore', type=float, default=ZSCORE, help="Significant Z-score value. [Default %(default)s]")
use_replica = self.new_subparser()
use_replica.add_argument("--use-replica", action='store_true', default=USE_REPLICA, help="Indicates that for the calculation of the counts tables, a replica was used. [Default %(default)s]")
label = self.new_subparser()
label.add_argument('--wig-label', default=LABEL, help='The label that will identify the experiment in the WIG tracks.')
span = self.new_subparser()
span.add_argument('--span', default=SPAN, help='The span of the variable and fixed wig formats [Default %(default)s]', type=int)
round = self.new_subparser()
round.add_argument('--round',action='store_true',dest='rounding', default=ROUNDING, help='Round the final results to an integer')
pvalue = self.new_subparser()
pvalue.add_argument('--p-value',type=float, default=P_VALUE, help='The threshold p-value that will make a cluster significant. [Default %(default)s]')
        tolerated_duplicates = self.new_subparser()
        tolerated_duplicates.add_argument('--duplicates', type=int, default=DUPLICATES, help='The number of duplicated reads that will be accepted (counted). Any duplicated read beyond this threshold will be discarded. [Default %(default)s]')
height = self.new_subparser()
        height.add_argument('--k-limit', type=int, default=HEIGHT_LIMIT, help='The k limit Pyicoteo will analyze up to when performing a poisson test. Every cluster that goes over the threshold will have a p-value of 0, and will therefore be considered significant. For performance purposes, raising it will give more precision when defining low p-values, but will take longer to execute. [Default %(default)s]')
correction = self.new_subparser()
correction.add_argument('--correction',type=float, default=CORRECTION, help='This value will correct the size of the genome you are analyzing. This way you can take into consideration the real mappable genome [Default %(default)s]')
tag_length = self.new_subparser()
tag_length.add_argument( '--tag-length',default=TAG_LENGTH, type=int, help='The tag length, or the extended one. Needed when converting from a Clustered format (wig, pk) to a non clustered format (bed, eland) [Default %(default)s]')
frag_size = self.new_subparser()
        frag_size.add_argument('frag_size', help='The estimated immunoprecipitated fragment size. This is used by the extend operation to extend the tags, taking into consideration their strand, if provided. If the strand is not provided, Pyicoteo will assume positive strand.', type=int)
optional_frag_size = self.new_subparser()
        optional_frag_size.add_argument('-x', '--frag-size', help='The estimated immunoprecipitated fragment size. This is used by Pyicoteo to reconstruct the original signal in the original wet lab experiment.', type=int)
push_distance = self.new_subparser()
push_distance.add_argument('push_distance', help='', type=int)
no_subtract = self.new_subparser()
no_subtract.add_argument('--no-subtract',action='store_true', default=False, help='Don\'t subtract the control to the output, only normalize.')
normalize = self.new_subparser()
normalize.add_argument('--normalize',action='store_true', default=False, help='Normalize to the control before subtracting')
extend = self.new_subparser()
extend.add_argument('--extend',action='store_true', default=False, help='Extend')
subtract = self.new_subparser()
subtract.add_argument('--subtract',action='store_true', default=False, help='subtract')
filterop = self.new_subparser()
filterop.add_argument('--filter',action='store_true', default=False, help='filterop')
poisson = self.new_subparser()
poisson.add_argument('--poisson',action='store_true', default=False, help='poisson')
modfdr = self.new_subparser()
modfdr.add_argument('--modfdr',action='store_true', default=False, help='modfdr')
remduplicates = self.new_subparser()
remduplicates.add_argument('--remduplicates',action='store_true', default=False, help='remduplicates')
split = self.new_subparser()
split.add_argument('--split',action='store_true', default=False, help='split')
trim = self.new_subparser()
trim.add_argument('--trim',action='store_true', default=False, help='trim')
strcorr = self.new_subparser()
strcorr.add_argument('--strcorr',action='store_true', default=False, help='strcorr')
remregions = self.new_subparser()
remregions.add_argument('--remregions',action='store_true', default=False, help='remregions')
remartifacts = self.new_subparser()
remartifacts.add_argument('--remartifacts',action='store_true', default=False, help='remartifacts')
checkrep = self.new_subparser()
checkrep.add_argument('--checkrep',action='store_true', default=False, help='check replicas')
split_proportion = self.new_subparser()
        split_proportion.add_argument('--split-proportion', default=SPLIT_PROPORTION, help='Fraction of the cluster height below which the cluster is split. [Default %(default)s]', type=float)
trim_proportion = self.new_subparser()
trim_proportion.add_argument('--trim-proportion', default=TRIM_PROPORTION, help='Fraction of the cluster height below which the peak is trimmed. Example: For a cluster of height 40, if the flag is 0.05, 40*0.05=2. Every cluster will be trimmed to that height. A position of height 1 is always considered insignificant, no matter what the cluster height is. [Default %(default)s]', type=float)
trim_absolute = self.new_subparser()
trim_absolute.add_argument('--trim-absolute', default=TRIM_ABSOLUTE, help='The height threshold to trim the clusters. Overrides the trim proportion. [Default %(default)s]', type=int)
split_absolute = self.new_subparser()
split_absolute.add_argument('--split-absolute', default=SPLIT_ABSOLUTE, help='The height threshold to split the clusters. [Default %(default)s]', type=int)
repeats = self.new_subparser()
        repeats.add_argument('--repeats', help='Number of random repeats when generating the "background" for the modfdr operation [Default %(default)s]', default=REPEATS, type=int)
masker_file = self.new_subparser()
        masker_file.add_argument('--masker', help='You can provide a masker file that will be used by the modfdr operation background generation so that randomized reads will not fall in these areas')
poisson_test = self.new_subparser()
poisson_test.add_argument('--poisson-test', help="Decide what property of the cluster will be used for the poisson analysis. Choices are %s [Default %s]"%(POISSON_OPTIONS, POISSONTEST), default=POISSONTEST)
remlabels = self.new_subparser()
        remlabels.add_argument('--remlabels', help='Discard the reads that have this particular label. Example: --remlabels chr1 will discard all reads with chr1 as tag. You can specify multiple labels to discard using the following notation: --remlabels chr1 chr2 tagN')
threshold = self.new_subparser()
threshold.add_argument('--threshold', help='The height threshold used to cut', type=int)
species = self.new_subparser()
species.add_argument('-p', '--species', default=SPECIES, help='The species that you are analyzing. This will read the length of the chromosomes of this species from the files inside the folder "chrdesc". If the species information is not known, the filtering step will assume that the chromosomes are as long as the position of the furthest read. [Default %(default)s]')
plot_path = self.new_subparser()
plot_path.add_argument('plot_path', default=PLOT_PATH, help='The path of the file to plot.')
correlation_flags = self.new_subparser()
correlation_flags.add_argument('--max-delta',type=int, default=MAX_DELTA, help='Maximum distance to consider when correlating the positive and the negative groups of reads [Default %(default)s]')
correlation_flags.add_argument('--min-delta',type=int, default=MIN_DELTA, help='Minimum distance to consider when correlating the positive and the negative groups of reads [Default %(default)s]')
correlation_flags.add_argument('--height-filter',type=int, default=HEIGHT_FILTER, help='The minimum number of overlapping reads in a cluster to include it in the test [Default %(default)s]')
correlation_flags.add_argument('--delta-step',type=int, default=DELTA_STEP, help='The step of the delta values to test [Default %(default)s]')
correlation_flags.add_argument('--max-correlations',type=int, default=MAX_CORRELATIONS, help='The maximum pairs of clusters to analyze before considering the test complete. Lower this parameter to increase time performance [Default %(default)s]')
counts_file = self.new_subparser()
counts_file.add_argument('counts_file', help='The counts file. The format required is a bed file with fields "name", "start", "end", "name2", "score(ignored)", "strand", "count file a", "count file b", "count file a", "count replica a" where the counts can be RPKMs or simple counts')
protocol_name = self.new_subparser()
protocol_name.add_argument('protocol_name', help='The protocol configuration file.')
subparsers = parser.add_subparsers(help='The operation you want to perform. Note that some operations imply previous automatic operations.')
#callpeaks operation
subparsers.add_parser('callpeaks', help='The complete peak calling sequence proposed in the future publication. The region file is optional. The same goes for the control file, if not provided, there will not be a normalization or a subtraction.',
parents=[experiment, experiment_flags, basic_parser, optional_control, control_format, open_control, optional_blacklist, output, output_flags, optional_frag_size, round, label, span, no_subtract, remlabels, pvalue, height, correction, trim_proportion, species, tolerated_duplicates, poisson_test])
#convert operation
subparsers.add_parser('convert', help='Convert a file to another file type.',
parents=[experiment, experiment_flags, basic_parser, output, output_flags, round, label, tag_length, span, optional_frag_size, remlabels])
        subparsers.add_parser('subtract', help='Subtract two clustered files. Operating with directories will only give appropriate results if the files and the control are paired in alphabetical order.', parents=[experiment,experiment_flags, basic_parser, control, control_format, open_control, output, output_flags, round, normalize, tag_length, span, label, remlabels])
#split operation
subparsers.add_parser('split', help='Split the peaks in subpeaks. Only accepts pk or wig as output (other formats under development).', parents=[experiment, experiment_flags, basic_parser, output, output_flags, round, split_proportion, split_absolute, label, remlabels])
#trim operation
subparsers.add_parser('trim', help='Trim the clusters to a given threshold.', parents=[experiment, experiment_flags, basic_parser, output, output_flags, round, trim_absolute, trim_proportion, label, remlabels, span])
#discard operation
subparsers.add_parser('discard', help='Discards artifacts from a file. Only accepts pk or wig as output.', parents=[experiment, experiment_flags, basic_parser, output, output_flags, round, span, label, remlabels])
#remove duplicates operation
subparsers.add_parser('remduplicates', help='Removes the duplicated reads in a file. Only accepts tag-like files (bed, eland, sam)', parents=[experiment, experiment_flags, basic_parser, output, output_flags, tolerated_duplicates, round, span, label, remlabels])
#normalize operation
#subparsers.add_parser('normalize', help='Normalize a pk file respect of the control.', parents=[experiment, experiment_flags, basic_parser, control, control_format, output, output_flags, open_control, round, label, span, remlabels])
#extend operation
subparsers.add_parser('extend', help='Extends the reads of a file to the desired length. This operation requires tag-like files (bed, eland, sam)', parents=[experiment,experiment_flags, basic_parser, output, output_flags, frag_size, round, label, span, remlabels])
#push operation
        subparsers.add_parser('push', help='Push the reads in the corresponding strand. If a read doesn\'t have a strand, it will be pushed from left to right. This operation requires tag-like files (bed, eland, sam)', parents=[experiment,experiment_flags, basic_parser, output, output_flags, push_distance, round, label, span, remlabels])
#poisson analysis
        subparsers.add_parser('poisson', help='Analyze the significance of accumulated reads in the file using the poisson distribution. With this test you will be able to decide what the significant threshold for your reads is.',
parents=[experiment,experiment_flags, basic_parser, output_flags, optional_frag_size, pvalue, height, correction, species, remlabels, poisson_test])
#cut operations
subparsers.add_parser('filter', help="""Analyze the significance of accumulated reads in the file using the poisson distribution and generate the resulting profiles, in wig or pk formats""",
parents=[experiment,experiment_flags, basic_parser, output, optional_frag_size, output_flags, round, pvalue, height, correction, threshold, species, remlabels, poisson_test])
#modfdr analysis
        subparsers.add_parser('modfdr', help="""Use the modified FDR method to determine what clusters are significant in a specific region. Output in a clustered format only.""",
parents=[experiment, experiment_flags, basic_parser, region, output, output_flags, round, pvalue, repeats, remlabels]) #, masker_file
#remove operation
        subparsers.add_parser('remregions', help='Removes regions that overlap with the coordinates in the "black list" file.',
parents=[experiment, experiment_flags, basic_parser, output_flags, blacklist, region_format, output, remlabels])
#strcorr operation
subparsers.add_parser('strcorr', help='A cross-correlation test between forward and reverse strand clusters in order to find the optimal extension length.',
parents=[experiment, experiment_flags, basic_parser, output, output_flags, correlation_flags, remlabels])
#rpkm operation
subparsers.add_parser('enrichma', help='An enrichment test based on the MA plots using the Pyicoteo count files with the MA information. It will ignore the counts information, and directly use whatever values found in the M and A columns and zscore information to plot the data. Useful to re-plot the data by adjusting the zscore for visualization purposes.', parents=[counts_file, basic_parser, output_flags, optional_replica, region_format, output, enrichment_flags, zscore])
#rpkm operation
#subparsers.add_parser('enrichcount', help='An enrichment test based on the MA plots using (normalized) counts', parents=[counts_file, basic_parser, output_flags, optional_replica, region_format, output, enrichment_flags, tmm_flag, total_reads_flags, zscore, use_replica])
#enrichment operation
subparsers.add_parser('enrichment', help='An enrichment test based on the MA plots using mapped reads files. Pyicoteo output will consist in a results table and a MA plot (optional, but matplotlib required >=0.9.7). The fields of this table are as follows: %s'%(" | ".join(enrichment_keys)), parents=[exp_or_count, experiment_flags, basic_parser, output_flags, optional_replica, optional_region, region_format, optional_output, enrichment_flags, tmm_flag, quant_flag, total_reads_flags, pseudocount, zscore])
#check replicas operation TODO unfinished
#subparsers.add_parser('checkrep', help='Check how good the replicas are.', parents=[experiment, experiment_flags, basic_parser, replica, region, region_format, checkrep_flags, output])
#check replicas operation
subparsers.add_parser('checkrepcount', help='Check how good the replicas are (from a Pyicoteo count file)', parents=[counts_file, basic_parser, enrichment_flags, total_reads_flags, checkrep_flags, output])
#protocol reading
subparsers.add_parser('protocol', help='Import a protocol file to load in Pyicoteo', parents=[protocol_name])
subparsers.add_parser('plot', help="Plot a file with Pyicoteo plotting utilities. Requires matplotlib >=0.9.7 installed.", parents=[basic_parser, plot_path, output, zscore])
#whole exposure
subparsers.add_parser('all', help='Exposes all Pyicoteo functionality through a single command', parents=[experiment, experiment_flags, basic_parser, optional_control, control_format, open_control, optional_region, output, output_flags, optional_frag_size, round, label, span, no_subtract, remlabels, pvalue, height, correction, trim_proportion, trim_absolute, species, tolerated_duplicates, masker_file, correlation_flags, split_proportion, split_absolute, normalize, extend, subtract, filterop, poisson, modfdr, remduplicates, split, trim, strcorr, remregions, remartifacts])
return parser
def run_parser(self):
parser = self.create_parser()
parser.set_defaults(experiment=EXPERIMENT, experiment_format=EXPERIMENT_FORMAT, open_experiment=OPEN_EXPERIMENT, debug=DEBUG, discard=DISCARD, output=OUTPUT, control=CONTROL,
wig_label = LABEL, output_format=OUTPUT_FORMAT,open_output=OPEN_OUTPUT, rounding=ROUNDING, control_format=CONTROL_FORMAT, region=REGION, region_format=REGION_FORMAT,
open_region =OPEN_REGION,frag_size = FRAG_SIZE, tag_length = TAG_LENGTH, span=SPAN, p_value=P_VALUE, height_limit=HEIGHT_LIMIT,
correction=CORRECTION, no_subtract = NO_SUBTRACT, normalize = DO_NORMALIZE, trim_proportion=TRIM_PROPORTION,open_control=OPEN_CONTROL,
no_sort=NO_SORT, duplicates=DUPLICATES, threshold=THRESHOLD, trim_absolute=TRIM_ABSOLUTE, max_delta=MAX_DELTA, min_delta=MIN_DELTA,
height_filter=HEIGHT_FILTER, delta_step=DELTA_STEP, verbose=VERBOSE, species=SPECIES, cached=CACHED, split_proportion=SPLIT_PROPORTION,
split_absolute=SPLIT_ABSOLUTE, repeats=REPEATS, masker_file=MASKER_FILE, max_correlations=MAX_CORRELATIONS, keep_temp=KEEP_TEMP, postscript = POSTSCRIPT,
remlabels=REMLABELS, experiment_b=EXPERIMENT, replica=EXPERIMENT, replica_b=EXPERIMENT, poisson_test=POISSONTEST, stranded=STRANDED_ANALYSIS,
proximity=PROXIMITY, showplots=SHOWPLOTS, plot_path=PLOT_PATH, pseudocount=PSEUDOCOUNT, len_norm=LEN_NORM, label1=LABEL1,
label2=LABEL2, binsize=BINSIZE, zscore=ZSCORE, blacklist=BLACKLIST, sdfold=SDFOLD, recalculate=RECALCULATE,
counts_file=COUNTS_FILE, mintags=REGION_MINTAGS, binstep=WINDOW_STEP, tmm_norm=TMM_NORM, n_norm=N_NORM, skip_header=SKIP_HEADER,
total_reads_a=TOTAL_READS_A, total_reads_b=TOTAL_READS_B, total_reads_replica=TOTAL_READS_REPLICA, a_trim=A_TRIM, m_trim=M_TRIM,
use_replica=USE_REPLICA, tempdir=TEMPDIR, samtools=USESAMTOOLS, access_sequential=SEQUENTIAL, experiment_label = EXPERIMENT_LABEL,
replica_label = REPLICA_LABEL, title_label = TITLE_LABEL, count_filter = COUNT_FILTER, force_sort=FORCE_SORT,
push_distance=PUSH_DIST, quant_norm=QUANT_NORM, experiments=None)
args = parser.parse_args()
#Add any parameters found in the config file. Override them with anything found in the args later
if sys.argv[1] == 'protocol':
config = ConfigParser.ConfigParser()
config.read(args.protocol_name)
try:
section = self.config_section_map("Pyicotrocol", config)
except ConfigParser.NoSectionError:
                print "\nERROR: %s is not a Pyicoteo Protocol file: it is missing the [Pyicotrocol] header or it doesn't exist\n"%args.protocol_name
sys.exit(0)
for key, value in section.items(): #this works fine for all string values
try:
t = type(parser._defaults[key])
if t == int:
args.__dict__[key] = config.getint("Pyicotrocol", key)
elif t == float:
args.__dict__[key] = config.getfloat("Pyicotrocol", key)
elif t == bool:
args.__dict__[key] = config.getboolean("Pyicotrocol", key)
elif t == str:
args.__dict__[key] = config.get("Pyicotrocol", key)
except KeyError:
if key == 'input':
args.__dict__['experiment'] = config.get("Pyicotrocol", 'input')
print "\nWARNING: The keyword 'input' for the protocol files is deprecated, please use 'experiment' instead"
elif key != 'operations':
print 'ERROR: There is an error in your protocol file. "%s" is not a Pyicotrocol parameter'%key
sys.exit(0)
self.validate(args)
if args.counts_file: #the formats are overridden when using enrichment (only of cosmetic value, when printing the flags)
args.experiment_format = COUNTS
args.experiment_b_format = COUNTS
args.output_format = COUNTS
if not args.control_format: #If not specified, the control format is equal to the experiment format
args.control_format = args.experiment_format
args.open_control = args.open_experiment
if args.experiments:
args.experiment, args.experiment_b = args.experiments
turbomix = Turbomix(args.experiment, args.output, args.experiment_format, args.output_format, args.wig_label, args.open_experiment, args.open_output, args.debug,
args.rounding, args.tag_length, args.remlabels, args.control, args.control_format, args.open_control, args.region,
args.region_format, args.open_region, args.span, args.frag_size, args.p_value, args.height_limit, args.correction,
args.trim_proportion, args.no_sort, args.duplicates, args.threshold, args.trim_absolute, args.max_delta,
args.min_delta, args.height_filter, args.delta_step, args.verbose, args.species, args.cached, args.split_proportion, args.split_absolute,
args.repeats, args.masker_file, args.max_correlations, args.keep_temp, args.experiment_b, args.replica, args.replica_b, args.poisson_test,
args.stranded, args.proximity, args.postscript, args.showplots, args.plot_path, args.pseudocount, args.len_norm, args.label1,
args.label2, args.binsize, args.zscore, args.blacklist, args.sdfold, args.recalculate, args.counts_file, args.mintags, args.binstep,
args.tmm_norm, args.n_norm, args.skip_header, args.total_reads_a, args.total_reads_b, args.total_reads_replica, args.a_trim, args.m_trim,
args.use_replica, args.tempdir, args.samtools, args.access_sequential, args.experiment_label, args.replica_label, args.title_label,
args.count_filter, args.force_sort, args.push_distance, args.quant_norm)
if sys.argv[1] == 'protocol':
operations = section['operations'].split(',')
for operation in operations:
print "Adding operation %s to protocol..."%operation
turbomix.operations.append(operation.strip())
elif sys.argv[1] == 'convert':
if args.frag_size:
turbomix.operations = [EXTEND]
elif sys.argv[1] == 'subtract':
turbomix.operations = [SUBTRACT]
if args.normalize:
turbomix.operations.append(NORMALIZE)
elif sys.argv[1] == 'normalize':
turbomix.operations = [NORMALIZE]
elif sys.argv[1] == 'extend':
turbomix.operations = [EXTEND]
elif sys.argv[1] == 'push':
turbomix.operations = [PUSH]
elif sys.argv[1] == 'strcorr':
turbomix.operations = [STRAND_CORRELATION, NOWRITE]
elif sys.argv[1] == 'poisson':
turbomix.operations = [POISSON, NOWRITE]
elif sys.argv[1] == 'filter':
turbomix.operations = [POISSON, FILTER]
elif sys.argv[1] == 'remove':
turbomix.operations = [REMOVE_REGION]
elif sys.argv[1] == 'enrichma':
turbomix.operations = [USE_MA, ENRICHMENT, CALCZSCORE]
if not args.skip_plot:
turbomix.operations.append(PLOT)
elif sys.argv[1] == 'enrichment' or sys.argv[1] == 'enrichcount':
turbomix.operations = [ENRICHMENT, CALCZSCORE]
if not args.skip_plot:
turbomix.operations.append(PLOT)
elif sys.argv[1] == 'checkrep' or sys.argv[1] == 'checkrepcount':
turbomix.operations = [ENRICHMENT, CHECK_REPLICAS, NOWRITE]
elif sys.argv[1] == 'split':
turbomix.operations = [SPLIT]
elif sys.argv[1] == 'trim':
turbomix.operations = [TRIM]
elif sys.argv[1] == 'discard':
turbomix.operations = [DISCARD_ARTIFACTS]
elif sys.argv[1] == 'remduplicates':
turbomix.operations = [REMOVE_DUPLICATES]
elif sys.argv[1] == 'remregions':
turbomix.operations = [REMOVE_REGION]
elif sys.argv[1] == 'modfdr':
turbomix.operations = [MODFDR]
elif sys.argv[1] == 'callpeaks':
turbomix.operations = [SPLIT, EXTEND, POISSON, FILTER, REMOVE_DUPLICATES, STRAND_CORRELATION]
if args.duplicates > 1: #If there is only 1 duplicate, there is no need to discard artifacts
turbomix.operations.append(DISCARD_ARTIFACTS)
if args.blacklist:
turbomix.operations.append(REMOVE_REGION)
if args.control and not args.no_subtract:
turbomix.operations.append(NORMALIZE)
turbomix.operations.append(SUBTRACT)
elif sys.argv[1] == 'plot':
turbomix.operations = [PLOT, NOWRITE]
elif sys.argv[1] == 'all':
if args.normalize: turbomix.operations.append(NORMALIZE)
if args.extend: turbomix.operations.append(EXTEND)
if args.subtract: turbomix.operations.append(SUBTRACT)
if args.filter: turbomix.operations.append(FILTER)
if args.poisson: turbomix.operations.append(POISSON)
if args.modfdr: turbomix.operations.append(MODFDR)
if args.remduplicates: turbomix.operations.append(REMOVE_DUPLICATES)
if args.split: turbomix.operations.append(SPLIT)
if args.trim: turbomix.operations.append(TRIM)
if args.strcorr: turbomix.operations.append(STRAND_CORRELATION)
if args.remregions: turbomix.operations.append(REMOVE_REGION)
if args.remartifacts: turbomix.operations.append(DISCARD_ARTIFACTS)
if args.checkrep: turbomix.operations.append(CHECK_REPLICAS)
self.validate_operations(args, turbomix)
#parameters are set, now try running
try:
turbomix.run()
except KeyboardInterrupt:
print 'Canceled by user.'
except OperationFailed:
if args.debug:
raise
else:
print 'Operation Failed.'
|
gpl-3.0
|
rth/PyAbel
|
examples/example_GUI.py
|
1
|
21520
|
# -*- coding: iso-8859-1 -*-
# Illustrative GUI driving a small subset of PyAbel methods
import numpy as np
import abel
import sys
if sys.version_info[0] < 3:
import Tkinter as tk
from tkFileDialog import askopenfilename
else:
import tkinter as tk
from tkinter.filedialog import askopenfilename
import tkinter.ttk as ttk
import tkinter.font as tkFont
from tkinter.scrolledtext import *
#from ScrolledText import *
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg,\
NavigationToolbar2TkAgg
from matplotlib.backend_bases import MouseEvent
from matplotlib.figure import Figure
from matplotlib.pyplot import imread, colorbar
from matplotlib import gridspec
from scipy.ndimage.interpolation import shift
Abel_methods = ['basex', 'direct', 'hansenlaw', 'linbasex', 'onion_peeling',
'onion_bordas', 'two_point', 'three_point']
center_methods = ['center-of-mass', 'convolution', 'gaussian', 'slice']
class PyAbel: #(tk.Tk):
def __init__(self, parent):
self.parent = parent
self.initialize()
def initialize(self):
self.fn = None
self.old_fn = None
self.old_method = None
self.old_fi = None
self.AIM = None
self.rmx = (368, 393)
# matplotlib figure
self.f = Figure(figsize=(2, 6))
self.gs = gridspec.GridSpec(2, 2, width_ratios=[1, 2])
self.gs.update(wspace=0.2, hspace=0.2)
self.plt = []
self.plt.append(self.f.add_subplot(self.gs[0]))
self.plt.append(self.f.add_subplot(self.gs[1]))
self.plt.append(self.f.add_subplot(self.gs[2], sharex=self.plt[0],
sharey=self.plt[0]))
self.plt.append(self.f.add_subplot(self.gs[3]))
for i in [0, 2]:
self.plt[i].set_adjustable('box-forced')
# hide until have data
for i in range(4):
self.plt[i].axis("off")
# tkinter
# set default font size for buttons
self.font = tkFont.Font(size=11)
self.fontB = tkFont.Font(size=12, weight='bold')
#frames top (buttons), text, matplotlib (canvas)
self.main_container = tk.Frame(self.parent, height=10, width=100)
self.main_container.pack(side="top", fill="both", expand=True)
self.button_frame = tk.Frame(self.main_container)
#self.info_frame = tk.Frame(self.main_container)
self.matplotlib_frame = tk.Frame(self.main_container)
self.button_frame.pack(side="top", fill="x", expand=True)
#self.info_frame.pack(side="top", fill="x", expand=True)
self.matplotlib_frame.pack(side="top", fill="both", expand=True)
self._menus()
self._button_area()
self._plot_canvas()
self._text_info_box()
def _button_frame(self):
self.button_frame = tk.Frame(self.main_container)
self.button_frame.pack(side="top", fill="x", expand=True)
self._menus()
def _menus(self):
# menus with callback ----------------
# duplicates the button interface
self.menubar = tk.Menu(self.parent)
self.transform_method = tk.IntVar()
self.center_method = tk.IntVar()
# File - menu
self.filemenu = tk.Menu(self.menubar, tearoff=0)
self.filemenu.add_command(label="Load image file",
command=self._loadimage)
self.filemenu.add_separator()
self.filemenu.add_command(label="Exit", command=self._quit)
self.menubar.add_cascade(label="File", menu=self.filemenu)
# Process - menu
self.processmenu = tk.Menu(self.menubar, tearoff=0)
#self.processmenu.add_command(label="Center image", command=self._center)
self.subcent=tk.Menu(self.processmenu)
for cent in center_methods:
self.subcent.add_radiobutton(label=cent,
var=self.center_method, val=center_methods.index(cent),
command=self._center)
self.processmenu.add_cascade(label="Center image",
menu=self.subcent, underline=0)
self.submenu=tk.Menu(self.processmenu)
for method in Abel_methods:
self.submenu.add_radiobutton(label=method,
var=self.transform_method, val=Abel_methods.index(method),
command=self._transform)
self.processmenu.add_cascade(label="Inverse Abel transform",
menu=self.submenu, underline=0)
self.processmenu.add_command(label="Speed distribution",
command=self._speed)
self.processmenu.add_command(label="Angular distribution",
command=self._anisotropy)
self.angmenu=tk.Menu(self.processmenu)
self.menubar.add_cascade(label="Processing", menu=self.processmenu)
# view - menu
self.viewmenu = tk.Menu(self.menubar, tearoff=0)
self.viewmenu.add_command(label="Raw image", command=self._display)
self.viewmenu.add_command(label="Inverse Abel transformed image",
command=self._transform)
self.viewmenu.add_command(label="view buttons",
command=self._on_buttons)
self.menubar.add_cascade(label="View", menu=self.viewmenu)
def _button_area(self):
# grid layout
# make expandable
for col in range(5):
self.button_frame.columnconfigure(col, weight=1)
self.button_frame.rowconfigure(col, weight=1)
# column 0 ---------
# load image file button
self.load = tk.Button(master=self.button_frame, text="load image",
font=self.fontB, fg="dark blue",
command=self._loadimage)
self.load.grid(row=0, column=0, sticky=tk.W, padx=(5, 10), pady=(5, 0))
self.sample_image = ttk.Combobox(master=self.button_frame,
font=self.font,
values=["from file", "from transform",
"sample dribinski", "sample Ominus"],
width=14, height=4)
self.sample_image.current(0)
self.sample_image.grid(row=1, column=0, padx=(5, 10))
# quit
self.quit = tk.Button(master=self.button_frame, text="Quit",
font=self.fontB, fg="dark red",
command=self._quit)
self.quit.grid(row=3, column=0, sticky=tk.W, padx=(5, 10), pady=(0, 5))
# column 1 -----------
# center image
self.center = tk.Button(master=self.button_frame, text="center image",
anchor=tk.W,
font=self.fontB, fg="dark blue",
command=self._center)
self.center.grid(row=0, column=1, padx=(0, 20), pady=(5, 0))
self.center_method = ttk.Combobox(master=self.button_frame,
font=self.font,
values=center_methods,
width=11, height=4)
self.center_method.current(1)
self.center_method.grid(row=1, column=1, padx=(0, 20))
# column 2 -----------
# Abel transform image
self.recond = tk.Button(master=self.button_frame,
text="Abel transform image",
font=self.fontB, fg="dark blue",
command=self._transform)
self.recond.grid(row=0, column=2, padx=(0, 10), pady=(5, 0))
self.transform = ttk.Combobox(master=self.button_frame,
values=Abel_methods,
font=self.font,
width=10, height=len(Abel_methods))
self.transform.current(2)
self.transform.grid(row=1, column=2, padx=(0, 20))
self.direction = ttk.Combobox(master=self.button_frame,
values=["inverse", "forward"],
font=self.font,
width=8, height=2)
self.direction.current(0)
self.direction.grid(row=2, column=2, padx=(0, 20))
# column 3 -----------
# speed button
self.speed = tk.Button(master=self.button_frame, text="speed",
font=self.fontB, fg="dark blue",
command=self._speed)
self.speed.grid(row=0, column=5, padx=20, pady=(5, 0))
self.speedclr = tk.Button(master=self.button_frame, text="clear plot",
font=self.font, command=self._speed_clr)
self.speedclr.grid(row=1, column=5, padx=20)
# column 4 -----------
# anisotropy button
self.aniso = tk.Button(master=self.button_frame, text="anisotropy",
font=self.fontB, fg="dark blue",
command=self._anisotropy)
self.aniso.grid(row=0, column=6, pady=(5, 0))
self.subframe = tk.Frame(self.button_frame)
self.subframe.grid(row=1, column=6)
self.rmin = tk.Entry(master=self.subframe, text='rmin', width=3,
font=self.font)
self.rmin.grid(row=0, column=0)
self.rmin.delete(0, tk.END)
self.rmin.insert(0, self.rmx[0])
self.lbl = tk.Label(master=self.subframe, text="to", font=self.font)
self.lbl.grid(row=0, column=1)
self.rmax = tk.Entry(master=self.subframe, text='rmax', width=3,
font=self.font)
self.rmax.grid(row=0, column=2)
self.rmax.delete(0, tk.END)
self.rmax.insert(0, self.rmx[1])
# turn off button interface
self.hide_buttons = tk.Button(master=self.button_frame,
text="hide buttons",
font=self.fontB, fg='grey',
command=self._hide_buttons)
self.hide_buttons.grid(row=3, column=6, sticky=tk.E, pady=(0, 20))
def _text_info_box(self):
# text info box ---------------------
self.text = ScrolledText(master=self.button_frame, height=6,
fg="mediumblue",
bd=1, relief=tk.SUNKEN)
self.text.insert(tk.END, "Work in progress, some features may"
" be incomplete ...\n")
self.text.insert(tk.END, "To start: load an image data file using"
" e.g. data/O2-ANU1024.txt.bz2\n"
" (1) load image button (or file menu)\n"
" (2) center image\n"
" (3) Abel transform\n"
" (4) speed\n"
" (5) anisotropy\n"
" (6) Abel transform <- change\n"
" (:) repeat\n")
self.text.grid(row=3, column=1, columnspan=3, padx=5)
def _plot_canvas(self):
# matplotlib canvas --------------------------
self.canvas = FigureCanvasTkAgg(self.f, master=self.matplotlib_frame)
#self.cid = self.canvas.mpl_connect('button_press_event', self._onclick)
self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=1)
self.toolbar = NavigationToolbar2TkAgg(self.canvas, self.parent)
self.toolbar.update()
self.canvas._tkcanvas.pack(anchor=tk.W, side=tk.TOP, fill=tk.BOTH, expand=1)
def _onclick(self,event):
print('button={:d}, x={:f}, y={:f}, xdata={:f}, ydata={:f}'.format(
event.button, event.x, event.y, event.xdata, event.ydata))
# call back functions -----------------------
def _display(self):
if self.fn is None:
self._loadimage()
# display image
self.plt[0].imshow(self.IM, vmin=0)
#rows, cols = self.IM.shape
#r2 = rows/2
#c2 = cols/2
#self.a.plot((r2, r2), (0, cols), 'r--', lw=0.1)
#self.a.plot((0, rows), (c2, c2),'r--', lw=0.1)
#self.f.colorbar(self.a.get_children()[2], ax=self.f.gca())
self.plt[0].set_title("raw image", fontsize=10)
self.canvas.show()
def _loadimage(self):
if self.fn is not None:
# clear old plot
for i in range(4):
self._clr_plt(i)
self.plt[i].axis("off")
self.fn = self.sample_image.get()
# update what is occurring text box
self.text.insert(tk.END, "\nloading image file {:s}".format(self.fn))
self.text.see(tk.END)
self.canvas.show()
if self.fn == "from file":
self.fn = askopenfilename()
# read image file
if ".txt" in self.fn:
self.IM = np.loadtxt(self.fn)
else:
self.IM = imread(self.fn)
elif self.fn == "from transform":
self.IM = self.AIM
self.AIM = None
for i in range(1,4):
self._clr_plt(i)
self.plt[i].axis("off")
self.direction.current(0)
else:
self.fn = self.fn.split(' ')[-1]
self.IM = abel.tools.analytical.sample_image(n=1001, name=self.fn)
self.direction.current(1) # raw images require 'forward' transform
self.text.insert(tk.END,"\nsample image: (1) Abel transform 'forward', ")
self.text.insert(tk.END," (2) load 'from transform', ")
self.text.insert(tk.END," (3) Abel transform 'inverse', ")
self.text.insert(tk.END," (4) Speed")
self.text.see(tk.END)
# if even size image, make odd
if self.IM.shape[0] % 2 == 0:
self.IM = shift(self.IM, (-0.5, -0.5))[:-1,:-1]
self.old_method = None
self.AIM = None
self.action = "file"
self.rmin.delete(0, tk.END)
self.rmin.insert(0, self.rmx[0])
self.rmax.delete(0, tk.END)
self.rmax.insert(0, self.rmx[1])
# show the image
self._display()
def _center(self):
self.action = "center"
center_method = self.center_method.get()
# update information text box
self.text.insert(tk.END, "\ncentering image using {:s}".\
format(center_method))
self.canvas.show()
# center image via chosen method
self.IM = abel.tools.center.center_image(self.IM, center=center_method,
odd_size=True)
#self.text.insert(tk.END, "\ncenter offset = {:}".format(self.offset))
self.text.see(tk.END)
self._display()
def _transform(self):
#self.method = Abel_methods[self.transform_method.get()]
self.method = self.transform.get()
self.fi = self.direction.get()
if self.method != self.old_method or self.fi != self.old_fi:
# Abel transform of whole image
self.text.insert(tk.END,"\n{:s} {:s} Abel transform:".\
format(self.method, self.fi))
if self.method == "basex":
self.text.insert(tk.END,
"\nbasex: first time calculation of the basis"
" functions may take a while ...")
elif self.method == "direct":
self.text.insert(tk.END,
"\ndirect: calculation is slowed if Cython unavailable ...")
self.canvas.show()
if self.method == 'linbasex':
self.AIM = abel.Transform(self.IM, method=self.method,
direction=self.fi,
transform_options=dict(return_Beta=True))
else:
self.AIM = abel.Transform(self.IM, method=self.method,
direction=self.fi,
symmetry_axis=None)
self.rmin.delete(0, tk.END)
self.rmin.insert(0, self.rmx[0])
self.rmax.delete(0, tk.END)
self.rmax.insert(0, self.rmx[1])
if self.old_method != self.method or self.fi != self.old_fi or\
self.action not in ["speed", "anisotropy"]:
self.plt[2].set_title(self.method+" {:s} Abel transform".format(self.fi),
fontsize=10)
self.plt[2].imshow(self.AIM.transform, vmin=0,
vmax=self.AIM.transform.max()/5.0)
#self.f.colorbar(self.c.get_children()[2], ax=self.f.gca())
#self.text.insert(tk.END, "{:s} inverse Abel transformed image".format(self.method))
self.text.see(tk.END)
self.old_method = self.method
self.old_fi = self.fi
self.canvas.show()
def _speed(self):
self.action = "speed"
# inverse Abel transform
self._transform()
# update text box in case something breaks
self.text.insert(tk.END, "\nspeed distribution")
self.text.see(tk.END)
self.canvas.show()
if self.method == 'linbasex':
self.speed_dist = self.AIM.Beta[0]
self.radial = self.AIM.radial
else:
# speed distribution
self.radial, self.speed_dist = abel.tools.vmi.angular_integration(
self.AIM.transform)
self.plt[1].axis("on")
self.plt[1].plot(self.radial, self.speed_dist/self.speed_dist[10:].max(),
label=self.method)
# make O2- look nice
if self.fn.find('O2-ANU1024') > -1:
self.plt[1].axis(xmax=500, ymin=-0.05)
elif self.fn.find('VMI_art1') > -1:
self.plt[1].axis(xmax=260, ymin=-0.05)
self.plt[1].set_xlabel("radius (pixels)", fontsize=9)
self.plt[1].set_ylabel("normalized intensity")
self.plt[1].set_title("radial speed distribution", fontsize=12)
self.plt[1].legend(fontsize=9, loc=0, frameon=False)
self.action = None
self.canvas.show()
def _speed_clr(self):
self._clr_plt(1)
def _clr_plt(self, i):
self.f.delaxes(self.plt[i])
self.plt[i] = self.f.add_subplot(self.gs[i])
self.canvas.show()
def _anisotropy(self):
def P2(x): # 2nd order Legendre polynomial
return (3*x*x-1)/2
def PAD(theta, beta, amp):
return amp*(1 + beta*P2(np.cos(theta)))
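        # Note: PAD is the usual velocity-map-imaging angular distribution model,
        # I(theta) = amp*(1 + beta*P2(cos theta)); it is overlaid below on the
        # measured intensity using the beta returned by radial_integration.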
self.action = "anisotropy"
self._transform()
if self.method == 'linbasex':
self.text.insert(tk.END,
"\nanisotropy parameter pixel range 0 to {}: "\
.format(self.rmx[1]))
else:
# radial range over which to follow the intensity variation with angle
self.rmx = (int(self.rmin.get()), int(self.rmax.get()))
self.text.insert(tk.END,
"\nanisotropy parameter pixel range {:} to {:}: "\
.format(*self.rmx))
self.canvas.show()
# inverse Abel transform
self._transform()
if self.method == 'linbasex':
self.beta = self.AIM.Beta[1]
self.radial = self.AIM.radial
self._clr_plt(3)
self.plt[3].axis("on")
self.plt[3].plot(self.radial, self.beta, 'r-')
self.plt[3].set_title("anisotropy", fontsize=12)
self.plt[3].set_xlabel("radius", fontsize=9)
self.plt[3].set_ylabel("anisotropy parameter")
# make O2- look nice
if self.fn.find('O2-ANU1024') > -1:
self.plt[3].axis(xmax=500, ymin=-1.1, ymax=0.1)
elif self.fn.find('VMI_art1') > -1:
self.plt[3].axis(xmax=260, ymin=-1.1, ymax=2)
else:
# intensity vs angle
self.beta, self.amp, self.rad, self.intensity, self.theta =\
abel.tools.vmi.radial_integration(self.AIM.transform,\
radial_ranges=[self.rmx,])
self.text.insert(tk.END," beta = {:g}+-{:g}".format(*self.beta[0]))
self._clr_plt(3)
self.plt[3].axis("on")
self.plt[3].plot(self.theta, self.intensity[0], 'r-')
self.plt[3].plot(self.theta, PAD(self.theta, self.beta[0][0],
self.amp[0][0]), 'b-', lw=2)
            self.plt[3].annotate("$\\beta({:d},{:d})={:.2g}\\pm{:.2g}$".
format(*self.rmx+self.beta[0]), (-3, self.intensity[0].min()/0.8))
self.plt[3].set_title("anisotropy", fontsize=12)
self.plt[3].set_xlabel("angle", fontsize=9)
self.plt[3].set_ylabel("intensity")
self.action = None
self.canvas.show()
def _hide_buttons(self):
self.button_frame.destroy()
def _on_buttons(self):
self._button_frame()
def _quit(self):
self.parent.quit() # stops mainloop
self.parent.destroy() # this is necessary on Windows to prevent
# Fatal Python Error: PyEval_RestoreThread: NULL tstate
if __name__ == "__main__":
root = tk.Tk()
pyabel = PyAbel(root)
root.title("PyAbel simple GUI")
root.config(menu=pyabel.menubar)
root.mainloop()
|
mit
|
sebalander/VisionUNQ
|
dev/graph_density.py
|
1
|
2648
|
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 29 18:03:16 2015
Plots something (author's note, translated from Spanish: "not sure what")
@author: jew
"""
# Importo Pickle
import pickle
import cv2
# Numpy
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import binned_statistic
def dumpVar(name, var):
filehandler = open("vars/" + name + ".obj","wb")
pickle.dump(var,filehandler)
filehandler.close()
def importVar(name, var):
try:
file = open("vars/" + name + ".obj",'rb')
object_file = pickle.load(file)
file.close()
except:
object_file = var
return object_file
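# Note: the try block below assumes that `res` and `vg` already exist in an
# interactive session and caches them to disk; if those names are undefined,
# the resulting NameError falls through to the bare except branch, which
# reloads the previously pickled copies instead.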
try:
dumpVar('_res', res[1][1])
dumpVar('_vel2', vg.pv.kp_dist_vect)
dumpVar('_kphist', vg.pv.kp_hist)
dumpVar('_ocu2', vg.pv.bf_vect)
dumpVar('_imhi', vg.im_hist)
dumpVar('_imbf', vg.im_bf)
dumpVar('_imbf2', vg.im_bf2)
dumpVar('_imframe', vg.im_frame)
dumpVar('_immask', vg.im_mask)
dumpVar('_tra', vg.im_transf)
dumpVar('_lab', vg.im_label)
dumpVar('_neti', vg.net_input)
dumpVar('_labels', vg.labels[0])
except:
ocu = importVar('_ocu','_ocu')
vel = importVar('_vel','_vel')
ocu2 = importVar('_ocu2','_ocu2')
vel2 = importVar('_vel2','_vel2')
tra = importVar('_tra','_tra')
imhi = importVar('_imhi','_imhi')
imbf = importVar('_imbf','_imbf')
imbf2 = importVar('_imbf2','_imbf2')
imframe = importVar('_imframe','_imframe')
immask = importVar('_immask','_immask')
resa = importVar('_res','_res')
lab = importVar('_lab','_lab')
neti = importVar('_neti','_neti')
labels = importVar('_labels','_labels')
kphist = importVar('_kphist','_kphist')
ocu = imframe.copy()
ocu[imbf2>0,2] = 255
ocu = cv2.cvtColor(ocu, cv2.COLOR_BGR2RGB)
tot = imframe.copy()
tot[immask>0,2] = 255
tot = cv2.cvtColor(tot, cv2.COLOR_BGR2RGB)
# plt.figure()
# plt.imshow(cv2.cvtColor(resa[a1:a2,a3:a4], cv2.COLOR_BGR2RGB))
# plt.figure()
# plt.imshow(cv2.cvtColor(tra, cv2.COLOR_BGR2RGB))
# plt.figure()
# plt.imshow(imhi[a1:a2,a3:a4])
# plt.figure()
# plt.imshow(imbf[a1:a2,a3:a4])
# plt.figure()
# plt.imshow(imbf2[a1:a2,a3:a4]>0)
plt.figure()
plt.imshow(ocu)
plt.figure()
plt.imshow(tot)
# means = [0,0,0,0]
# estan = [0,0,0,0]
# ind = np.arange(4)
# width = 0.25
# plt.figure()
# vel2 = np.vstack(velf[4000:10100])
# ocu2 = np.vstack(ocu[4000:10100])
# plt.plot(np.array(vel2)/110)
# plt.plot(np.array(ocu2)/15525)
# plt.xlim(0,len(vel2))
# plt.ylim(0,1.2)
# plt.legend(['Velocidad promedio normalizada','Llenado normalizado de la via'])
|
bsd-3-clause
|
etkirsch/scikit-learn
|
sklearn/manifold/tests/test_mds.py
|
324
|
1862
|
import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
    # Not square similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
|
bsd-3-clause
|
pepper-johnson/Erudition
|
Thesis/Mallet/scripts/mallet/distributions/local_distribution.py
|
1
|
4396
|
import os
import sys
import json
import datetime
import numpy as np
import pandas as pd
alpha = float(sys.argv[1])
number_of_topics = int(sys.argv[2])
input_directory = sys.argv[3]
output_directory = sys.argv[4]
global_distribution_file_path = sys.argv[5]
extra_stoplist_path = sys.argv[6]
def clean_term(term):
return term.replace('\n', '').strip()
def load_tokens(path):
with open(path, 'r') as file:
for line in file:
yield clean_term(line)
def is_in_dictionary(dictionary, term):
try:
_ = dictionary[term]
except:
return False
return True
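# Note: is_in_dictionary is just a membership test; `term in dictionary`
# (or keeping the stoplist in a set) would be the idiomatic equivalent.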
def main():
## globals,
empty_distribution = [ 0.0 for _ in range(0, number_of_topics) ]
## pull stoplists,
extra_stoplist = list(load_tokens(extra_stoplist_path))
stoplist = list(load_tokens(r'C:\Users\dmpas\thesis\data\text\scripts\mallet\globals\stoplist.txt'))
stoplist_to_end_all_stoplists = {}
for term in extra_stoplist:
stoplist_to_end_all_stoplists[term] = 0
for term in stoplist:
stoplist_to_end_all_stoplists[term] = 0
## cache global distribution,
names = ['terms']
for k in range(0, number_of_topics):
names.append(k)
tcol = {
'terms': str
}
df_global = pd.read_csv(global_distribution_file_path, names=names, header=None, skiprows=1, index_col='terms', dtype=tcol)
df_global = df_global.reset_index()
df_global.terms = df_global.terms.fillna('null').astype('str')
df_global = df_global.set_index('terms')
## input documents,
for name in os.listdir(input_directory):
print(' * starting', name)
## local globals.
document_to_topic = dict([ ( k, float(0) ) for k in range(0, number_of_topics) ])
## build paths.
input_file = r'{}\{}'.format(input_directory, name)
clean_file_name = name.replace('.txt', '')
output_file = r'{}\{}.csv'.format(output_directory, clean_file_name)
output_counts_file = r'{}\{}_counts.csv'.format(output_directory, clean_file_name)
## setup "term" default distributions.
words_to_topic = [ ]
offset = (alpha / number_of_topics)
with open(input_file, 'r') as file:
for line in file:
term = clean_term(line)
if is_in_dictionary(stoplist_to_end_all_stoplists, term):
continue
distribution = df_global.loc[term].values.tolist()
for k in range(0, number_of_topics):
document_to_topic[k] += distribution[k]
total = np.sum(distribution) + alpha
distribution = (np.array(distribution) + offset) / total
assert np.sum(distribution) >= .99
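                # Note: offset = alpha/number_of_topics is added to each of the
                # number_of_topics entries, so the smoothed vector sums to
                # (sum(distribution) + alpha) / total = 1 in exact arithmetic;
                # the >= .99 assert only allows for floating-point round-off.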
words_to_topic.append((term, distribution))
if len(words_to_topic) == 0:
dir_ = r'C:\Users\dmpas\thesis\data\text\bitcoin\src\corpus\by_day\mallet\estimates'
estimates_dir_ = ['chibs', 'hm', 'is', 'lengths']
for file in estimates_dir_:
with open(r'{}\{}\{}'.format(dir_, file, name), 'w') as f_:
f_.write('0\n');
print(' * finished', name)
continue
## setup distribution, values will add up to 1,
df_distribution = pd.DataFrame(list(words_to_topic))
df_distribution.columns = [ 'term', 'distribution' ]
topic_columns = [ str(t) for t in range(0, number_of_topics) ]
for k in range(0, number_of_topics):
df_distribution[str(k)] = df_distribution.distribution.map(lambda i: i[k])
df_distribution = df_distribution.drop(columns=['distribution'])
## save file,
df_distribution.to_csv(output_file)
## build document to topic distribution,
total_words = np.sum(list(document_to_topic.values()))
if total_words != 0:
for topic in document_to_topic.keys():
document_to_topic[topic] /= total_words
name_without_extension = name.replace('.txt', '')
with open('{}/{}_doc_to_topic.json'.format(output_directory, name_without_extension), 'w') as file:
file.write(json.dumps(document_to_topic))
print(' * finished', name)
print()
if __name__ == "__main__":
main()
|
apache-2.0
|
tribilium/offset_piston_motion
|
offset_piston_motion.py
|
1
|
2507
|
"""
Offset Piston Motion Animation using Matplotlib.
Animation designed to run on Raspberry Pi 2
Author: Peter D. Kazarinoff, 2016
Tribilium Engineering Solutions www.tribilium.com
"""
#import necessary packages
import numpy as np
from numpy import pi, sin, cos, sqrt
import matplotlib.pyplot as plt
import matplotlib.animation as animation
#input parameters
r = 1.0 # crank radius
l = 4.0 # connecting rod length
d = 0.5; # offset distance
rot_num = 6 # number of crank rotations
increment = 0.1 # angle increment
#create the angle array, where the last angle is the number of rotations*2*pi
angle_minus_last = np.arange(0,rot_num*2*pi,increment)
angle = np.append(angle_minus_last, rot_num*2*pi)
X1=np.zeros(len(angle)) # array of crank x-positions: Point 1
Y1=np.zeros(len(angle)) # array of crank y-positions: Point 1
X2=np.zeros(len(angle)) # array of rod x-positions: Point 2
Y2=np.zeros(len(angle)) # array of rod y-positions: Point 2
#find the crank and connecting rod positions for each angle
for index, theta in enumerate(angle):
    x1 = r*cos(theta) # x-coordinate of the crank: Point 1
    y1 = r*sin(theta) # y-coordinate of the crank: Point 1
x2 = d # x-coordinate of the rod: Point 2
# y-coordinate of the rod: Point 2
y2 = r*sin(theta) + sqrt( l**2 - (r*cos(theta)-d)**2 )
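    # Note: the piston pin is constrained to the vertical line x = d, so the rod
    # length l fixes (x1 - d)**2 + (y2 - y1)**2 = l**2; taking the upper root
    # gives y2 = y1 + sqrt(l**2 - (r*cos(theta) - d)**2), which is the line above.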
X1[index]=x1 #grab the crankshaft x-position
Y1[index]=y1 #grab the crankshaft y-position
X2[index]=x2 #grab the connecting rod x-position
Y2[index]=y2 #grab the connecting rod y-position
#print piston_height
#axis ([-r-l r+l -3*r 3*r+l]); %set the plot size
#pause (speed); %wait before next iteration
# set up the figure and subplot
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal', autoscale_on=False, xlim=(-4,4), ylim=(-2,6))
ax.grid()
line, = ax.plot([], [], 'o-', lw=5, color='r')
# initialization function
def init():
line.set_data([], [])
return line,
# animation function
def animate(i):
x_points = [0, X1[i], X2[i]]
y_points = [0, Y1[i], Y2[i]]
line.set_data(x_points, y_points)
return line,
# call the animation
ani = animation.FuncAnimation(fig, animate, init_func=init, frames=len(X1), interval=40, blit=True, repeat=False)
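# Note: the FuncAnimation object must stay referenced (here via `ani`); if it
# were not assigned to a name, matplotlib could garbage-collect the animation
# before it runs.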
## to save animation, uncomment the line below:
## ani.save('offset_piston_motion_animation.mp4', fps=30, extra_args=['-vcodec', 'libx264'])
#show the animation
plt.show()
|
gpl-3.0
|
buguen/pylayers
|
pylayers/antprop/signature.py
|
1
|
118968
|
#-*- coding:Utf-8 -*-
"""
Class Signatures
================
.. autosummary::
:toctree: generated/
Signatures.__init__
Signatures.__repr__
Signatures.__len__
Signatures.num
Signatures.info
Signatures.saveh5
Signatures.loadh5
Signatures._saveh5
Signatures._loadh5
Signatures.load
Signatures.save
Signatures.sp
Signatures.calsig
Signatures.exist
Signatures.dido
Signatures.run
Signatures.meta
Signatures.lineofcycle
Signatures.cones
Signatures.unfold
Signatures.show
Signatures.showi
Signatures.rays
Signatures.raysv
Signatures.image
Signatures.image2
Class Signature
===============
.. autosummary::
:toctree: generated/
Signature.__init__
Signature.__repr__
Signature.info
Signature.split
Signature.ev2
Signature.evf
Signature.ev
Signature.unfold
Signature.evtx
Signature.image
Signature.backtrace
Signature.sig2beam
Signature.sig2ray
Utility functions
=================
.. autosummary::
:toctree: generated/
showsig
gidl
frontline
edgeout2
edgeout
"""
import doctest
import numpy as np
#import scipy as sp
import scipy.linalg as la
import pdb
import h5py
import copy
import time
import pickle
import logging
import networkx as nx
import shapely.geometry as shg
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pylayers.gis.layout as layout
import pylayers.util.geomutil as geu
import pylayers.util.cone as cone
#import pylayers.util.graphutil as gph
import pylayers.util.pyutil as pyu
import pylayers.util.plotutil as plu
from pylayers.antprop.rays import Rays
from pylayers.util.project import *
import heapq
import shapely.geometry as sh
import shapely.ops as sho
from tqdm import tqdm
#from numba import autojit
def plot_lines(ax, ob, color = []):
"""
Parameters
----------
ax :
ob :
"""
from descartes.patch import PolygonPatch
for ii,line in enumerate(ob):
if color == []:
if ii ==0 :
c ='g'
elif ii == len(ob)-1:
c ='r'
else:
c= 'k'
else:
c=color
x, y = line.xy
ax.plot(x, y, color=c, alpha=0.7, linewidth=3, solid_capstyle='round', zorder=2)
return ax
def plot_poly(ax, ob, color = []):
""" plot polygon
Parameters
----------
ax :
ob :
"""
from descartes.patch import PolygonPatch
for ii,poly in enumerate(ob):
pp = PolygonPatch(poly,alpha=0.3)
ax.add_patch(pp)
return ax
def showsig(L,s,tx=[],rx=[]):
""" show signature
Parameters
----------
L : Layout
s :
tx :
rx :
"""
L.display['thin']=True
fig,ax = L.showGs()
L.display['thin']=False
L.display['edlabel']=True
L.showGs(fig=fig,ax=ax,edlist=s,width=4)
if tx !=[]:
plt.plot(tx[0],tx[1],'x')
if rx !=[]:
plt.plot(rx[0],rx[1],'+')
plt.title(str(s))
plt.show()
L.display['edlabel']=False
def gidl(g):
""" gi without diffraction
Returns
-------
gr : A graph
"""
edlist=[]
pos={}
for n in g.nodes():
if len(n)>1:
edlist.append(n)
gr = g.subgraph(edlist)
for k in gr.edge:
for k1 in gr.edge[k]:
ke = gr.edge[k][k1]['output'].keys()
va = gr.edge[k][k1]['output'].values()
keva = zip(ke,va)
keva_valid = [ x for x in keva if len(x[0])>1]
gr.edge[k][k1]['output']=dict(keva_valid)
dpos = {k:g.pos[k] for k in edlist}
gr.pos=dpos
return(gr)
def shLtmp(L):
seg_connect = {x:L.Gs.edge[x].keys() for x in L.Gs.nodes() if x >0}
dpts = {x[0]:(L.Gs.pos[x[1][0]],L.Gs.pos[x[1][1]]) for x in seg_connect.items() }
L._shseg = {p[0]:sh.LineString(p[1]) for p in dpts.items()}
def showsig2(lsig,L,tahe):
if isinstance(lsig,list):
lsig = np.array([(i[0],len(i)) for i in lsig])
for k in lsig:
k0 = k[0]
k1 = k[1]
if k0>0:
npt = L.Gs[k0].keys()
pta = np.array(L.Gs.pos[npt[0]])
phe = np.array(L.Gs.pos[npt[1]])
if k1==2:
plu.displot(pta.reshape(2,1),phe.reshape(2,1),color='r',linewidth=2)
if k1 ==3:
plu.displot(pta.reshape(2,1),phe.reshape(2,1),color='g',linewidth=2)
for th in tahe:
ta = th[0]
he = th[1]
plu.displot(ta.reshape(2,1),he.reshape(2,1),color='k',linewidth=1)
tahe = np.array(tahe) # Nseg x tahe x xy
pta = tahe[:,0,:].T #2 x Nseg
phe = tahe[:,1,:].T # 2 x Nseg
seq = lsig[:,0]
if not (geu.ccw(pta[:,0],phe[:,0],phe[:,-1]) ^
geu.ccw(phe[:,0],phe[:,-1],pta[:,-1]) ):
vr = ( pta[:,0],phe[:,-1])
vl = ( phe[:,0],pta[:,-1])
# twisted = True
lef = sh.LineString((pta[:,0],phe[:,-1]))
rig = sh.LineString((phe[:,0],pta[:,-1]))
else:
vr = ( pta[:,0],pta[:,-1])
vl = ( phe[:,0],phe[:,-1])
lef = sh.LineString((pta[:,0],pta[:,-1]))
rig = sh.LineString((phe[:,0],phe[:,-1]))
plt.ion()
plt.gcf()
#L.showG('s',labels=True)
lines = [L._shseg[seq[0]]]
plt.title(str(lsig))
plot_lines(ax=plt.gca(),ob=lines)
plot_lines(ax=plt.gca(),ob=[lef],color='g')
plot_lines(ax=plt.gca(),ob=[rig],color='r')
plt.scatter(pta[0,:],pta[1,:],marker='d',s=70,label='tail')
plt.scatter(phe[0,:],phe[1,:],marker='s',s=70,label='head')
#plu.displot(vl[0].reshape(2,1),vl[1].reshape(2,1),arrow=True)
#plu.displot(vr[0].reshape(2,1),vr[1].reshape(2,1),arrow=True)
plt.axis('auto')
plt.legend()
#@profile
def valid(lsig,L,tahe=[]):
"""
Check if a signature is valid.
if a segment of a given signature is not in or touches the polygon
described by the 1st and last segment, the signature is not valid
Parameters
----------
lsig : list of tuple from run |signatures
L : layout
tahe :
lensig , ta|he , x,y
Returns
-------
inside : boolean
is the signature valid ?
"""
lensi = len(lsig)
if lensi<=3:
return True
# DEBUG
# if lensi == 4:
# if np.all(lsig == np.array([[ 5, 2, 67, 58],[ 2, 2, 3, 2]]).T):
# import ipdb
# ipdb.set_trace()
# ensure compatibility with Signature.run where
# lsig is a list of tuple
if isinstance(lsig,list):
lsig = np.array([(i[0],len(i)) for i in lsig])
pta = np.empty((2,lensi))
phe = np.empty((2,lensi))
seq = lsig[:,0]
# upos = np.where(seq>0)[0]
# uneg = np.where(seq<0)[0]
# tahep = L.seg2pts(seq[upos])
# tahen = np.array([L.Gs.pos[i] for i in seq[uneg]]).T
# tahen = np.vstack((tahen,tahen))
# tahe = np.empty((4,lensi))
# tahe[:,upos]=tahep
# try:
# tahe[:,uneg]=tahen
# except:
# pass
# pts = [k for i in seq for k in [L.Gs[i].keys()[0],L.Gs[i].keys()[1]]]
# if tahe ==[]:
# print 'run tahe\n',np.array(tahe)
# if tahe == []:
# pts = [L.Gs[i].keys() for i in seq]
# tahe = np.array([[L.Gs.pos[p[0]],L.Gs.pos[p[1]]] for p in pts])
# pta[:,0] = tahe[0,0,:]
# phe[:,0] = tahe[0,1,:]
# typ = lsig[:,1]
# mirror=[]
# # lines = [L._shseg[seq[0]]]
# for i in range(1,lensi):
# # pam = pa[:,i].reshape(2,1)
# # pbm = pb[:,i].reshape(2,1)
# pam = tahe[i,0,:].reshape(2,1)
# pbm = tahe[i,1,:].reshape(2,1)
# if typ[i] == 2: # R
# for m in mirror:
# pam = geu.mirror(pam,pta[:,m],phe[:,m])
# pbm = geu.mirror(pbm,pta[:,m],phe[:,m])
# pta[:,i] = pam.reshape(2)
# phe[:,i] = pbm.reshape(2)
# mirror.append(i)
# elif typ[i] == 3 : # T
# for m in mirror:
# pam = geu.mirror(pam,pta[:,m],phe[:,m])
# pbm = geu.mirror(pbm,pta[:,m],phe[:,m])
# pta[:,i] = pam.reshape(2)
# phe[:,i] = pbm.reshape(2)
# elif typ[i] == 1 : # D
# pta[:,i] = pam.reshape(2)
# phe[:,i] = pbm.reshape(2)
# else:
tahe = np.array(tahe) # Nseg x tahe x xy
pta = tahe[:,0,:].T #2 x Nseg
phe = tahe[:,1,:].T # 2 x Nseg
# ### ONLY FOR TEST TO BE DELETED
# pts = [L.Gs[i].keys() for i in seq]
# tahetest = np.array([[L.Gs.pos[p[0]],L.Gs.pos[p[1]]] for p in pts])
# ptat = np.empty((2,lensi))
# phet = np.empty((2,lensi))
# ptat[:,0] = tahetest[0,0,:]
# phet[:,0] = tahetest[0,1,:]
# typ = lsig[:,1]
# mirror=[]
#lines = [L._shseg[seq[0]]]
# for i in range(1,lensi):
# # pam = pa[:,i].reshape(2,1)
# # pbm = pb[:,i].reshape(2,1)
# pam = tahetest[i,0,:].reshape(2,1)
# pbm = tahetest[i,1,:].reshape(2,1)
# if typ[i] == 2: # R
# for m in mirror:
# pam = geu.mirror(pam,ptat[:,m],phet[:,m])
# pbm = geu.mirror(pbm,ptat[:,m],phet[:,m])
# ptat[:,i] = pam.reshape(2)
# phet[:,i] = pbm.reshape(2)
# mirror.append(i)
# elif typ[i] == 3 : # T
# for m in mirror:
# pam = geu.mirror(pam,ptat[:,m],phet[:,m])
# pbm = geu.mirror(pbm,ptat[:,m],phet[:,m])
# ptat[:,i] = pam.reshape(2)
# phet[:,i] = pbm.reshape(2)
# elif typ[i] == 1 : # D
# ptat[:,i] = pam.reshape(2)
# phet[:,i] = pbm.reshape(2)
# tahetest = np.dstack((ptat.T,phet.T)).swapaxes(1,2)
# if np.sum(tahe-tahetest) != 0:
# import ipdb
# ipdb.set_trace()
# determine the 2 side of the polygon ( top/bottom = tahe[0]/tahe[-1])
#vl and vr are 2 director vector lying on the polygon side.
if not (geu.ccw(pta[:,0],phe[:,0],phe[:,-1]) ^
geu.ccw(phe[:,0],phe[:,-1],pta[:,-1]) ):
vr = ( pta[:,0],pta[:,-1])
vl = ( phe[:,0],phe[:,-1])
# vr = ( pta[:,0],phe[:,-1])
# vl = ( phe[:,0],pta[:,-1])
# twisted = True
#lef = sh.LineString((pta[:,0],pta[:,-1]))
#rig = sh.LineString((phe[:,0],phe[:,-1]))
else:
vr = ( pta[:,0], phe[:,-1])
vl = ( phe[:,0],pta[:,-1])
# vr = ( pta[:,0],pta[:,-1])
# vl = ( phe[:,0],phe[:,-1])
# twisted = False
#lef = sh.LineString((pta[:,0],phe[:,-1]))
#rig = sh.LineString((pta[:,-1],phe[:,0]))
# looking situation where Tail and head are not inside the polygon
# => both tahe are left of vr and vl
#=> both tahe are right of vr and vl
lta = geu.isleft(pta[:,1:-1],vl[0][:,None],vl[1][:,None])
rta = geu.isleft(pta[:,1:-1],vr[0][:,None],vr[1][:,None])
lhe = geu.isleft(phe[:,1:-1],vl[0][:,None],vl[1][:,None])
rhe = geu.isleft(phe[:,1:-1],vr[0][:,None],vr[1][:,None])
out = (lta & lhe ) | (~rta & ~rhe)
inside = ~out
# #debug
# plt.ion()
# plt.gcf()
# #plt.title(str(cond))
# #Ok plot_lines(ax=plt.gca(),ob=lines)
# plot_lines(ax=plt.gca(),ob=[lef],color='g')
# plot_lines(ax=plt.gca(),ob=[rig],color='r')
# plt.scatter(pta[0,:],pta[1,:],marker='d',s=70,label='tail')
# plt.scatter(phe[0,:],phe[1,:],marker='s',s=70,label='head')
# plu.displot(vl[0].reshape(2,1),vl[1].reshape(2,1),arrow=True)
# plu.displot(vr[0].reshape(2,1),vr[1].reshape(2,1),arrow=True)
# plt.legend()
return np.all(inside)
class Signatures(PyLayers,dict):
""" set of Signature given 2 Gt cycle (convex) indices
Attributes
----------
L : gis.Layout
source : int
source convex cycle
target : int
target convex cycle
"""
def __init__(self,L,source,target,cutoff=3,threshold = 0.6):
""" object constructor
Parameters
----------
L : Layout
dump : int
source : int
cycle number
target : int
cycle index
cutoff : int
limiting depth level in graph exploration (default 3)
        A signature is a dict of arrays
        The array is an interleaving between nstr and type of interaction
        typeInt = 1,2,3 (diffraction, reflection, transmission)
Si[1]
np.array([5,2,19,2,26,2,72,2])
"""
self.L = L
self.dump = -1
self.source = source
self.target = target
self.cutoff = cutoff
self.threshold = threshold
self.ratio = {}
self.filename = self.L._filename.split('.')[0] +'_' + str(self.source) +'_' + str(self.target) +'_' + str(self.cutoff) +'.sig'
def __repr__(self):
def fun1(x):
if x==1:
return('R')
if x==2:
return('T')
if x==3:
return('D')
size = {}
s = self.__class__.__name__ + '\n' + '----------'+'\n'
#s = s + str(self.__sizeof__())+'\n'
for k in self:
size[k] = len(self[k])/2
s = s + 'from cycle : '+ str(self.source) + ' to cycle ' + str(self.target)+'\n'
if self.dump==-1:
ldump = self.keys()
else:
ldump = self.dump
for k in ldump:
s = s + str(k) + ' : ' + str(size[k]) + '\n'
a = np.swapaxes(self[k].reshape(size[k],2,k),0,2)
# nl x 2 x nsig
for l in np.arange(a.shape[2]):
for i in range(k):
if i==k-1:
s = s + '('+str(a[i,0,l])+','+str(a[i,1,l])+')'
else:
s = s + '('+str(a[i,0,l])+','+str(a[i,1,l])+'),'
s = s+'\n'
return(s)
def __len__(self):
nsig = 0
for k in self:
size = len(self[k])/2
nsig += size
return(nsig)
def compl(self,lint,L):
""" completion from lint
Parameters
----------
lint : list
list of interactions
Examples
--------
>>> Si.compl([(6220,3),(6262,3),(6241,3)],DL.L)
"""
# all group of interactions
for k in self:
if k > len(lint):
Si = self[k]
Ns,Nb = Si.shape
# all signatures form a group of interactions
for l in range(Ns/2):
# all interactions
b1 = True
for i1,it in enumerate(lint):
if ((Si[2*l,i1] == it[0]) and
(Si[2*l+1,i1] == it[1])):
pass
else:
b1 = False
if b1:
sig = Si[2*l:2*l+2,:]
sigi = self.sig2inter(L,sig)
print(k,l,' :',sigi)
# all
def sig2inter(self,L,lsi=[]):
        """ convert signature to corresponding list of interactions in Gi
        Parameters
        ----------
        L : Layout
        lsi : nd.array
            signature (2xnb_sig,sig_length)
        Examples
        --------
        >>> lsi = DL.Si[3]
        >>> DL.Si.sig2inter(DL.L,lsi)
        """
assert L.isbuilt, AttributeError('Layout is not built')
assert len(lsi)%2==0, AttributeError('Incorrect signature(s) shape')
tlinter = []
for uu in range(0,len(lsi),2):
si = lsi[uu:uu+2,:]
lsig = si.shape[1]
linter = []
for k in range(lsig):
# nstr : seg or points
nstr = si[0,k]
typ = si[1,k]
# cycles connected to seg or point
seg_cy = copy.deepcopy(L.Gs.node[nstr]['ncycles'])
if k == 0:
cy0 = self.source
lcy0 =[cy0]
if (typ==3) or (typ==2):
cy0 = list(set(seg_cy).intersection(set(lcy0)))[0]
cy1 = [x for x in seg_cy if x!= cy0 ][0]
if k == (lsig -1):
cy1 = self.target
if typ == 1:
inter = (nstr,)
lcy0 = L.Gs.node[nstr]['ncycles']
elif typ == 2:
inter = (nstr,cy0)
elif typ == 3:
inter = (nstr,cy0,cy1)
# changing cycle
lcy0 = [cy1]
linter.append(inter)
tlinter.append(linter)
if len(lsi) == 2:
tlinter=tlinter[0]
return tlinter
def sig2prob(self,L,lsi):
""" get signatures probability
L : Layout
lsi : nd.array
signature (2xnb_sig,sig_length)
Returns
-------
tlproba : list (nb_sig,sig_length-2)
            output proba of each triplet of interactions
"""
slsi = lsi.shape[1]
assert L.isbuilt, AttributeError('Layout is not built')
assert hasattr(L,'Gi'), AttributeError('Layout has not Gi Graph')
assert L.Gi.size != 0, AttributeError('Gi Graph is empty')
assert len(lsi)%2==0, AttributeError('Incorrect signature(s) shape')
        assert slsi>=3, AttributeError('Proba available for signature with at least 3 interactions')
linter = self.sig2inter(L,lsi)
if len(lsi) == 2:
linter=[linter]
tlproba = []
for inter in linter:
lproba = []
for k in range(slsi-2):
proba = L.Gi[inter[k]][inter[k+1]]['output'][inter[k+2]]
lproba.append(proba)
tlproba.append(lproba)
return tlproba
def num(self):
""" determine the number of signatures
"""
self.nsig = 0
self.nint = 0
for k in self:
size = len(self[k])/2
self.nsig += size
self.nint += size*k
def info(self):
# print "Signatures for scenario defined by :"
# print "Layout"
# print "======"
# L = self.L.info()
# print "================================"
# print "source : ", self.source
# print "target : ", self.target
size = {}
print self.__class__.__name__ + '\n' + '----------'+'\n'
#s = s + str(self.__sizeof__())+'\n'
for k in self:
size[k] = len(self[k])/2
print 'from cycle : '+ str(self.source) + ' to cycle ' + str(self.target)+'\n'
pyu.printout('Reflection',pyu.BLUE)
print ' '
pyu.printout('Transmission',pyu.GREEN)
print ' '
pyu.printout('Diffraction',pyu.RED)
print ' \n'
for k in self:
print str(k) + ' : ' + str(size[k])
a = np.swapaxes(self[k].reshape(size[k],2,k),0,2)
# nl x 2 x nsig
for i in range(k):
nstr=a[i,0,:]
typ=a[i,1,:]
print '[',
for n,t in zip(nstr,typ):
if t==1:
pyu.printout(str(n),pyu.BLUE)
if t==2:
pyu.printout(str(n),pyu.GREEN)
if t==3:
pyu.printout(str(n),pyu.RED)
print ']'
print'\n'
# s = s + ' '+ str(a[i,0,:]) + '\n'
# s = s + ' '+ str(a[i,1,:]) + '\n'
def check(self):
""" check signature
Returns
-------
OK : np.array
KO : np.array
"""
OK = Signatures(self.L,self.target,self.source)
KO = Signatures(self.L,self.target,self.source)
for i in self:
sigs = self[i]
for s in range(len(sigs)/2):
sig = sigs[2*s:2*s+2,:]
ok = valid(sig.T,self.L)
if ok :
try :
OK[i]=np.vstack((OK[i],sig))
except:
OK[i]=[]
OK[i]=sig
pass
else :
try :
KO[i]=np.vstack((KO[i],sig))
except:
KO[i]=[]
KO[i]=sig
pass
return OK,KO
def saveh5(self):
""" save signatures in hdf5 format
"""
filename=pyu.getlong(self.filename+'.h5',pstruc['DIRSIG'])
f=h5py.File(filename,'w')
        # try/except to avoid losing the h5 file if
# read/write error
try:
f.attrs['L']=self.L._filename
f.attrs['source']=self.source
f.attrs['target']=self.target
f.attrs['cutoff']=self.cutoff
for k in self.keys():
f.create_dataset(str(k),shape=np.shape(self[k]),data=self[k])
f.close()
except:
f.close()
            raise NameError('Signature: issue when writing h5py file')
def loadh5(self,filename=[]):
""" load signatures hdf5 format
"""
if filename == []:
_filename = self.filename
else :
_filename = filename
filename=pyu.getlong(_filename+'.h5',pstruc['DIRSIG'])
        # try/except to avoid losing the h5 file if
# read/write error
try:
f=h5py.File(filename,'r')
for k in f.keys():
self.update({eval(k):f[k][:]})
f.close()
except:
f.close()
raise NameError('Signature: issue when reading h5py file')
_fileL=pyu.getshort(filename).split('_')[0]+'.ini'
self.L=layout.Layout(_fileL)
try:
self.L.dumpr()
except:
self.L.build()
self.L.dumpw()
def _saveh5(self,filenameh5,grpname):
""" Save in hdf5 compliant with Links
Parameters
----------
filenameh5
        grpname
Notes
-----
"""
filename=pyu.getlong(filenameh5,pstruc['DIRLNK'])
# if grpname == '':
# grpname = str(self.source) +'_'+str(self.target) +'_'+ str(self.cutoff)
try:
# file management
fh5=h5py.File(filename,'a')
if not grpname in fh5['sig'].keys():
fh5['sig'].create_group(grpname)
else :
raise NameError('sig/'+grpname +'already exists in '+filenameh5)
f=fh5['sig/'+grpname]
# write data
f.attrs['L']=self.L._filename
f.attrs['source']=self.source
f.attrs['target']=self.target
f.attrs['cutoff']=self.cutoff
f.attrs['threshold']=self.threshold
f.create_group('ratio')
f.create_group('sig')
for k in self.keys():
f['sig'].create_dataset(str(k),shape=np.shape(self[k]),data=self[k])
f['ratio'].create_dataset(str(k),shape=np.shape(self.ratio[k]),data=self.ratio[k])
fh5.close()
except:
fh5.close()
            raise NameError('Signature: issue when writing h5py file')
def _loadh5(self,filenameh5,grpname,**kwargs):
""" load signatures in hdf5 format compliant with class Links
Parameters
----------
filenameh5 : string
filename of the h5py file (from Links Class)
grpname : string
groupname of the h5py file (from Links Class)
kwargs
may contain a L: layout object
if L = [] the layout is loaded from the layout name stored
into the h5 file
if L = Layout the layout passed in arg is used
See Also
--------
pylayers.simul.links
"""
filename=pyu.getlong(filenameh5,pstruc['DIRLNK'])
# if grpname =='':
# grpname = str(self.source) +'_'+str(self.target) +'_'+ str(self.cutoff)
        # try/except to avoid losing the h5 file if
# read/write error
try:
fh5=h5py.File(filename,'r')
f=fh5['sig/'+grpname]
# compliant with new h5 format:
if 'sig' in f.keys():
for k in f['sig'].keys():
self.update({eval(k):f['sig'][k][:]})
self.ratio.update({eval(k):f['ratio'][k][:]})
# old h5 format
else:
for k in f.keys():
self.update({eval(k):f[k][:]})
Lname=f.attrs['L']
self.cutoff = f.attrs['cutoff']
if 'threshold' in f.attrs.keys():
self.threshold = f.attrs['threshold']
# ensure backward compatibility
else:
# find threshold
th = np.min([np.min(self.ratio[x])
for x in self.ratio])
self.threshold = th.round(decimals=2)
fh5.close()
except:
fh5.close()
raise NameError('Signature: issue when reading h5py file')
if kwargs.has_key('L'):
self.L=kwargs['L']
else:
self.L=layout.Layout(Lname)
try:
self.L.dumpr()
except:
self.L.build()
self.L.dumpw()
def save(self):
""" save signatures
"""
L=copy.deepcopy(self.L)
del(self.L)
filename=pyu.getlong(self.filename+'.h5',pstruc['DIRSIG'])
with open(filename, 'wb') as handle:
pickle.dump(self, handle)
self.L=L
def load(self,filename=[]):
""" load signatures
"""
if filename == []:
_filename = self.filename
else :
_filename = filename
filename=pyu.getlong(_filename,pstruc['DIRSIG'])
try:
handle=open(filename, 'rb')
sitmp = pickle.load(handle)
except:
raise NameError(filename +' does not exist')
# to load a dictionary, use update
self.update(sitmp)
_fileL=pyu.getshort(filename).split('_')[0]+'.ini'
self.L=layout.Layout(_fileL)
try:
self.L.dumpr()
except:
self.L.build()
self.L.dumpw()
def sp(self,G, source, target, cutoff=None):
""" algorithm for signature determination
Parameters
----------
G : Graph
source : tuple or int
target : tuple or int
cutoff : int
See Also
--------
pylayers.antprop.signature.run3
"""
if cutoff < 1:
return
visited = [source]
stack = [iter(G[source])]
while stack:
children = stack[-1]
child = next(children, None)
if child is None:
stack.pop()
visited.pop()
elif len(visited) < cutoff:
if child == target:
for i in range(len(self.ds[source])):
s=self.ds[target][i] + visited
self.ds[target].append(s)
# yield visited +[target]
elif child not in visited:
visited.append(child)
stack.append(iter(G[child]))
else: #len(visited) == cutoff:
if child == target or target in children:
for i in range(len(self.ds[source])):
s=self.ds[target][i] + visited
self.ds[target].append(s)
stack.pop()
visited.pop()
def calsig(self,G,dia={},cutoff=None):
""" calculates signature
Parameters
----------
G : graph
        dia : dictionary of interactions
cutoff : integer
"""
if cutoff < 1:
return
di=copy.deepcopy(dia)
source = 'Tx'
target = 'Rx'
d={}
visited = [source]
stack = [iter(G[source])]
out=[]
while stack:
# pdb.set_trace()
children = stack[-1]
child = next(children, None)
if child is None:
stack.pop()
visited.pop()
if len(out) !=0:
out.pop()
out.pop()
elif len(visited) < cutoff:
if child == target:
lot = len(out)
try:
d.update({lot:d[lot]+(out)})
except:
d[lot]=[]
d.update({lot:d[lot]+(out)})
# yield visited + [target]
elif child not in visited:
visited.append(child)
out.extend(di[child])
stack.append(iter(G[child]))
else: #len(visited) == cutoff:
if child == target or target in children:
# yield visited + [target]
lot = len(out)
try:
d.update({lot:d[lot]+(out)})
except:
d[lot]=[]
d.update({lot:d[lot]+(out)})
stack.pop()
visited.pop()
if len(out) !=0:
out.pop()
out.pop()
return d
def exist(self,seq):
""" verifies if seq exists in signatures
Parameters
----------
seq : list of tuple
[(2,2),(5,3),(7,2)]
            1 : Diffraction
            2 : Reflexion
            3 : Transmission
        Returns
        -------
        bool
            True if seq is one of the stored signatures
Examples
--------
>>> DL=DLink()
>>> DL.eval()
>>> seq = [(2,3)] # transmission through segment 2
>>> DL.Si.exist(seq)
"""
# Number of interactions
N = len(seq)
# signatures with N interaction
sig = self[N]
# Number signature with N interaction
Nsig = sig.shape[0]/2
nstr = sig[::2,:]
typ = sig[1::2,:]
# List of signat
lsig = []
for k in range(Nsig):
lint = []
for l in range(N):
lint.append((nstr[k,l],typ[k,l]))
lsig.append(lint)
if seq in lsig:
return True
else:
return False
def run(self,**kwargs):
""" get signatures (in one list of arrays) between tx and rx
Parameters
----------
cutoff : int
limit the exploration of all_simple_path
bt : boolean
backtrace (allow to visit already visited nodes in simple path algorithm)
progress : boolean
display the time passed in the loop
diffraction : boolean
activate diffraction
threshold : float
for reducing calculation time
Returns
-------
siglist : numpy.ndarray
See Also
--------
pylayers.simul.link.Dlink.eval
pylayers.antprop.signature.Signatures.propath2
pylayers.antprop.signature.Signatures.procone2
"""
defaults = {'cutoff' : 2,
'threshold':0.1,
'nD':1,
'nR':10,
'nT':10,
'bt' : True,
'progress': True,
'diffraction' : True,
'animation' : False
}
        self.cpt = 0
for k in defaults:
if k not in kwargs:
kwargs[k] = defaults[k]
self.cutoff = kwargs['cutoff']
if 'threshold' not in kwargs:
kwargs['threshold'] = self.threshold
else:
self.threshold=kwargs['threshold']
nD = kwargs['nD']
nT = kwargs['nT']
nR = kwargs['nR']
bt = kwargs['bt']
progress = kwargs['progress']
diffraction = kwargs['diffraction']
animation = kwargs['animation']
self.filename = self.L._filename.split('.')[0] +'_' + str(self.source) +'_' + str(self.target) +'_' + str(self.cutoff) +'.sig'
#
# AIR : editable AIR separation
# _AIR : constructed AIR separation
#
lair = self.L.name['AIR']+self.L.name['_AIR']
# list of interactions visible from source
lisR,lisT,lisD = self.L.intercy(self.source,typ='source')
if diffraction:
lis = lisT + lisR + lisD
else:
lis = lisT + lisR
# list of interactions visible from target
litR,litT,litD = self.L.intercy(self.target,typ='target')
if diffraction:
lit = litT + litR + litD
else:
lit = litT + litR
#pdb.set_trace()
#print "source,lis :",self.source,lis
#print "target,lit :",self.target,lit
# for u in lit:
# print u
# print "-------------"
Gi = self.L.Gi
Gi.pos = self.L.Gi.pos
#
# remove diffractions from Gi
#
if not diffraction:
Gi = gidl(Gi)
        # initialize dout dictionary
        dout = {}
        # progress stuff...
lmax = len(lis)*len(lit)
pe = 0
tic = time.time()
tic0 = tic
#for interaction source in list of source interactions
bvisu = False
# signature counter
cptsig = 0
if animation:
fig,ax = self.L.showG('s',aw=1)
ax.plot(self.L.Gt.pos[self.source][0],self.L.Gt.pos[self.source][1],'ob')
ax.plot(self.L.Gt.pos[self.target][0],self.L.Gt.pos[self.target][1],'or')
#
# Loop over all interactions seen from the source
#
# us : loop counter
# s : interaction tuple
        # s[0] : point (<0) or segment (>0)
# pts : list of neighbour nodes from s[0]
# tahe : segment extremities or point coordinates (repeated twice)
lhash = []
if progress :
pbar = tqdm(total=100,desc='Signatures')
for us,s in enumerate(lis):
if progress:
pbar.update(100./(1.*len(lis)))
if s[0]>0:
pts = self.L.Gs[s[0]].keys()
tahe = [np.array([self.L.Gs.pos[pts[0]],self.L.Gs.pos[pts[1]]])]
else:
tahe = [np.array([self.L.Gs.pos[s[0]],self.L.Gs.pos[s[0]]])]
            # R is a list which contains reflection matrices (Sn) and translation vectors (vn)
# for interaction mirroring
# R=[[S0,v0],[S1,v1],...]
R = [(np.eye(2),np.array([0,0]))]
            # initialize the visited list sequence with the first interaction s
visited = [s]
# if
# + s is in target interaction list
# or
# + arrival cycle is equal to target cycle
# then stack a new signature in self[len(typ)]
#
# TODO : It concerns self[1] : only one interaction (i.e several single reflection or diffraction)
if (s in lit) or (s[-1]==self.target):
anstr = np.array(map(lambda x: x[0],visited))
typ = np.array(map(lambda x: len(x),visited))
assert(len(typ)==1)
try:
self[len(typ)] = np.vstack((self[len(typ)],anstr,typ))
self.ratio[len(typ)] = np.append(self.ratio[len(typ)],1.)
except:
self[len(typ)] = np.vstack((anstr,typ))
self.ratio[len(typ)] = np.array([1.])
# update signature counter
cptsig +=1
# stack is a list of iterators
#
#
stack = [iter(Gi[s])]
            # air walls do not count towards the number of transmissions (cutoff criterion)
# lawp is the list of airwall position in visited sequence
# handle the case of the first segment which can be an airwall
#
if len(s)==3:
nseg = s[0]
if ((self.L.Gs.node[nseg]['name']=='_AIR') or
(self.L.Gs.node[nseg]['name']=='AIR')):
lawp = [1]
else:
lawp = [0]
else:
lawp = [0]
# while the stack of iterators is not void
cpt = 0
while stack: #
# iter_on_interactions is the last iterator in the stack
iter_on_interactions = stack[-1]
# next interaction child
interaction = next(iter_on_interactions, None)
#print visited
#if ((visited ==[(6236,74,91),(-213,)]) and (interaction==(-1002,))):
# print interaction
# pdb.set_trace()
#if (visited ==[(6236,74,91),(-213,),(6248,99,111)]):
#if (visited ==[(6236,74,91),(-213,),(6248,99,111),(6287,111,118)]):
#pdb.set_trace()
# import ipdb
# cond1 : there is no more interactions
# continue if True
cond1 = not(interaction is None)
# cond2 : enable reverberation
# interaction has not been visited yet
# or
                # bt : True (allow reentrance) (unconditionally)
# continue if True
#cond2 = (interaction in visited) and bt (old)
cond2 = not (interaction in visited) or bt
# cond3 : test the cutoff condition not get to the limit
# continue if True
cond3 = not(len(visited) > (self.cutoff + sum(lawp)))
uD = [ k for k in range(len(visited)) if len(visited[k])==1 ]
uR = [ k for k in range(len(visited)) if len(visited[k])==2 ]
uT = [ k for k in range(len(visited)) if len(visited[k])==3 ]
if cond1:
condD = True
condR = True
condT = True
if ((len(interaction)==1) and (len(uD)==nD)):
condD = False
if ((len(interaction)==2) and (len(uR)==nR)):
condR = False
if ((len(interaction)==3) and (len(uT)==nT)):
condT = False
#
# animation
#
if animation :
cpt = cpt+1
edge=zip(visited[:-1],visited[1:])
N = nx.draw_networkx_nodes(Gi,pos=Gi.pos,
nodelist=visited,labels={},
node_size=15,ax=ax,fig=fig)
E = nx.draw_networkx_edges(Gi,pos=Gi.pos,
edgelist=edge,labels={},width=0.1,
arrows=False,ax=ax,fig=fig)
plt.savefig('./figure/' +str(us) +'_' + str(cpt) +'.png')
try:
ax.collections.remove(N)
except:
pass
try:
ax.collections.remove(E)
except:
pass
if (cond1 and cond2 and cond3):
if (condD and condR and condT):
visited.append(interaction)
self.cpt+=1
#print(visited)
# [(44,2,7),(62,7,15),(21,15),(62,15,7),(44,7,2),(16,2)]
# if visited ==[(6236,74,91),(141,91)]:
# import ipdb
# ipdb.set_trace()
# update list of airwalls
if interaction[0] in lair:
lawp.append(1)
else:
lawp.append(0)
# update number of useful segments
# if there is airwall in visited
nstr = interaction[0]
#
#
#
# Testing the type of interaction at rank -2
# R is a list which contains a rotation matrix
# and a translation vector for doing the mirroring
# operation
# diffraction (retrieve a point)
if len(visited[-2]) == 1:
#th = self.L.Gs.pos[nstr]
R.append((np.eye(2),np.array([0,0])))
elif len(visited[-2])==2:
#
                            # The penultimate interaction is a reflection
#
nseg_points = self.L.Gs[visited[-2][0]].keys()
ta_seg = np.array(self.L.Gs.pos[nseg_points[0]])
he_seg = np.array(self.L.Gs.pos[nseg_points[1]])
#
# get reflection matrix from segment visited[-2]
#
R.append(geu.axmat(ta_seg,he_seg))
# direct order
#R.append(geu.axmat(tahe[-1][0],tahe[-1][1]))
# transmission do nothing
else :
pass
# current interaction is of segment type
if (nstr>0):
nseg_points = self.L.Gs[nstr].keys()
th = np.array([self.L.Gs.pos[nseg_points[0]],
self.L.Gs.pos[nseg_points[1]]])
else:
th = self.L.Gs.pos[nstr]
th = np.array([th,th])
# current interaction is of point type (diffraction)
# apply current chain of symmetries
#
# th is the current segment tail-head coordinates
# tahe is a list of well mirrored tail-head coordinates
#tahe.append(a)
#if ((visited[0]==(104,23,17)) and (visited[1]==(1,17))):
# print("th (avant mirror)",th)
ik = 1
r = R[-ik]
#
# mirroring th until the previous point
#
th_mirror = copy.copy(th)
while np.any(r[0]!=np.eye(2)):
th_mirror = np.einsum('ki,ij->kj',th_mirror,r[0])+r[1]
ik = ik + 1
r = R[-ik]
# if at least 2 interactions
# or previous point is a diffraction
if (len(tahe)<2) or (len(visited[-2])==1) or (len(visited[-1])==1):
ratio = 1.0
else:
# Determine the origin of the cone
# either the transmitter (ilast =0)
# or the last diffraction point (ilast=udiff[-1] )
udiff = [ k for k in range(len(visited)) if len(visited[k])==1 ]
if udiff==[]:
ilast = 0
else:
ilast=udiff[-1]
                            pta0 = tahe[ilast][0] # tail first segment (last diffraction)
phe0 = tahe[ilast][1] # head first segment
pta_ = tahe[-1][0] # tail last segment
phe_ = tahe[-1][1] # head last segment
#
# Calculates the left and right vector of the cone
#
# vl left vector
# vr right vector
#
#
# Detect situations of connected segments
#
# [(60, 2, 8), (61, 8, 11), (15, 11), (61, 11, 8), (60 ,8, 2), (44, 2, 7)]
# if visited == [(60, 2, 8), (61, 8, 11), (15, 11), (61, 11, 8), (60 ,8, 2), (44, 2, 7)]:
# print '\n',visited
# import ipdb
# ipdb.set_trace()
connected = False
if (pta0==pta_).all():
apex = pta0
connected = True
v0=phe0-apex
v_=phe_-apex
elif (pta0==phe_).all():
apex = pta0
connected = True
v0=phe0-apex
v_=pta_-apex
elif (phe0==pta_).all():
apex = phe0
connected = True
v0=pta0-apex
v_=phe_-apex
elif (phe0==phe_).all():
apex = phe0
connected = True
v0=pta0-apex
v_=pta_-apex
if not connected:
if not (geu.ccw(pta0,phe0,phe_) ^
geu.ccw(phe0,phe_,pta_) ):
vr = (pta0,phe_)
vl = (phe0,pta_)
else: # twisted case
vr = (pta0,pta_)
vl = (phe0,phe_)
# cone dot product
# print vr
# print vl
vr_n = (vr[1]-vr[0])/np.linalg.norm(vr[1]-vr[0])
vl_n = (vl[1]-vl[0])/np.linalg.norm(vl[1]-vl[0])
vrdotvl = np.dot(vr_n,vl_n)
# cone angle
angle_cone = np.arccos(np.maximum(np.minimum(vrdotvl,1.0),-1.0))
#angle_cone = np.arccos(vrdotvl)
# prepare lines and seg argument for intersection checking
if angle_cone!=0:
linel = (vl[0],vl[1]-vl[0])
liner = (vr[0],vr[1]-vr[0])
# from origin mirrored segment to be tested
seg = (th_mirror[0],th_mirror[1])
# apex calculation
a0u = np.dot(pta0,vr_n)
a0v = np.dot(pta0,vl_n)
b0u = np.dot(phe0,vr_n)
b0v = np.dot(phe0,vl_n)
#import warnings
#warnings.filterwarnings("error")
try:
kb = ((b0v-a0v)-vrdotvl*(b0u-a0u))/(vrdotvl*vrdotvl-1)
except:
pdb.set_trace()
apex = phe0 + kb*vl_n
#if ((visited[0]==(104,23,17)) and (visited[1]==(1,17))):
# print(visited)
# print("th",th)
# print("tahe",tahe)
# print("ta_,he_",pta_,phe_)
# print("vr,vl",vr_n,vl_n)
# print('angle cone',angle_cone)
# print(apex)
else:
v0n = v0/np.linalg.norm(v0)
v_n = v_/np.linalg.norm(v_)
# import ipdb
# ipdb.set_trace()
sign = np.sign(np.cross(v_n,v0n))
if sign>0:
vr_n = -v0n
vl_n = v_n
else:
vr_n = v_n
vl_n = -v0n
# vr_n = (vr[1]-vr[0])/np.sqrt(np.sum((vr[1]-vr[0])*(vr[1]-vr[0]),axis=0))
# vl_n = (vl[1]-vl[0])/np.sqrt(np.sum((vl[1]-vl[0])*(vl[1]-vl[0]),axis=0))
vrdotvl = np.dot(vr_n,vl_n)
# cone angle
angle_cone = np.arccos(np.maximum(np.minimum(vrdotvl,1.0),-1.))
al = np.arctan2(vl_n[1],vl_n[0])
ar = np.arctan2(vr_n[1],vr_n[0])
#
                            # Connect the apex of the current cone to the extremities of the current mirrored segment
                            #
                            # In some circumstances, e.g. a cone emanating from a point collinear
                            # with the arrival segment (-4) (6,4): point -4 is aligned with segment 6,
                            # so the cone opening is zero => stop. This could be handled in Gi by
                            # forbidding the visibility (-4) (6,4)
#
if angle_cone ==0:
ratio = 0
else:
if np.allclose(th_mirror[0],apex) or np.allclose(th_mirror[1],apex):
ratio = 1.
else:
wseg0 = th_mirror[0] - apex
wseg1 = th_mirror[1] - apex
mod_wseg0 = np.sqrt(np.sum(wseg0*wseg0,axis=0))
mod_wseg1 = np.sqrt(np.sum(wseg1*wseg1,axis=0))
if np.isclose(mod_wseg0,0):
#bvisu = True
#pdb.set_trace()#
pass
if np.isclose(mod_wseg1,0):
#bvisu = True
#pdb.set_trace()#
pass
#wseg0_n = wseg0/mod_wseg0
#wseg1_n = wseg1/mod_wseg1
wseg0_n = wseg0/np.linalg.norm(wseg0)
wseg1_n = wseg1/np.linalg.norm(wseg1)
aseg0 = np.arctan2(wseg0_n[1],wseg0_n[0])
aseg1 = np.arctan2(wseg1_n[1],wseg1_n[0])
# if al==aseg0 or al==aseg1 or ar==aseg0 or ar==aseg1:
# ratio = 1
#print "toto"
# else:
I = geu.angle_intersection2(al,ar,aseg0,aseg1)
ratio = I/angle_cone
#if ratio>=1:
# pdb.set_trace()
# if connected:
# print "ratio :",ratio
#if visited == [(104, 23, 17), (1, 17), (53, 17)]:
if (bvisu):
fig ,ax = self.L.showG('s',aw=1,labels=0)
#
# magenta : start of the cone
# cyan :
# yellow : last interaction
#
ax = geu.linet(ax,pta0,phe0,al=1,color='magenta',linewidth=3)
ax = geu.linet(ax,pta_,phe_,al=1,color='cyan',linewidth=3)
ax = geu.linet(ax,np.array(self.L.Gs.pos[nseg_points[0]]),np.array(self.L.Gs.pos[nseg_points[1]]),al=1,color='yellow',linewidth=4)
# ax = geu.linet(ax,vr[0],vr[1],al=1,color='red',linewidth=3)
# ax = geu.linet(ax,vl[0],vl[1],al=1,color='blue',linewidth=3)
ax = geu.linet(ax,seg[0],seg[1],al=1,color='k',linewidth=3)
ax = geu.linet(ax,th_mirror[0,:],th_mirror[1,:],al=1,color='green',linewidth=3)
nx.draw_networkx_labels(self.L.Gi,
self.L.Gi.pos,labels={x:str(x) for x in visited},
ax=ax,fontsize=18)
plt.title(str(visited)+' '+str(ratio))
ax.plot(apex[0],apex[1],'or')
plt.axis('auto')
pdb.set_trace()
#if visited == [(104, 23, 17), (1, 17), (53, 17), (108, 17, 18)]:
# if visited == [(104, 23, 17), (1, 17), (53, 17)]:
if (1==0):
fig ,ax = self.L.showG('s',aw=1,labels=0)
ax = geu.linet(ax,pta0,phe0,al=1,color='magenta',linewidth=3)
ax = geu.linet(ax,pta_,phe_,al=1,color='cyan',linewidth=3)
ax = geu.linet(ax,np.array(self.L.Gs.pos[pts[0]]),np.array(self.L.Gs.pos[pts[1]]),al=1,color='yellow',linewidth=4)
ax = geu.linet(ax,vr[0],vr[1],al=1,color='red',linewidth=3)
ax = geu.linet(ax,vl[0],vl[1],al=1,color='blue',linewidth=3)
#ax = geu.linet(ax,seg[0],seg[1],al=1,color='k',linewidth=3)
ax = geu.linet(ax,th[0,:],th[1,:],al=1,color='green',linewidth=3)
plt.title(str(visited)+' '+str(ratio))
ax.plot(apex[0],apex[1],'or')
plt.axis('auto')
plt.show()
#else:
# th = self.L.Gs.pos[nstr]
# th = np.array([th,th])
# ratio = 1
if ratio > self.threshold:
#
# Update sequence of mirrored points
if nstr<0:
tahe.append(th)
else:
tahe.append(th_mirror)
#
# Check if the target has been reached
# sequence is valid and last interaction is in the list of targets
#if (interaction in lit) or (interaction[-1]==self.target):
if (interaction in lit):
# idea here is to produce signature without any airwalls
# lawp_tmp is a mask where 0 mean no air wall and 1 = airwall
# anstr does not contains airwalls
# lawp_tmp = [0]+lawp
# lll = [x[0] for ix,x in enumerate(visited) if lawp_tmp[ix]==1]
# print([self.L.Gs.node[x]['name'] for x in lll])
#anstr = np.array([x[0] for ix,x in enumerate(visited)
# if ((lawp[ix]!=1) or (x[0] in self.L.name['AIR']) or (x in (lit+lis)))] )
#typ = np.array([len(x) for ix,x in enumerate(visited)
# if ((lawp[ix]!=1) or (x[0] in self.L.name['AIR']) or (x in (lit+lis)))] )
#sig = np.array([anstr,typ])
#sighash = hash(str(sig))
# if len(anstr) == 2:
# if (anstr == np.array([323,351])).all():
# import ipdb
# ipdb.set_trace()
anstr = np.array([x[0] for x in visited ])
typ = np.array([len(x) for x in visited])
sig = np.array([anstr,typ])
sighash = hash(str(sig))
if sighash not in lhash:
lhash.append(sighash)
try:
self[len(typ)] = np.vstack((self[len(typ)],sig))
self.ratio[len(typ)] = np.append(self.ratio[len(typ)],ratio)
except:
self[len(typ)] = np.vstack((sig))
self.ratio[len(typ)] = np.array([ratio])
# print ('added',visited)
cptsig +=1
if animation:
Nf = nx.draw_networkx_nodes(Gi,pos=Gi.pos,
nodelist=visited,labels={},
node_color='b',
node_size=40,
ax=ax,fig=fig)
Ef = nx.draw_networkx_edges(Gi,pos=Gi.pos,
edgelist=edge,labels={},
width=0.1,arrows=False,
ax=ax,fig=fig)
cpt=cpt+1
plt.savefig('./figure/' +str(us) +'_' + str(cpt) +'.png')
try:
ax.collections.remove(Nf)
except:
pass
try:
ax.collections.remove(Ef)
except:
pass
outint = Gi[visited[-2]][interaction]['output'].keys()
#
# proint not used
#
proint = Gi[visited[-2]][interaction]['output'].values()
nexti = [it for it in outint ]
stack.append(iter(nexti))
# 1590 ratio <= threshold
else:
if len(visited)>1:
if ((len(visited[-2])==2) or len(visited[-2])==1):
R.pop()
last = visited.pop()
lawp.pop()
# 1389 condR and condT and condD
else:
pass
# 1388 cond1 and cond2 and cond3
else:
# if there are at least 2 interactions
# and the antepenultimate interaction is a reflexion
if len(visited)>1:
if ((len(visited[-2])==2) or len(visited[-2])==1):
R.pop()
last = visited.pop()
#
# Poping
# tahe
# lawp
# stack
tahe.pop()
try:
lawp.pop()
except:
pass
stack.pop()
#stack.pop()
def plot_cones(self,L,i=0,s=0,fig=[],ax=[],figsize=(10,10)):
""" display cones of an unfolded signature
Parameters
----------
L : Layout
i : int
the interaction block
s : int
the signature number in the block
fig :
ax :
figsize :
"""
if fig == []:
fig= plt.figure()
ax = fig.add_subplot(111)
elif ax ==[]:
ax = fig.add_subplot(111)
pta,phe = self.unfold(L,i=i,s=s)
# create a global array or tahe segments
seg = np.vstack((pta,phe))
lensi = np.shape(seg)[1]
for s in range(1,lensi):
pseg0 = seg[:,s-1].reshape(2,2).T
pseg1 = seg[:,s].reshape(2,2).T
#
# create the cone seg0 seg1
#
cn = cone.Cone()
cn.from2segs(pseg0,pseg1)
fig,ax = cn.show(fig = fig,ax = ax,figsize = figsize)
return (fig,ax)
def unfold(self,L,i=0,s=0):
""" unfold a given signature
return 2 np.ndarray of pta and phe "aligned"
(reflexion interaction are mirrored)
Parameters
----------
L : Layout
i : int
the interaction block
s : int
the signature number in the block
Returns
-------
pta,phe
See Also
--------
Signature.unfold
"""
si = Signature(self[i][(2*s):(2*s)+2])
si.ev(L)
pta,phe = si.unfold()
return pta,phe
def pltunfold(self,L,i=0,s=0):
import shapely.ops as sho
from descartes.patch import PolygonPatch
plt.ion()
plt.gcf()
plt.clf()
def plot_lines(ax, ob, color = []):
for ii,line in enumerate(ob):
if color == []:
if ii ==0 :
c ='g'
elif ii == len(ob)-1:
c ='r'
else:
c= 'k'
else:
c=color
x, y = line.xy
ax.plot(x, y, color=c, alpha=0.7, linewidth=3, solid_capstyle='round', zorder=2)
return ax
def plot_poly(ax, ob, color = []):
for ii,poly in enumerate(ob):
pp = PolygonPatch(poly,alpha=0.3)
ax.add_patch(pp)
return ax
pta,phe=self.unfold(L=L,i=i,s=s)
ML =sh.MultiLineString([((pta[0][i],pta[1][i]),(phe[0][i],phe[1][i])) for i in range(pta.shape[1])])
fig=plt.gcf()
ax=plt.gca()
ax = plot_lines(ax,ML)
s0=sh.LineString([(pta[0,0],pta[1,0]),(phe[0,-1],phe[1,-1])])
s1=sh.LineString([(phe[0,0],phe[1,0]),(pta[0,-1],pta[1,-1])])
if s0.crosses(s1):
s0=sh.LineString([(pta[0,0],pta[1,0]),(pta[0,-1],pta[1,-1])])
s1=sh.LineString([(phe[0,0],phe[1,0]),(phe[0,-1],phe[1,-1])])
cross = sh.MultiLineString([s0,s1,ML[0],ML[-1]])
poly=sho.polygonize(cross)
# ax = plot_lines(ax,cross,color='b')
ax = plot_poly(ax,poly)
def show(self,L,**kwargs):
""" plot signatures within the simulated environment
Parameters
----------
L : Layout
i : list or -1 (default = all groups)
list of interaction group numbers
s : list or -1 (default = all sig)
list of indices of signature in interaction group
ctx : cycle of tx (optional)
crx : cycle of rx (optional)
graph : type of graph to be displayed
color : string
alphasig : float
widthsig : float
colsig : string
ms : int
ctx : int
crx :int
"""
defaults = {'i':-1,
's':-1,
'fig':[],
'ax':[],
'graph':'s',
'color':'black',
'alphasig':1,
'widthsig':0.1,
'colsig':'black',
'ms':5,
'ctx':-1,
'crx':-1,
'aw':True
}
for key, value in defaults.items():
if key not in kwargs:
kwargs[key] = value
# display layout
fig,ax = L.showG(**kwargs)
if kwargs['ctx']!=-1:
Tpoly = self.L.Gt.node[kwargs['ctx']]['polyg']
Tpoly.coul='r'
Tpoly.plot(fig=fig,ax=ax,color='r')
if kwargs['crx']!=-1:
Rpoly = self.L.Gt.node[kwargs['crx']]['polyg']
Rpoly.plot(fig=fig,ax=ax,color='g')
# i=-1 all rays
# else block of interactions i
if kwargs['i']==-1:
lgrint = self.keys()
else:
lgrint = [kwargs['i']]
for i in lgrint:
if kwargs['s']==-1:
lsig = range(len(self[i])/2)
else:
lsig = [kwargs['s']]
for j in lsig:
sig = map(lambda x: self.L.Gs.pos[x],self[i][2*j])
siga = np.array(sig)
# sig = np.hstack((self.pTx[0:2].reshape((2, 1)),
# np.hstack((self[i]['pt'][0:2, :, j],
# self.pRx[0:2].reshape((2, 1))))
# ))
ax.plot(siga[:,0], siga[:,1],
alpha=kwargs['alphasig'],color=kwargs['colsig'],linewidth=kwargs['widthsig'])
ax.axis('off')
return(fig,ax)
def showi(self,uni=0,us=0):
""" interactive show
press n to visit signatures sequentially
Parameters
----------
uni : index of interaction dictionary keys
us : signature index
"""
plt.ion()
fig = plt.figure()
nit = self.keys()
ni = nit[uni]
ust = len(self[ni])/2
polyS = self.L.Gt.node[self.source]['polyg']
cp1 = polyS.centroid.xy
polyT = self.L.Gt.node[self.target]['polyg']
cp2 = polyT.centroid.xy
ptx = np.array([cp1[0][0],cp1[1][0]])
prx = np.array([cp2[0][0],cp2[1][0]])
st='a'
while st != 'q':
inter=[]
ax = fig.add_subplot(111)
fig,ax=self.L.showG(fig=fig,ax=ax,graph='s')
title = '# interaction : %s signature # %s / %s' % (ni, us, ust)
ax.set_title(title)
line = ptx
# draw terminal points (centroid of source and target cycle)
ax.plot(ptx[0],ptx[1],'xr')
ax.plot(prx[0],prx[1],'xb')
if ni not in self.keys():
print "incorrect number of interactions"
pos={}
try:
for u in self[ni][us*2]:
pos.update({u:self.L.Gs.pos[u]})
line = np.vstack((line,np.array((self.L.Gs.pos[u]))))
nx.draw_networkx_nodes(self.L.Gs,pos=pos,nodelist=pos.keys(),node_color='r',ax=ax)
for ii in self[ni][(us*2)+1]:
if ii == 1:
inter.append('R')
if ii == 2:
inter.append('T')
if ii == 3:
inter.append('D')
except:
print "signature index out of bounds of signature"
line = np.vstack((line,prx))
ax.plot(line[:,0],line[:,1])
plt.draw()
print inter
st = raw_input()
ax.cla()
if st == 'n':
if us+2 <= ust:
us=us+2
else:
uni = uni+1
try:
ni = nit[uni]
ust = len(self[ni])/2
us=0
except:
uni=0
ni=nit[uni]
us = 0
else:
print 'press n for next signature'
def rays(self,ptx=0,prx=1):
""" from signatures dict to 2D rays
Parameters
----------
ptx : numpy.array or int
Tx coordinates, or the number of the cycle whose center of gravity
is used as Tx if type(ptx) == int
prx : numpy.array or int
Rx coordinates, or the number of the cycle whose center of gravity
is used as Rx if type(prx) == int
Returns
-------
rays : Rays
Notes
-----
At the same time the signature of the ray is stored in the Rays object
Todo : find the best memory implementation
See Also
--------
Signature.sig2ray
"""
if type(ptx)==int:
ptx = np.array(self.L.Gt.pos[ptx])
if type(prx)==int:
prx = np.array(self.L.Gt.pos[prx])
rays = Rays(ptx,prx)
#
# detect LOS situation
#
#
# cycle on a line between 2 cycles
# lc = self.L.cycleinline(self.source,self.target)
#
# if source and target in the same merged cycle
# and ptx != prx
#
los = shg.LineString(((ptx[0], ptx[1]), (prx[0], prx[1])))
# convex cycle of each point
cyptx = self.L.pt2cy(ptx)
cyprx = self.L.pt2cy(prx)
# merged cycle of each point
polyctx = self.L.Gt.node[cyptx]['polyg']
polycrx = self.L.Gt.node[cyprx]['polyg']
#
# Handling LOS ray
#
dtxrx = np.sum((ptx-prx)*(ptx-prx))
if dtxrx>1e-15:
if cyptx==cyprx:
if polyctx.contains(los):
rays.los = True
else:
rays.los = False
# k : Loop on interaction group
# l : loop on signature
# --->
# this part should be a generator
#
for k in self:
# print 'block#',k
# if k ==3:
# import ipdb
# ipdb.set_trace()
# get signature block with k interactions
tsig = self[k]
shsig = np.shape(tsig)
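# self[k] stacks the k-interaction signatures vertically, two rows per
# signature : row 2*l holds the node ids (segments > 0, points < 0) and
# row 2*l+1 holds the interaction types, hence shsig[0]/2 signatures.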
for l in range(shsig[0]/2):
sig = tsig[2*l:2*l+2,:]
ns0 = sig[0,0]
nse = sig[0,-1]
validtx = True
validrx = True
if (ns0<0):
pD = self.L.Gs.pos[ns0]
TxD = shg.LineString(((ptx[0], ptx[1]), (pD[0], pD[1])))
seg = polyctx.intersection(TxD)
validtx = seg.almost_equals(TxD,decimal=4)
if not validtx:
pass
#print "Signature.rays": ns0
if (nse<0):
pD = self.L.Gs.pos[nse]
DRx = shg.LineString(((pD[0], pD[1]), (prx[0], prx[1])))
validrx = polyctx.contains(DRx)
if not validrx:
pass
#print nse
if validtx & validrx:
# print sig
# print pD
s = Signature(sig)
#
# Transform signature into a ray
# --> sig2ray
isray,Yi = s.sig2ray(self.L, ptx[:2], prx[:2])
if isray:
Yi = np.fliplr(Yi)
if k in rays.keys():
Yi3d = np.vstack((Yi[:, 1:-1], np.zeros((1, k))))
Yi3d = Yi3d.reshape(3, k, 1)
rays[k]['pt'] = np.dstack(( rays[k]['pt'], Yi3d))
rays[k]['sig'] = np.dstack(( rays[k]['sig'],
sig.reshape(2, k, 1)))
else:
rays[k] = {'pt': np.zeros((3, k, 1)),
'sig': np.zeros((2, k, 1),dtype=int)}
rays[k]['pt'][0:2, :, 0] = Yi[:, 1:-1]
rays[k]['sig'][:, :, 0] = sig
rays.nb_origin_sig = len(self)
rays.origin_sig_name = self.filename
return rays
def raysv(self,ptx=0,prx=1):
""" transform dict of signatures into 2D rays - default vectorized version
Parameters
----------
ptx : numpy.array or int
Tx coordinates is the center of gravity of the cycle ptx if
type(ptx)=int
prx : numpy.array or int
Rx coordinates is the center of gravity of the cycle prx if
type(prx)=int
Returns
-------
rays : Rays
Notes
-----
This is a vectorized version of Signatures.rays.
This implementation takes advantage of the np.ndarray
and calculates images and backtrace for blocks of signatures.
A block of signatures gathers all signatures with the same number of interactions.
For mathematical details see :
@phdthesis{amiot:tel-00971809,
TITLE = {{Design of simulation platform joigning site specific radio propagation and human mobility for localization applications}},
AUTHOR = {Amiot, Nicolas},
URL = {https://tel.archives-ouvertes.fr/tel-00971809},
NUMBER = {2013REN1S125},
SCHOOL = {{Universit{\'e} Rennes 1}},
YEAR = {2013},
MONTH = Dec,
KEYWORDS = {Electromagnetic wave propagation simulation ; Human mobility simulation ; Wireless localization methods ; Position estimation methods in wireless networks ; Vectorized computation ; Ray-tracing ; Ultra wide band ; Simulateur de propagation {\'e}lectromagn{\'e}tique ; Simulateur de mobilit{\'e} humaine ; M{\'e}thodes de localisation sans fils ; M{\'e}thodes d'estimation de la position dans les r{\'e}seaux sans fils ; Calcul informatique vectoris{\'e} ; Outil de trac{\'e} de rayons ; Ultra large bande},
TYPE = {Theses},
HAL_ID = {tel-00971809},
HAL_VERSION = {v1},
}
See Also
--------
Signatures.image
Signatures.backtrace
"""
if type(ptx)==int:
ptx = np.array(self.L.Gt.pos[ptx])
if type(prx)==int:
prx = np.array(self.L.Gt.pos[prx])
if len(ptx) == 2:
ptx= np.r_[ptx,0.5]
if len(prx) == 2:
prx= np.r_[prx,0.5]
rays = Rays(ptx,prx)
#
# detect LOS situation
#
#
# cycle on a line between 2 cycles
# lc = self.L.cycleinline(self.source,self.target)
#
# if source and target are in the same merged cycle
# and ptx != prx
#
los = shg.LineString(((ptx[0], ptx[1]), (prx[0], prx[1])))
# convex cycle of each point
cyptx = self.L.pt2cy(ptx)
cyprx = self.L.pt2cy(prx)
polyctx = self.L.Gt.node[cyptx]['polyg']
polycrx = self.L.Gt.node[cyprx]['polyg']
# The Line of sight situation is detected here
# dtxrx : squared distance between Tx and Rx
dtxrx = np.sum((ptx-prx)*(ptx-prx))
if dtxrx>1e-15:
if polyctx.contains(los):
rays.los = True
else:
rays.los = False
M = self.image2(ptx)
R = self.backtrace(ptx,prx,M)
#
# Add LOS ray in ray 2D
#
if rays.los:
R[0]= {'sig':np.zeros(shape=(0,0,1)),'pt': np.zeros(shape=(2,1,0))}
rays.update(R)
rays.nb_origin_sig = len(self)
rays.origin_sig_name = self.filename
#pdb.set_trace()
return rays
def backtrace(self, tx, rx, M):
''' backtracing between tx and rx
Warning :
This is an attempt to vectorize the backtrace process.
Although it has been tested successfully on a few cases,
it is still new and needs to be validated.
Parameters
----------
tx : ndarray
position of tx (2,)
rx : ndarray
position of rx (2,)
M : dict
position of intermediate points obtained from self.image()
Returns
-------
rayp : dict
key = number_of_interactions
value = ndarray of interaction positions for creating rays
Notes
-----
dictionary of intermediate coordinates :
key = number_of_interactions
value = nd array M with shape : (2,nb_signatures,nb_interactions)
and 2 represent x and y coordinates
See Also
--------
pylayers.antprop.signature.image
'''
if len(tx) > 2:
tx = tx[:2]
if len(rx) > 2:
rx = rx[:2]
rayp={}
# loop on number of interactions
for ninter in self.keys():
signatures = copy.deepcopy(self[ninter])
#get segment ids of signature with ninter interactions
# seg = self[ninter][::2]
# unegseg=np.where(seg<0)
# uninegseg,idx = np.unique(seg[unegseg],return_inverse=True)
# pneg = np.array([self.L.Gs.pos[x] for x in uninegseg])
# nsig = len(seg)
# # determine positions of points limiting the semgments
# #1 get index in L.tahe
# # 2 get associated position in L.pt
# utahe = self.L.tahe[:,self.L.tgs[seg]]
# # pt : (xycoord (2),pt indexes (2),nb_signatures,nb_interactions)
# pt = self.L.pt[:,utahe]
# ####WARNING BIG TRICK HERE :
# #### pa and pb are not set as the same value
# #### to avoid a singular matrixnext.
# #### set pa =-pb has no incidence but avoid complex and vain code
# #### modification for handling diffractions
# try:
# pt[:,0,unegseg[0],unegseg[1]]=pneg[idx].T
# pt[:,1,unegseg[0],unegseg[1]]=-pneg[idx].T
# except:
# pass
# pt shape =
# 0 : (x,y) coordinates x=0,y=1
# 1 : 2 points (linking the segment) a=0,b=1
# 2 : nb of found signatures/segments
# 3 : nb interaction
################################
###############################
####### This part between hash has been copy/paste from self.image2
###### should be considered to become a function
#get segment ids of signature with ninter interactions
# nid = node id
nid = self[ninter][::2]
nsig = len(nid)
# pt shape =
# 0 : (x,y) coordinates x=0,y=1
# 1 : 2 points (linking the segment) a=0,b=1
# 2 : nb of found signatures/segments
# 3 : nb interactions
pt = np.empty((2,2,nsig,ninter))
# 1 negative points
# seek for diffraction
# negative index points are diffraction points
upoint = np.where(nid<0)
unipoint,idx = np.unique(nid[upoint],return_inverse=True)
#get their coordinates
#
# TO BE FIXED
#
#upointcoord = self.L.iupnt[-unipoint]
#pointcoord = self.L.pt[:,upointcoord]
pointcoord = np.array([ (self.L.Gs.pos[x][0],self.L.Gs.pos[x][1]) for x in unipoint ]).T
# #### WARNING BIG TRICK HERE :
# #### pa and pb are not set to the same value
# #### in order to avoid a singular matrix in the next step.
# #### Setting pa = -pb has no incidence but avoids complex and vain code
# #### modifications for handling diffractions
try:
pt[:,0,upoint[0],upoint[1]] = pointcoord[:,idx]
pt[:,1,upoint[0],upoint[1]] = -pointcoord[:,idx]
except:
pass
# 2 positive points
# seek for segments
useg = np.where(nid>0)
# removing duplicates ( for increasing speed)
uniseg,idxp = np.unique(nid[useg],return_inverse=True)
# determine positions of points limiting the segments
#1 get index in L.tahe
utahe = self.L.tahe[:,self.L.tgs[uniseg]]
segcoord = self.L.pt[:,utahe]
pt[:,:,useg[0],useg[1]]=segcoord[:,:,idxp]
###################################
########################################
# how to do this into a while loop ?
p=rx
# creating W matrix required in eq (2.70) thesis Nicolas AMIOT
# Warning W is rolled after and becomes (nsig,4,4)
W = np.zeros((4,4,nsig))
I = np.eye(2)[:,:,np.newaxis]*np.ones((nsig))
W[:2,:2,...] = I
W[2:4,:2,...] = I
# once rolled :
# W (nsig,4,4)
W = np.rollaxis(W,-1)
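# Each signature yields a 4x4 system W.u = y with unknowns
# u = (x, y, alpha, beta) : (x, y) is the intersection of the line
# joining the current point p to its image m with the interaction
# segment [a, b]; alpha parametrizes the position along (p, m) and
# beta the position along (a, b). A backtraced point is kept only if
# 0 < alpha < 1 and epsilon <= beta <= 1 - epsilon (see below).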
kinter=ninter-1
ptr = pt
Mr = copy.deepcopy(M)
epsilon = 1e-12
rayp_i = np.zeros((3,nsig,ninter))
# rayp_i[:2,:,-1]=rx[:,None]
#backtrace process
# if ninter == 6:
# print np.where(((signatures[:,0]==42) &(signatures[:,1]==-277) & (signatures[:,2]==135) & (signatures[:,3]==21) & (signatures[:,4]==46) & (signatures[:,5]==319)))
# import ipdb
# ipdb.set_trace()
while kinter > -1:
#Initialization, using the Rx position (the backtrace starts from rx)
if kinter == ninter-1:
p_min_m = p[:,np.newaxis]-Mr[ninter][:,:,kinter]
else :
p_min_m = pvalid[:].T-Mr[ninter][:,:,kinter]
a_min_b = ptr[:,0,:,kinter]-ptr[:,1,:,kinter]
# Creating W from eq (2.71)
# a_min_b <=> a_{Lh-l}-b_{Lh-l}
# p_min_m <=> \tilde{p}_{Lh}-\tilde{b}_{Lh-l}
# W (nsig,4,4)
# p_min_m (2,nsig)
# a_min_b (2,nsig)
W[...,:2,2] = p_min_m.T
W[...,2:,3] = a_min_b.T
# create 2nd member from eq (2.72)
if kinter == ninter-1:
y= np.concatenate((p[:,np.newaxis]*np.ones((nsig)),ptr[:,0,:,kinter]))
else:
y= np.concatenate((pvalid.T,ptr[:,0,:,kinter]))
# y once transposed :
# y (nsig,4)
y=y.T
# search and remove point with singular matrix
invalid_sig=np.where(abs(np.linalg.det(W))<1e-15)
W = np.delete(W,invalid_sig,axis=0)
y = np.delete(y,invalid_sig,axis=0)
ptr = np.delete(ptr,invalid_sig,axis=2)
Mr[ninter] = np.delete(Mr[ninter],invalid_sig,axis=1)
rayp_i = np.delete(rayp_i,invalid_sig,axis=1)
#remove signatures
usig = np.repeat(invalid_sig[0],2)
usig[::2]=usig[::2]*2
usig[1::2]=usig[1::2]*2+1
signatures = np.delete(signatures,usig,axis=0)
# detect diffrac
uD = signatures[1::2,kinter]==1
uuD = np.where(signatures[1::2,kinter]==1)[0]
psolved = np.linalg.solve(W,y)
#valid ray is : 0 < \alpha < 1 and 0< \beta < 1
# alpha
uvalidA = psolved[:,2]>0.
uvalidB = psolved[:,2]<1.
#beta
uvalidC = psolved[:,3] >= epsilon
uvalidD = psolved[:,3] <=1.-epsilon
valid = uvalidA & uvalidB & uvalidC & uvalidD
# consider valid diffraction interactions
valid = valid | uD
uvalid = np.where(valid)[0]
# re-add the correct position of diffraction interactions
# indeed diffraction points should not be solved with linalg,
# but by setting pa=-pb no singular matrix appears
# and diffraction points can be re-added afterwards.
psolved[uuD,:2] = ptr[:,0,uuD,kinter].T
pvalid = psolved[uvalid,:2]
# keep only valid rays for ptr and Mr
Mr[ninter]=Mr[ninter][:,uvalid,:]
ptr=ptr[:,:,uvalid,:]
W = W[uvalid,:,:]
# remove signatures
usigv = np.repeat(uvalid,2)
usigv[::2]=usigv[::2]*2
usigv[1::2]=usigv[1::2]*2+1
signatures = signatures[usigv,:]
rayp_i[:2,uvalid,kinter] = pvalid.T
rayp_i = rayp_i[:,uvalid,:]
#if no more rays are valid , then quit block
# (kinter <0 is the exit while condition)
if len(uvalid) > 0 :
kinter=kinter-1
else :
kinter = -2
# rayp_i[:2,:,0]=tx[:,None]
if len(uvalid) !=0:
sir1=signatures[::2].T.reshape(ninter,len(usigv)/2)
sir2=signatures[1::2].T.reshape(ninter,len(usigv)/2)
sig = np.empty((2,ninter,len(usigv)/2))
sig[0,:,:]=sir1
sig[1,:,:]=sir2
rayp_i=np.swapaxes(rayp_i,1,2)
rayp.update({ninter:{'pt':rayp_i,'sig':sig.astype('int')}})
return rayp
def image2(self,tx):
""" determine rays from images (second implementation)
Parameters
----------
tx : point
"""
if len(tx) > 2:
tx = tx[:2]
dM={}
# loop on number of interactions
for ninter in self.keys():
#get segment ids of signature with ninter interactions
# nid = node id
nid = self[ninter][::2]
nsig = len(nid)
M = np.empty((2,nsig,ninter))
# pt shape =
# 0 : (x,y) coordinates x=0,y=1
# 1 : 2 points (linking the segment) a=0,b=1
# 2 : nb of found signatures/segments
# 3 : nb interactions
pt = np.nan*np.empty((2,2,nsig,ninter))
#1 negative points
# seek for diffraction
# negative index points are diffraction points
upoint = np.where(nid<0)
unipoint,idxpt = np.unique(nid[upoint],return_inverse=True)
#get their coordinates
#
# To be FIXED
#
#upointcoord = self.L.iupnt[-unipoint]
#pointcoord = self.L.pt[:,upointcoord]
pointcoord = np.array([ (self.L.Gs.pos[x][0],self.L.Gs.pos[x][1]) for x in unipoint ]).T
# try except to handle the case where there is no diffraction point
try:
pt[:,0,upoint[0],upoint[1]] = pointcoord[:,idxpt]
pt[:,1,upoint[0],upoint[1]] = pointcoord[:,idxpt]
except:
pass
#2 positive points
#seek for segments
useg = np.where(nid>0)
# removing duplicates ( for increasing speed)
uniseg,idxseg = np.unique(nid[useg],return_inverse=True)
# determine positions of points limiting the segments
#1 get index in L.tahe
utahe = self.L.tahe[:,self.L.tgs[uniseg]]
segcoord = self.L.pt[:,utahe]
pt[:,:,useg[0],useg[1]]=segcoord[:,:,idxseg]
# check every element of pt is filled
assert not np.isnan(pt).any()
#
# TODO Upgrading layout for handling slab offsets
#
# uncomment those two lines when the numpy array L.norm and
# L.offset exist
#norm = self.L.normal[:,utahe]
#offset = self.L.offset[:,utahe]
# pt = pt + offset*norm
############
#formula 2.61 -> 2.64 N.AMIOT PH.D thesis
############
sx = pt[0,1,:,:]-pt[0,0,:,:]
sy = pt[1,1,:,:]-pt[1,0,:,:]
den = sx**2+sy**2
# den = ((pt[0,0,:,:]-pt[0,1,:,:])**2+(pt[1,0,:,:]-pt[1,1,:,:])**2)
# avoiding singularity (should not be possible)
uz = np.where(den==0)
den[uz] = 1.
a = 1 - (2. / den) * (pt[1,0,:, :] - pt[1,1,:, :]) ** 2
b= (2. / den) * (pt[0,1,:, :] - pt[0,0,:, :]) * (pt[1,0,:, :] - pt[1,1,:, :])
c = (2. / den) * (pt[0,0,:, :] * (pt[1,0,:, :] - pt[1,1,:, :]) ** 2 +
pt[1,0,:, :] * (pt[1,0,:, :] - pt[1,1,:, :]) *
(pt[0,1,:, :] - pt[0,0,:, :]))
d = (2. / den) * (pt[1,0,:, :] * (pt[0,1,:, :] - pt[0,0,:, :]) ** 2 +
pt[0,0,:, :] * (pt[1,0,:, :] - pt[1,1,:, :]) *
(pt[0,1,:, :] - pt[0,0,:, :]))
# a = ((pt[0,0,:,:]-pt[0,1,:,:])**2-(pt[1,0,:,:]-pt[1,1,:,:])**2)
# a=a/(1.*den)
# b = 2*(pt[0,1,:,:]-pt[0,0,:,:])*(pt[1,1,:,:]-pt[1,0,:,:])
# b=b/(1.*den)
# c= 2*(pt[0,0,:,:]*(pt[1,0,:,:]-pt[1,1,:,:])**2+pt[1,0,:,:]*(pt[0,1,:,:]-pt[0,0,:,:])*(pt[1,0,:,:]-pt[1,1,:,:]))
# c = c/(1.*den)
# d= 2*(pt[0,0,:,:]*(pt[1,0,:,:]-pt[1,1,:,:])*(pt[0,1,:,:]-pt[0,0,:,:])+pt[1,0,:,:]*(pt[0,1,:,:]-pt[0,0,:,:])**2)
# d= d/(1.*den)
# K=np.array([[a,-b],[-b,-a]])
K = np.array([[a,-b],[-b,-a]])
# translation vector v (2.60)
v =np.array(([c,d]))
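# For a reflection, the image of a point p with respect to the line
# supporting the segment is M = K.p + v (mirror transform, (2.59)-(2.60)).
# Quick check : mirroring p = (0, 1) across the segment ((0, 0), (1, 0))
# gives K = [[1, 0], [0, -1]], v = (0, 0) and thus M = (0, -1).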
ityp = self[ninter][1::2]
for n in xrange(ninter):
#get segment ids of signature with ninter interactions
uT = np.where(ityp[:,n]==3)[0]
uR = np.where(ityp[:,n]==2)[0]
uD = np.where(ityp[:,n]==1)[0]
if n ==0:
p=tx[:,None]*np.ones((nsig))
else :
p=M[:,:,n-1]
#reflexion 0 (2.67)
M[:,uR,n] = np.einsum('ijk,jk->ik',K[:,:,uR,n],p[:,uR])+v[:,uR,n]
#transmission 0 (2.67)
M[:,uT,n] = p[:,uT]
M[:,uD,n] = pt[:,0,uD,n]
# if ninter==6:
# print np.where(((seg[:,0]==42) & (seg[:,1]==-277) & (seg[:,2]==135) & (seg[:,3]==21)&(seg[:,-1]==319)))
# import ipdb
# ipdb.set_trace()
dM.update({ninter:M})
return dM
def image(self,tx=np.array([2.7,12.5])):
''' Warning :
This is an attempt to vectorize the image process.
Although it has been tested successfully on a few cases,
it is still new and needs to be validated.
Parameters
----------
tx : ndarray
position of tx (2,)
Returns
-------
M : dict
dictionary of intermediate coordinates :
key = number_of_interactions
value = nd array M with shape : (2,nb_signatures,nb_interactions)
and 2 represent x and y coordinates
'''
if len(tx) > 2:
tx = tx[:2]
def nb_split(a):
nsp = 2
out=False
while not out:
res=a%nsp
if res!=0:
nsp=nsp+1
else:
out=True
return nsp
dM={}
for ninter in self.keys():
#get segment ids of signature with ninter interactions
seg = self[ninter][::2]
nsig = len(seg)
# determine positions of points limiting the segments
#1 get index in L.tahe
# 2 get associated position in L.pt
#utahe (2 pt indexes,nb_signatures,nb_interactions)
utahe = self.L.tahe[:,self.L.tgs[seg]]
# pt : (xycoord (2),pt indexes (2),nb_signatures,nb_interactions)
pt = self.L.pt[:,utahe]
# pt shape =
# 0 : (x,y) coordinates x=0,y=1
# 1 : 2 points (linking the segment) a=0,b=1
#2 : nb of found signatures/segments
# 3 : nb interaction
############
#formula 2.61 -> 2.64 N.AMIOT thesis
############
den = ((pt[0,0,:,:]-pt[0,1,:,:])**2+(pt[1,0,:,:]-pt[1,1,:,:])**2)
uz = np.where(den ==0)
den[uz] = 1.
a = 1 - (2. / den) * (pt[1,0,:, :] - pt[1,1,:, :]) ** 2
b= (2. / den) * (pt[0,1,:, :] - pt[0,0,:, :]) * (pt[1,0,:, :] - pt[1,1,:, :])
c = (2. / den) * (pt[0,0,:, :] * (pt[1,0,:, :] - pt[1,1,:, :]) ** 2 +
pt[1,0,:, :] * (pt[1,0,:, :] - pt[1,1,:, :]) *
(pt[0,1,:, :] - pt[0,0,:, :]))
d = (2. / den) * (pt[1,0,:, :] * (pt[0,1,:, :] - pt[0,0,:, :]) ** 2 +
pt[0,0,:, :] * (pt[1,0,:, :] - pt[1,1,:, :]) *
(pt[0,1,:, :] - pt[0,0,:, :]))
# den = ((pt[0,0,:,:]-pt[0,1,:,:])**2+(pt[1,0,:,:]-pt[1,1,:,:])**2)
# a = ((pt[0,0,:,:]-pt[0,1,:,:])**2-(pt[1,0,:,:]-pt[1,1,:,:])**2)
# a=a/(1.*den)
# b = 2*(pt[0,1,:,:]-pt[0,0,:,:])*(pt[1,1,:,:]-pt[1,0,:,:])
# b=b/(1.*den)
# c= 2*(pt[0,0,:,:]*(pt[1,0,:,:]-pt[1,1,:,:])**2+pt[1,0,:,:]*(pt[0,1,:,:]-pt[0,0,:,:])*(pt[1,0,:,:]-pt[1,1,:,:]))
# c = c/(1.*den)
# d= 2*(pt[0,0,:,:]*(pt[1,0,:,:]-pt[1,1,:,:])*(pt[0,1,:,:]-pt[0,0,:,:])+pt[1,0,:,:]*(pt[0,1,:,:]-pt[0,0,:,:])**2)
# d= d/(1.*den)
#get segment ids of signature with ninter interactions
ityp = self[ninter][1::2]
uT = np.where(ityp[:,1:]==3)
uR = np.where(ityp[:,1:]==2)
uD=np.where(ityp[:,1:]==1)
#create matrix AM which is used to create matrix A from eq. 2.65
AM = np.eye(2*ninter)[:,:,np.newaxis]*np.ones(nsig)
# Reflexion Matrix K (2.59)
K=np.array([[a,-b],[-b,-a]])
# translation vector v (2.60)
v =np.array(([c,d]))
############
#Create matrix A (2.66) which is filled by blocks
############
blocks=np.zeros((2,2,nsig,ninter-1))
# Reflexion block
blocks[:,:,uR[0],uR[1]]=-K[:,:,uR[0],uR[1]+1]
# Transmission block
blocks[:,:,uT[0],uT[1]]=-np.eye(2)[:,:,np.newaxis]*np.ones((len(uT[0])))
# Diff block
blocks[:,:,uD[0],uD[1]]=0.
# fill the AM mda on the diagonal below the mda diagonal....
A=pyu.fill_block_diagMDA(AM,blocks,2,-1)
# The 2nd member y is first completely filled, without taking into account that the 1st line differs from the others.
# 1. find which interactions and signatures are R|T|D => create a masked array
# 2. repeat is used because each signature/interaction corresponds to a 2x1 column. Repeating gives the correct size to fill y
# 3. fill the 1st line of y to take that difference into account.
# y is the 2nd member from (2.65) and will be filled following (2.67)
y = np.zeros((2 * ninter,nsig))
#######
# Determine where y has to be filed with R|T|D
#####
#find the position where there is T|R|D. non continuous => need mask array
uTf = np.where(ityp==3)
uRf = np.where(ityp==2)
uDf =np.where(ityp==1)
# position in signature <=> 2 lines in y. need to repeat to get the correct size
uRy2=np.repeat(uRf[0],2)
uRy1=np.repeat(uRf[1],2)
uRy1=2*uRy1
uRy1[1::2]=uRy1[::2]+1
uDy2=np.repeat(uDf[0],2)
uDy1=np.repeat(uDf[1],2)
uDy1=2*uDy1
uDy1[1::2]=uDy1[::2]+1
try:
y[uRy1,uRy2]=v[:,uRf[0],uRf[1]].ravel(order='F')
except:
pass #print 'no R'
try:
pass
#uT1mr = np.repeat(uT1m.mask,2,axis=1).T
#nothing to do. shoould be a zero vector , already initialized by y
except:
pass #print 'no T'
try:
# NEVER TESTED !!!!!!!!!!!
y[uDy1,uDy2]=a[uDf]
except:
print "signatures.image diffraction line 3672 Not yet tested !"
pass #print 'no D'
######
#FIRST LINE specific processing of (2.67)
######
uT0 = np.where(ityp[:,0]==3)[0]
uR0 = np.where(ityp[:,0]==2)[0]
uD0 =np.where(ityp[:,0]==1)[0]
#reflexion 0 (2.67)
r0 = np.einsum('ijk,j->ik',K[:,:,uR0,0],tx)+v[:,uR0,0]
#transmission 0 (2.67)
t0 = tx[:,np.newaxis]*np.ones(len(uT0))
#diff 0 (2.67)
d0 = a[uD0,0]
#first line
y[0:2,uR0]=r0
y[0:2,uT0]=t0
y[0:2,uD0]=d0
#reshape for compliant size with linalg
A=np.rollaxis(A,-1)
y=np.rollaxis(y,-1)
leA = len(A)
res=0
#trick for memory usage
if leA > 1e4:
nsp = nb_split(leA)
if nsp != leA:
lA=np.split(A,nsp)
ly=np.split(y,nsp)
del A
del y
print nsp
for s in range(nsp):
lm=np.linalg.solve(lA[s], ly[s])
try:
m = np.vstack((m,lm))
except:
m = lm
del lm
del lA
del ly
else:
m = np.linalg.solve(A, y)
else :
m = np.linalg.solve(A, y)
M=np.array((m[:,0::2],m[:,1::2]))
dM.update({ninter:M})
return dM
class Signature(object):
""" class Signature
Attributes
----------
seq : list of interaction points (edges (>0) or vertices (<0)) [int]
typ : list of interaction types 1-D 2-R 3-T [int]
pa : tail point of interaction segment (2xN) ndarray
pb : head point of interaction segment (2xN) ndarray
pc : center point of interaction segment (2xN) ndarray
"""
def __init__(self, sig):
""" object constructor
Parameters
----------
sig : nd.array or list of interactions
>>> seq = np.array([[1,5,1],[1,1,1]])
>>> s = Signature(seq)
"""
def typinter(l):
try:
l = eval(l)
except:
pass
return(len(l))
def seginter(l):
try:
l = eval(l)
except:
pass
return l[0]
if type(sig)==np.ndarray:
self.seq = sig[0, :]
self.typ = sig[1, :]
if type(sig)==list:
self.seq = map(seginter,sig)
self.typ = map(typinter,sig)
def __repr__(self):
s = ''
s = s + str(self.seq) + '\n'
s = s + str(self.typ) + '\n'
if getattr(self, 'evaluated', False):
s = s + str(self.pa)+'\n'
s = s + str(self.pb)+'\n'
return s
def info(self):
for k in self.__dict__.keys():
print k, ':', self.__dict__[k]
def ev2(self, L):
""" evaluation of Signature
Parameters
----------
L : Layout
Notes
-----
This function converts the sequence of interactions into numpy arrays
which contains coordinates of segments extremities involved in the
signature. At that level the coordinates of extremities (tx and rx) are
not known yet.
members data
pa tail of segment (2xN)
pb head of segment (2xN)
pc the center of segment (2xN)
norm normal to the segment if segment
in case the interaction is a point the normal is undefined and then
set to 0
"""
def seqpointa(k,L=L):
if k>0:
ta, he = L.Gs.neighbors(k)
pa = np.array(L.Gs.pos[ta]).reshape(2,1)
pb = np.array(L.Gs.pos[he]).reshape(2,1)
pc = np.array(L.Gs.pos[k]).reshape(2,1)
nor1 = L.Gs.node[k]['norm']
norm = np.array([nor1[0], nor1[1]]).reshape(2,1)
else:
pa = np.array(L.Gs.pos[k]).reshape(2,1)
pb = pa
pc = pa
norm = np.array([0, 0]).reshape(2,1)
return(np.vstack((pa,pb,pc,norm)))
v = np.array(map(seqpointa,self.seq))
self.pa = v[:,0:2,:]
self.pb = v[:,2:4,:]
self.pc = v[:,4:6,:]
self.norm = v[:,6:,:]
def evf(self, L):
""" evaluation of Signature (fast version)
Parameters
----------
L : Layout
Notes
-----
This function converts the sequence of interactions into numpy arrays
which contains coordinates of segments extremities involved in the
signature.
members data
pa tail of segment (2xN)
pb head of segment (2xN)
"""
N = len(self.seq)
self.pa = np.empty((2, N)) # tail
self.pb = np.empty((2, N)) # head
for n in range(N):
k = self.seq[n]
if k > 0: # segment
ta, he = L.Gs.neighbors(k)
self.pa[:, n] = np.array(L.Gs.pos[ta])
self.pb[:, n] = np.array(L.Gs.pos[he])
else: # node
pa = np.array(L.Gs.pos[k])
self.pa[:, n] = pa
self.pb[:, n] = pa
self.evaluated = True
def ev(self, L):
""" evaluation of Signature
Parameters
----------
L : Layout
Notes
-----
This function converts the sequence of interactions into numpy arrays
which contains coordinates of segments extremities involved in the
signature.
At that stage coordinates of extremities (tx and rx) are
not known yet
members data
pa tail of segment (2xN)
pb head of segment (2xN)
pc the center of segment (2xN)
norm normal to the segment if segment
in case the interaction is a point the normal is undefined and then
set to 0.
"""
# TODO : use map and filter instead of for loop
N = len(self.seq)
self.pa = np.empty((2, N)) # tail
self.pb = np.empty((2, N)) # head
self.pc = np.empty((2, N)) # center
self.norm = np.empty((2, N))
for n in range(N):
k = self.seq[n]
if k > 0: # segment
ta, he = L.Gs.neighbors(k)
norm1 = np.array(L.Gs.node[k]['norm'])
norm = np.array([norm1[0], norm1[1]])
self.pa[:, n] = np.array(L.Gs.pos[ta])
self.pb[:, n] = np.array(L.Gs.pos[he])
self.pc[:, n] = np.array(L.Gs.pos[k])
self.norm[:, n] = norm
else: # node
pa = np.array(L.Gs.pos[k])
norm = np.array([0, 0])
self.pa[:, n] = pa
self.pb[:, n] = pa
self.pc[:, n] = pa
self.norm[:, n] = norm
self.evaluated = True
def unfold(self):
""" unfold a given signature
returns 2 np.ndarray of pta and phe "aligned"
reflexion interactions are mirrored
Returns
-------
pta : np.array
phe : np.array
"""
lensi = len(self.seq)
pta = np.empty((2,lensi))
phe = np.empty((2,lensi))
pta[:,0] = self.pa[:,0]
phe[:,0] = self.pb[:,0]
mirror=[]
for i in range(1,lensi):
pam = self.pa[:,i].reshape(2,1)
pbm = self.pb[:,i].reshape(2,1)
if self.typ[i] == 2: # R
for m in mirror:
pam = geu.mirror(pam,pta[:,m],phe[:,m])
pbm = geu.mirror(pbm,pta[:,m],phe[:,m])
pta[:,i] = pam.reshape(2)
phe[:,i] = pbm.reshape(2)
mirror.append(i)
elif self.typ[i] == 3 : # T
for m in mirror:
pam = geu.mirror(pam,pta[:,m],phe[:,m])
pbm = geu.mirror(pbm,pta[:,m],phe[:,m])
pta[:,i] = pam.reshape(2)
phe[:,i] = pbm.reshape(2)
elif self.typ[i] == 1 : # D
pass
# TODO not implemented yet
return pta,phe
def evtx(self, L, tx, rx):
""" evaluate transmitter
Parameters
----------
L : Layout
tx : np.array (2xN)
rx : np.array (2xM)
DEPRECATED
"""
self.pa = tx.reshape(2, 1)
self.pb = tx.reshape(2, 1)
self.pc = tx.reshape(2, 1)
self.typ = np.array([0])
for k in self.seq:
if k > 0:
ta, he = L.Gs.neighbors(k)
norm1 = L.Gs.node[k]['norm']
norm = np.array([norm1[0], norm1[1]]).reshape(2, 1)
pa = np.array(L.Gs.pos[ta]).reshape(2, 1)
pb = np.array(L.Gs.pos[he]).reshape(2, 1)
pc = np.array(L.Gs.pos[k]).reshape(2, 1)
self.pa = np.hstack((self.pa, pa))
self.pb = np.hstack((self.pb, pb))
self.pc = np.hstack((self.pc, pc))
try:
self.norm = np.hstack((self.norm, norm))
except:
self.norm = norm
self.typ = np.hstack((self.typ, np.array([1])))
else:
pa = np.array(L.Gs.pos[k]).reshape(2, 1)
norm = np.array([0, 0]).reshape(2, 1)
self.pa = np.hstack((self.pa, pa))
self.pb = np.hstack((self.pb, pa))
self.pc = np.hstack((self.pc, pa))
try:
self.norm = np.hstack((self.norm, norm))
except:
self.norm = norm
self.typ = np.hstack((self.typ, np.array([3])))
self.pa = np.hstack((self.pa, rx.reshape(2, 1)))
self.pb = np.hstack((self.pb, rx.reshape(2, 1)))
self.pc = np.hstack((self.pc, rx.reshape(2, 1)))
self.typ = np.hstack((self.typ, np.array([0])))
#
# vector between two adjacent points of the signature
#
self.v = self.pc[:, 1:] - self.pc[:, :-1]
self.vn = self.v / np.sqrt(np.sum(self.v * self.v, axis=0))
u1 = np.sum(self.norm * self.vn[:, 0:-1], axis=0)
u2 = np.sum(self.norm * self.vn[:, 1:], axis=0)
self.typ = np.sign(u1 * u2)
#return(vn)
#return(typ)
def image(self, tx):
""" compute the tx's images with respect to the signature segments
Parameters
----------
tx : numpy.ndarray
Returns
-------
M : numpy.ndarray
"""
pa = self.pa
pb = self.pb
pab = pb - pa
alpha = np.sum(pab * pab, axis=0)
zalpha = np.where(alpha == 0.)
alpha[zalpha] = 1.
a = 1 - (2. / alpha) * (pa[1, :] - pb[1, :]) ** 2
b = (2. / alpha) * (pb[0, :] - pa[0, :]) * (pa[1, :] - pb[1, :])
c = (2. / alpha) * (pa[0, :] * (pa[1, :] - pb[1, :]) ** 2 +
pa[1, :] * (pa[1, :] - pb[1, :]) *
(pb[0, :] - pa[0, :]))
d = (2. / alpha) * (pa[1, :] * (pb[0, :] - pa[0, :]) ** 2 +
pa[0, :] * (pa[1, :] - pb[1, :]) *
(pb[0, :] - pa[0, :]))
typ = self.typ
# number of interactions
N = np.shape(pa)[1]
S = np.zeros((N, 2, 2))
S[:, 0, 0] = -a
S[:, 0, 1] = b
S[:, 1, 0] = b
S[:, 1, 1] = a
blocks = np.zeros((N - 1, 2, 2))
A = np.eye(N * 2)
# detect diffraction
usig = np.nonzero(typ[1:] == 1)[0]
if len(usig) > 0:
blocks[usig, :, :] = np.zeros((2, 2))
# detect transmission
tsig = np.nonzero(typ[1:] == 3)[0]
if len(tsig) > 0:
#blocks[tsig, :, :] = np.zeros((2, 2))
blocks[tsig, :, :] = -np.eye(2)
# detect reflexion
rsig = np.nonzero(typ[1:] == 2)[0]
if len(rsig) > 0:
blocks[rsig, :, :] = S[rsig + 1, :, :]
A = pyu.fill_block_diag(A, blocks, 2, -1)
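# Solving A.x = y yields all the successive images of tx at once :
# x stacks the 2D image point of each interaction. On the sub-diagonal
# of A a reflexion contributes minus the mirror matrix of its segment,
# a transmission contributes -I (image unchanged) and a diffraction a
# zero block (the image is the diffraction point itself, forced via y).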
y = np.zeros(2 * N)
if typ[0] == 2:
vc0 = np.array([c[0], d[0]])
v0 = np.dot(-S[0, :, :], tx) + vc0
if typ[0] == 3:
v0 = tx
if typ[0] == 1:
v0 = pa[:, 0]
y[0:2] = v0
for i in range(len(typ[1:])):
if typ[i + 1] == 2:
y[2 * (i + 1):2 * (i + 1) + 2] = np.array([c[i + 1], d[i + 1]])
if typ[i + 1] == 3:
#y[2 * (i + 1):2 * (i + 1) + 2] = y[2*i:2*i+2]
y[2 * (i + 1):2 * (i + 1) + 2] = np.array([0,0])
if typ[i + 1] == 1:
y[2 * (i + 1):2 * (i + 1) + 2] = pa[:, i + 1]
x = la.solve(A, y)
M = np.vstack((x[0::2], x[1::2]))
return M
def show(self,L,tx,rx,**kwargs):
"""
Parameters
----------
L : Layout
tx :
rx :
aw
"""
defaults = {'aw':True,
'axes':True,
'labels':False,
'fig':[],
'ax':[]
}
for k in defaults:
if k not in kwargs:
kwargs[k]=defaults[k]
if kwargs['fig']==[]:
fig = plt.gcf()
else:
fig = kwargs['fig']
if kwargs['ax']==[]:
ax = fig.gca()
else:
ax = kwargs['ax']
self.ev(L)
fig,ax = L.showG('s',labels=kwargs['labels'],
aw=kwargs['aw'],
axes=kwargs['axes']
,fig=fig,ax=ax)
M = self.image(tx)
isvalid,Y,tup = self.backtrace(tx,rx,M)
l1 = ax.plot(tx[0],tx[1],'or')
l2 = ax.plot(rx[0],rx[1],'og')
l3 = ax.plot(M[0,:],M[1,:],'ob')
l4 = ax.plot(Y[0,:],Y[1,:],'ok')
ray = np.hstack((np.hstack((rx.reshape(2,1),Y)),tx.reshape(2,1)))
for k in self.seq:
ax.annotate(str(k),xy=(L.Gs.pos[k]),xytext=(L.Gs.pos[k]))
if isvalid:
l5 = ax.plot(ray[0,:],ray[1,:],color='green',alpha=0.6,linewidth=0.6)
else:
l5 = ax.plot(ray[0,:],ray[1,:],color='red',alpha=0.6,linewidth=0.6)
return fig,ax
def backtrace(self, tx, rx, M):
""" backtrace given image, tx, and rx
Parameters
----------
tx : ndarray (2x1)
transmitter
rx : ndarray (2x1)
receiver
M : ndarray (2xN)
N image points obtained using self.image method
Returns
-------
isvalid : bool
True if the backtrace ends successfully
Y : ndarray (2 x (N+2))
sequence of points corresponding to the sought ray
Examples
--------
.. plot::
:include-source:
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from pylayers.gis.layout import *
>>> from pylayers.antprop.signature import *
>>> L = Layout('defstr.ini')
>>> s = Signature(seq)
>>> tx = np.array([760,1113])
>>> rx = np.array([762,1114])
>>> s.ev(L)
>>> M = s.image(tx)
>>> isvalid,Y,tup = s.backtrace(tx,rx,M)
>>> fig,ax = L.showG('s',labels=1,aw=1,axes=1)
>>> l1 = ax.plot(tx[0],tx[1],'or')
>>> l2 = ax.plot(rx[0],rx[1],'og')
>>> l3 = ax.plot(M[0,:],M[1,:],'ob')
>>> l4 = ax.plot(Y[0,:],Y[1,:],'xk')
>>> ray = np.hstack((np.hstack((tx.reshape(2,1),Y)),rx.reshape(2,1)))
>>> l5 = ax.plot(ray[0,:],ray[1,:],color='#999999',alpha=0.6,linewidth=0.6)
>>>
>>> plt.show()
Notes
-----
For mathematical details see :
@INPROCEEDINGS{6546704,
author={Laaraiedh, Mohamed and Amiot, Nicolas and Uguen, Bernard},
booktitle={Antennas and Propagation (EuCAP), 2013 7th European Conference on},
title={Efficient ray tracing tool for UWB propagation and
localization modeling},
year={2013},
pages={2307-2311},}
"""
#import ipdb
#pdb.set_trace()
#import pdb
pa = self.pa
pb = self.pb
typ = self.typ
N = np.shape(pa)[1]
I2 = np.eye(2)
z0 = np.zeros((2, 1))
pkm1 = rx.reshape(2, 1)
Y = pkm1
k = 0 # interaction counter
beta = .5 # to enter into the loop
isvalid = True # the signature is assumed to be valid by default
epsilon = 1e-12
# if tuple(self.seq) == ( 42, -277, 135, 21, 46, 319):
# import ipdb
# ipdb.set_trace()
# while (((beta <= 1) & (beta >= 0)) & (k < N)):
while (((beta <= 1-epsilon) & (beta >= epsilon)) & (k < N)):
#if int(typ[k]) != 1: # not a diffraction (surprisingly it works)
if int(typ[N-(k+1)]) != 1: # not a diffraction
# Formula (25) of paper Eucap 2013
l0 = np.hstack((I2, pkm1 - M[:, N - (k + 1)].reshape(2, 1), z0))
l1 = np.hstack((I2, z0,
pa[:, N - (k + 1)].reshape(2, 1) -
pb[:, N - (k + 1)].reshape(2, 1)
))
# print pkm1
# import ipdb
# ipdb.set_trace()
T = np.vstack((l0, l1))
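# T is a 4x4 system in (x, y, alpha, beta) : (x, y) is the intersection
# of the line joining the current point pkm1 to the image M[:, N-(k+1)]
# with the interaction segment [pa, pb]; beta in (0, 1) means the
# intersection falls inside the segment, otherwise the signature cannot
# produce a valid ray.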
yk = np.hstack((pkm1[:, 0].T, pa[:, N - (k + 1)].T))
deT = np.linalg.det(T)
if abs(deT) < 1e-15:
return(False,(k,None,None))
xk = la.solve(T, yk)
pkm1 = xk[0:2].reshape(2, 1)
gk = xk[2::]
alpha = gk[0]
beta = gk[1]
#print k,alpha,beta
Y = np.hstack((Y, pkm1))
else:
alpha = 0.5 # dummy necessary for the test below
# fixing #210
#Y = np.hstack((Y, pa[:, k].reshape((2, 1))))
#pkm1 = pa[:, k].reshape((2, 1))
Y = np.hstack((Y, pa[:, N-(k+1)].reshape((2, 1))))
pkm1 = pa[:, N-(k+1)].reshape((2, 1))
k = k + 1
if ((k == N) & ((beta > 0) & (beta < 1)) & ((alpha > 0) & (alpha < 1))):
Y = np.hstack((Y, tx.reshape(2, 1)))
return isvalid,Y,(k,alpha,beta)
else:
isvalid = False
return isvalid,Y,(k,alpha,beta)
def sig2ray(self, L, pTx, pRx):
""" convert a signature to a 2D ray
Parameters
----------
L : Layout
pTx : ndarray
2D transmitter position
pRx : ndarray
2D receiver position
Returns
-------
Y : ndarray (2x(N+2))
See Also
--------
Signature.image
Signature.backtrace
"""
# ev transforms a sequence of segment into numpy arrays (points)
# necessary for image calculation
self.ev(L)
# calculates images from pTx
M = self.image(pTx)
#print self
#if np.array_equal(self.seq,np.array([5,7,4])):
# pdb.set_trace()
isvalid,Y,u = self.backtrace(pTx, pRx, M)
#print isvalid,Y
#
# In incremental mode this function returns an alternative signature
# in case the signature does not yield a valid ray.
#
return isvalid,Y,u
if __name__ == "__main__":
plt.ion()
print "testing pylayers/antprop/signature.py"
doctest.testmod()
print "-------------------------------------"
|
lgpl-3.0
|
jstoxrocky/statsmodels
|
statsmodels/graphics/boxplots.py
|
30
|
16437
|
"""Variations on boxplots."""
# Author: Ralf Gommers
# Based on code by Flavio Coelho and Teemu Ikonen.
from statsmodels.compat.python import zip
import numpy as np
from scipy.stats import gaussian_kde
from . import utils
__all__ = ['violinplot', 'beanplot']
def violinplot(data, ax=None, labels=None, positions=None, side='both',
show_boxplot=True, plot_opts={}):
"""Make a violin plot of each dataset in the `data` sequence.
A violin plot is a boxplot combined with a kernel density estimate of the
probability density function per point.
Parameters
----------
data : sequence of ndarrays
Data arrays, one array per value in `positions`.
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
labels : list of str, optional
Tick labels for the horizontal axis. If not given, integers
``1..len(data)`` are used.
positions : array_like, optional
Position array, used as the horizontal axis of the plot. If not given,
spacing of the violins will be equidistant.
side : {'both', 'left', 'right'}, optional
How to plot the violin. Default is 'both'. The 'left', 'right'
options can be used to create asymmetric violin plots.
show_boxplot : bool, optional
Whether or not to show normal box plots on top of the violins.
Default is True.
plot_opts : dict, optional
A dictionary with plotting options. Any of the following can be
provided, if not present in `plot_opts` the defaults will be used::
- 'violin_fc', MPL color. Fill color for violins. Default is '#66c2a5'.
- 'violin_ec', MPL color. Edge color for violins. Default is 'k'.
- 'violin_lw', scalar. Edge linewidth for violins. Default is 1.
- 'violin_alpha', float. Transparency of violins. Default is 0.5.
- 'cutoff', bool. If True, limit violin range to data range.
Default is False.
- 'cutoff_val', scalar. Where to cut off violins if `cutoff` is
True. Default is 1.5 standard deviations.
- 'cutoff_type', {'std', 'abs'}. Whether cutoff value is absolute,
or in standard deviations. Default is 'std'.
- 'violin_width' : float. Relative width of violins. Max available
space is 1, default is 0.8.
- 'label_fontsize', MPL fontsize. Adjusts fontsize only if given.
- 'label_rotation', scalar. Adjusts label rotation only if given.
Specify in degrees.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
beanplot : Bean plot, builds on `violinplot`.
matplotlib.pyplot.boxplot : Standard boxplot.
Notes
-----
The appearance of violins can be customized with `plot_opts`. If
customization of boxplot elements is required, set `show_boxplot` to False
and plot it on top of the violins by calling the Matplotlib `boxplot`
function directly. For example::
violinplot(data, ax=ax, show_boxplot=False)
ax.boxplot(data, sym='cv', whis=2.5)
It can happen that the axis labels or tick labels fall outside the plot
area, especially with rotated labels on the horizontal axis. With
Matplotlib 1.1 or higher, this can easily be fixed by calling
``ax.tight_layout()``. With older Matplotlib one has to use ``plt.rc`` or
``plt.rcParams`` to fix this, for example::
plt.rc('figure.subplot', bottom=0.25)
violinplot(data, ax=ax)
References
----------
J.L. Hintze and R.D. Nelson, "Violin Plots: A Box Plot-Density Trace
Synergism", The American Statistician, Vol. 52, pp.181-84, 1998.
Examples
--------
We use the American National Election Survey 1996 dataset, which has Party
Identification of respondents as independent variable and (among other
data) age as dependent variable.
>>> data = sm.datasets.anes96.load_pandas()
>>> party_ID = np.arange(7)
>>> labels = ["Strong Democrat", "Weak Democrat", "Independent-Democrat",
... "Independent-Indpendent", "Independent-Republican",
... "Weak Republican", "Strong Republican"]
Group age by party ID, and create a violin plot with it:
>>> plt.rcParams['figure.subplot.bottom'] = 0.23 # keep labels visible
>>> age = [data.exog['age'][data.endog == id] for id in party_ID]
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> sm.graphics.violinplot(age, ax=ax, labels=labels,
... plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
... 'label_fontsize':'small',
... 'label_rotation':30})
>>> ax.set_xlabel("Party identification of respondent.")
>>> ax.set_ylabel("Age")
>>> plt.show()
.. plot:: plots/graphics_boxplot_violinplot.py
"""
fig, ax = utils.create_mpl_ax(ax)
if positions is None:
positions = np.arange(len(data)) + 1
# Determine available horizontal space for each individual violin.
pos_span = np.max(positions) - np.min(positions)
width = np.min([0.15 * np.max([pos_span, 1.]),
plot_opts.get('violin_width', 0.8) / 2.])
# Plot violins.
for pos_data, pos in zip(data, positions):
xvals, violin = _single_violin(ax, pos, pos_data, width, side,
plot_opts)
if show_boxplot:
ax.boxplot(data, notch=1, positions=positions, vert=1)
# Set ticks and tick labels of horizontal axis.
_set_ticks_labels(ax, data, labels, positions, plot_opts)
return fig
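# Minimal usage sketch (illustrative only; `data` is any sequence of 1-D
# arrays, as documented above):
#
# import numpy as np
# import matplotlib.pyplot as plt
# from statsmodels.graphics.boxplots import violinplot
#
# rng = np.random.RandomState(0)
# data = [rng.normal(loc=mu, size=200) for mu in (0., 1., 2.)]
# fig = violinplot(data, labels=['a', 'b', 'c'],
#                  plot_opts={'violin_width': 0.6})
# plt.show()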
def _single_violin(ax, pos, pos_data, width, side, plot_opts):
""""""
def _violin_range(pos_data, plot_opts):
"""Return array with correct range, with which violins can be plotted."""
cutoff = plot_opts.get('cutoff', False)
cutoff_type = plot_opts.get('cutoff_type', 'std')
cutoff_val = plot_opts.get('cutoff_val', 1.5)
s = 0.0
if not cutoff:
if cutoff_type == 'std':
s = cutoff_val * np.std(pos_data)
else:
s = cutoff_val
x_lower = kde.dataset.min() - s
x_upper = kde.dataset.max() + s
return np.linspace(x_lower, x_upper, 100)
pos_data = np.asarray(pos_data)
# Kernel density estimate for data at this position.
kde = gaussian_kde(pos_data)
# Create violin for pos, scaled to the available space.
xvals = _violin_range(pos_data, plot_opts)
violin = kde.evaluate(xvals)
violin = width * violin / violin.max()
if side == 'both':
envelope_l, envelope_r = (-violin + pos, violin + pos)
elif side == 'right':
envelope_l, envelope_r = (pos, violin + pos)
elif side == 'left':
envelope_l, envelope_r = (-violin + pos, pos)
else:
msg = "`side` parameter should be one of {'left', 'right', 'both'}."
raise ValueError(msg)
# Draw the violin.
ax.fill_betweenx(xvals, envelope_l, envelope_r,
facecolor=plot_opts.get('violin_fc', '#66c2a5'),
edgecolor=plot_opts.get('violin_ec', 'k'),
lw=plot_opts.get('violin_lw', 1),
alpha=plot_opts.get('violin_alpha', 0.5))
return xvals, violin
def _set_ticks_labels(ax, data, labels, positions, plot_opts):
"""Set ticks and labels on horizontal axis."""
# Set xticks and limits.
ax.set_xlim([np.min(positions) - 0.5, np.max(positions) + 0.5])
ax.set_xticks(positions)
label_fontsize = plot_opts.get('label_fontsize')
label_rotation = plot_opts.get('label_rotation')
if label_fontsize or label_rotation:
from matplotlib.artist import setp
if labels is not None:
if not len(labels) == len(data):
msg = "Length of `labels` should equal length of `data`."
raise ValueError(msg)
xticknames = ax.set_xticklabels(labels)
if label_fontsize:
setp(xticknames, fontsize=label_fontsize)
if label_rotation:
setp(xticknames, rotation=label_rotation)
return
def beanplot(data, ax=None, labels=None, positions=None, side='both',
jitter=False, plot_opts={}):
"""Make a bean plot of each dataset in the `data` sequence.
A bean plot is a combination of a `violinplot` (kernel density estimate of
the probability density function per point) with a line-scatter plot of all
individual data points.
Parameters
----------
data : sequence of ndarrays
Data arrays, one array per value in `positions`.
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
labels : list of str, optional
Tick labels for the horizontal axis. If not given, integers
``1..len(data)`` are used.
positions : array_like, optional
Position array, used as the horizontal axis of the plot. If not given,
spacing of the violins will be equidistant.
side : {'both', 'left', 'right'}, optional
How to plot the violin. Default is 'both'. The 'left', 'right'
options can be used to create asymmetric violin plots.
jitter : bool, optional
If True, jitter markers within violin instead of plotting regular lines
around the center. This can be useful if the data is very dense.
plot_opts : dict, optional
A dictionary with plotting options. All the options for `violinplot`
can be specified, they will simply be passed to `violinplot`. Options
specific to `beanplot` are:
- 'violin_width' : float. Relative width of violins. Max available
space is 1, default is 0.8.
- 'bean_color', MPL color. Color of bean plot lines. Default is 'k'.
Also used for jitter marker edge color if `jitter` is True.
- 'bean_size', scalar. Line length as a fraction of maximum length.
Default is 0.5.
- 'bean_lw', scalar. Linewidth, default is 0.5.
- 'bean_show_mean', bool. If True (default), show mean as a line.
- 'bean_show_median', bool. If True (default), show median as a
marker.
- 'bean_mean_color', MPL color. Color of mean line. Default is 'b'.
- 'bean_mean_lw', scalar. Linewidth of mean line, default is 2.
- 'bean_mean_size', scalar. Line length as a fraction of maximum length.
Default is 0.5.
- 'bean_median_color', MPL color. Color of median marker. Default
is 'r'.
- 'bean_median_marker', MPL marker. Marker type, default is '+'.
- 'jitter_marker', MPL marker. Marker type for ``jitter=True``.
Default is 'o'.
- 'jitter_marker_size', int. Marker size. Default is 4.
- 'jitter_fc', MPL color. Jitter marker face color. Default is None.
- 'bean_legend_text', str. If given, add a legend with given text.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
violinplot : Violin plot, also used internally in `beanplot`.
matplotlib.pyplot.boxplot : Standard boxplot.
References
----------
P. Kampstra, "Beanplot: A Boxplot Alternative for Visual Comparison of
Distributions", J. Stat. Soft., Vol. 28, pp. 1-9, 2008.
Examples
--------
We use the American National Election Survey 1996 dataset, which has Party
Identification of respondents as independent variable and (among other
data) age as dependent variable.
>>> data = sm.datasets.anes96.load_pandas()
>>> party_ID = np.arange(7)
>>> labels = ["Strong Democrat", "Weak Democrat", "Independent-Democrat",
... "Independent-Indpendent", "Independent-Republican",
... "Weak Republican", "Strong Republican"]
Group age by party ID, and create a violin plot with it:
>>> plt.rcParams['figure.subplot.bottom'] = 0.23 # keep labels visible
>>> age = [data.exog['age'][data.endog == id] for id in party_ID]
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> sm.graphics.beanplot(age, ax=ax, labels=labels,
... plot_opts={'cutoff_val':5, 'cutoff_type':'abs',
... 'label_fontsize':'small',
... 'label_rotation':30})
>>> ax.set_xlabel("Party identification of respondent.")
>>> ax.set_ylabel("Age")
>>> plt.show()
.. plot:: plots/graphics_boxplot_beanplot.py
"""
fig, ax = utils.create_mpl_ax(ax)
if positions is None:
positions = np.arange(len(data)) + 1
# Determine available horizontal space for each individual violin.
pos_span = np.max(positions) - np.min(positions)
violin_width = np.min([0.15 * np.max([pos_span, 1.]),
plot_opts.get('violin_width', 0.8) / 2.])
bean_width = np.min([0.15 * np.max([pos_span, 1.]),
plot_opts.get('bean_size', 0.5) / 2.])
bean_mean_width = np.min([0.15 * np.max([pos_span, 1.]),
plot_opts.get('bean_mean_size', 0.5) / 2.])
legend_txt = plot_opts.get('bean_legend_text', None)
for pos_data, pos in zip(data, positions):
# Draw violins.
xvals, violin = _single_violin(ax, pos, pos_data, violin_width, side, plot_opts)
if jitter:
# Draw data points at random coordinates within violin envelope.
jitter_coord = pos + _jitter_envelope(pos_data, xvals, violin, side)
ax.plot(jitter_coord, pos_data, ls='',
marker=plot_opts.get('jitter_marker', 'o'),
ms=plot_opts.get('jitter_marker_size', 4),
mec=plot_opts.get('bean_color', 'k'),
mew=1, mfc=plot_opts.get('jitter_fc', 'none'),
label=legend_txt)
else:
# Draw bean lines.
ax.hlines(pos_data, pos - bean_width, pos + bean_width,
lw=plot_opts.get('bean_lw', 0.5),
color=plot_opts.get('bean_color', 'k'),
label=legend_txt)
# Show legend if required.
if legend_txt is not None:
_show_legend(ax)
legend_txt = None # ensure we get one entry per call to beanplot
# Draw mean line.
if plot_opts.get('bean_show_mean', True):
ax.hlines(np.mean(pos_data), pos - bean_mean_width, pos + bean_mean_width,
lw=plot_opts.get('bean_mean_lw', 2.),
color=plot_opts.get('bean_mean_color', 'b'))
# Draw median marker.
if plot_opts.get('bean_show_median', True):
ax.plot(pos, np.median(pos_data),
marker=plot_opts.get('bean_median_marker', '+'),
color=plot_opts.get('bean_median_color', 'r'))
# Set ticks and tick labels of horizontal axis.
_set_ticks_labels(ax, data, labels, positions, plot_opts)
return fig
def _jitter_envelope(pos_data, xvals, violin, side):
"""Determine envelope for jitter markers."""
if side == 'both':
low, high = (-1., 1.)
elif side == 'right':
low, high = (0, 1.)
elif side == 'left':
low, high = (-1., 0)
else:
raise ValueError("`side` input incorrect: %s" % side)
jitter_envelope = np.interp(pos_data, xvals, violin)
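# The violin profile evaluated at each data value bounds the horizontal
# jitter : markers are drawn at a random offset inside the envelope
# (both sides, only right, or only left depending on `side`).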
jitter_coord = jitter_envelope * np.random.uniform(low=low, high=high,
size=pos_data.size)
return jitter_coord
def _show_legend(ax):
"""Utility function to show legend."""
leg = ax.legend(loc=1, shadow=True, fancybox=True, labelspacing=0.2,
borderpad=0.15)
ltext = leg.get_texts()
llines = leg.get_lines()
frame = leg.get_frame()
from matplotlib.artist import setp
setp(ltext, fontsize='small')
setp(llines, linewidth=1)
|
bsd-3-clause
|
CamDavidsonPilon/lifelines
|
lifelines/fitters/npmle.py
|
1
|
10143
|
# -*- coding: utf-8 -*-
"""
This code isn't to be called directly, but is the core logic of the KaplanMeierFitter.fit_interval_censoring
References
https://upcommons.upc.edu/bitstream/handle/2117/93831/01Rop01de01.pdf
https://docs.ufpr.br/~giolo/CE063/Artigos/A4_Gomes%20et%20al%202009.pdf
"""
from collections import defaultdict, namedtuple
import warnings
import numpy as np
from numpy.linalg import norm
import pandas as pd
from lifelines.exceptions import ConvergenceWarning
from typing import *
interval = namedtuple("Interval", ["left", "right"])
class min_max:
"""
Keep only the min/max of streaming values
"""
def __init__(self):
self.min = np.inf
self.max = -np.inf
def add(self, value: float):
if value > self.max:
self.max = value
if value < self.min:
self.min = value
def __iter__(self):
yield self.min
yield self.max
def temper(i: int, optimize) -> float:
if optimize:
return 0.9 * (2 * np.arctan(i / 100) / np.pi) + 1
else:
return 1.0
def E_step_M_step(observation_intervals, p_old, turnbull_interval_lookup, weights, i, optimize) -> np.ndarray:
"""
See [1], but also modifications.
References
-----------
1. Clifford Anderson-Bergman (2016): An efficient implementation of the
EMICM algorithm for the interval censored NPMLE, Journal of Computational and Graphical
Statistics, DOI: 10.1080/10618600.2016.1208616
"""
N = 0
m = np.zeros_like(p_old)
P = cumulative_sum(p_old)
for observation_interval, w in zip(observation_intervals, weights):
# find all turnbull intervals, t, that are contained in (ol, or). Call this set T
# the denominator is sum of p_old[T] probabilities
# the numerator is p_old[t]
min_, max_ = turnbull_interval_lookup[observation_interval]
m[min_ : max_ + 1] += w / (P[max_ + 1] - P[min_]).sum()
N += w
p_new = p_old * (m / N) ** temper(i, optimize)
p_new /= p_new.sum()
return p_new
def cumulative_sum(p: np.ndarray) -> np.ndarray:
# return np.insert(p, 0, 0).cumsum()
return np.concatenate((np.zeros(1), p)).cumsum()
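# e.g. cumulative_sum(np.array([0.2, 0.3, 0.5])) -> [0., 0.2, 0.5, 1.],
# so P[j + 1] - P[i] is the probability mass of Turnbull intervals i..j.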
def create_turnbull_intervals(left, right) -> List[interval]:
"""
Observations are closed intervals [left, right].
The returned Turnbull intervals are also closed intervals, formed wherever
a left endpoint is immediately followed by a right endpoint in the sorted
union of all endpoints.
"""
left = [[l, "l"] for l in left]
right = [[r, "r"] for r in right]
union = sorted(left + right)
intervals = []
for e1, e2 in zip(union, union[1:]):
if e1[1] == "l" and e2[1] == "r":
intervals.append(interval(e1[0], e2[0]))
return intervals
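# Worked example : left = [0, 2], right = [3, 5] (observations [0, 3] and
# [2, 5]). The sorted union of endpoints is 0(l), 2(l), 3(r), 5(r); the
# only place where a left endpoint is immediately followed by a right
# endpoint is (2, 3), so the single Turnbull interval is [2, 3].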
def is_subset(query_interval: interval, super_interval: interval) -> bool:
"""
assumes query_interval is [], and super_interval is (]
"""
return super_interval.left <= query_interval.left and query_interval.right <= super_interval.right
def create_turnbull_lookup(
turnbull_intervals: List[interval], observation_intervals: List[interval]
) -> Dict[interval, List[interval]]:
turnbull_lookup = defaultdict(min_max)
for i, turnbull_interval in enumerate(turnbull_intervals):
# ask: which observations is this t_interval part of?
for observation_interval in observation_intervals:
# since left and right are sorted by left, we can stop after left > turnbull_interval[1] value
if observation_interval.left > turnbull_interval.right:
break
if is_subset(turnbull_interval, observation_interval):
turnbull_lookup[observation_interval].add(i)
return {o: list(s) for o, s in turnbull_lookup.items()}
def check_convergence(
p_new: np.ndarray,
p_old: np.ndarray,
turnbull_lookup: Dict[interval, List[interval]],
weights: np.ndarray,
tol: float,
i: int,
verbose=False,
) -> bool:
old_ll = log_likelihood(p_old, turnbull_lookup, weights)
new_ll = log_likelihood(p_new, turnbull_lookup, weights)
delta = new_ll - old_ll
if verbose:
print("Iteration %d " % i)
print(" delta log-likelihood: %.10f" % delta)
print(" log-like: %.6f" % log_likelihood(p_new, turnbull_lookup, weights))
if (delta < tol) and (delta >= 0):
return True
return False
def create_observation_intervals(obs) -> List[interval]:
return [interval(l, r) for l, r in obs]
def log_odds(p: np.ndarray) -> np.ndarray:
return np.log(p) - np.log(1 - p)
def probs(log_odds: np.ndarray) -> np.ndarray:
o = np.exp(log_odds)
return o / (o + 1)
def npmle(left, right, tol=1e-7, weights=None, verbose=False, max_iter=1e5, optimize=False, fit_method="em"):
"""
left and right are closed intervals.
TODO: extend this to open-closed intervals.
"""
left, right = np.asarray(left), np.asarray(right)
if weights is None:
weights = np.ones_like(left)
# perform a group by to get unique observations and weights
df_ = pd.DataFrame({"l": left, "r": right, "w": weights}).groupby(["l", "r"]).sum()
weights = df_["w"].values
unique_obs = df_.index.values
# create objects needed
turnbull_intervals = create_turnbull_intervals(left, right)
observation_intervals = create_observation_intervals(unique_obs)
turnbull_lookup = create_turnbull_lookup(turnbull_intervals, observation_intervals)
if fit_method == "em":
p = expectation_maximization_fit(
observation_intervals, turnbull_intervals, turnbull_lookup, weights, tol, max_iter, optimize, verbose
)
elif fit_method == "scipy":
p = scipy_minimize_fit(turnbull_lookup, turnbull_intervals, weights, tol, verbose)
return p, turnbull_intervals
def scipy_minimize_fit(turnbull_interval_lookup, turnbull_intervals, weights, tol, verbose):
import autograd.numpy as anp
from autograd import value_and_grad
from scipy.optimize import minimize
def cumulative_sum(p):
return anp.concatenate((anp.zeros(1), p)).cumsum()
def negative_log_likelihood(p, turnbull_interval_lookup, weights):
P = cumulative_sum(p)
ix = anp.array(list(turnbull_interval_lookup.values()))
return -(weights * anp.log(P[ix[:, 1] + 1] - P[ix[:, 0]])).sum()
def con(p):
return p.sum() - 1
# initialize to equal weight
T = len(turnbull_intervals)
p = 1 / T * np.ones(T)
cons = {"type": "eq", "fun": con}
results = minimize(
value_and_grad(negative_log_likelihood),
args=(turnbull_interval_lookup, weights),
x0=p,
bounds=[(0, 1)] * T,
jac=True,
constraints=cons,
tol=tol,
options={"disp": verbose},
)
return results.x
def expectation_maximization_fit(
observation_intervals, turnbull_intervals, turnbull_lookup, weights, tol, max_iter, optimize, verbose
):
# convergence init
converged = False
i = 0
# initialize to equal weight
T = len(turnbull_intervals)
p = 1 / T * np.ones(T)
while (not converged) and (i < max_iter):
new_p = E_step_M_step(observation_intervals, p, turnbull_lookup, weights, i, optimize)
converged = check_convergence(new_p, p, turnbull_lookup, weights, tol, i, verbose=verbose)
# find alpha that maximizes ll using a line search
best_p, best_ll = p, -np.inf
delta = log_odds(new_p) - log_odds(p)
for alpha in np.array([1.0, 1.25, 1.95]):
p_temp = probs(log_odds(p) + alpha * delta)
ll_temp = log_likelihood(p_temp, turnbull_lookup, weights)
if best_ll < ll_temp:
best_ll = ll_temp
best_p = p_temp
p = best_p
i += 1
if i >= max_iter:
warnings.warn("Exceeded max iterations.", ConvergenceWarning)
return p
def log_likelihood(p: np.ndarray, turnbull_interval_lookup, weights) -> float:
P = cumulative_sum(p)
ix = np.array(list(turnbull_interval_lookup.values()))
return (weights * np.log(P[ix[:, 1] + 1] - P[ix[:, 0]])).sum()
def reconstruct_survival_function(
probabilities: np.ndarray, turnbull_intervals: List[interval], timeline=None, label="NPMLE"
) -> pd.DataFrame:
if timeline is None:
timeline = []
index = np.unique(np.concatenate((turnbull_intervals, [(0, 0)])))
label_upper = label + "_upper"
label_lower = label + "_lower"
df = pd.DataFrame([], index=index, columns=[label_upper, label_lower])
running_sum = 1.0
# the below values may be overwritten later, but we
# always default to starting at point (0, 1)
df.loc[0, label_upper] = running_sum
df.loc[0, label_lower] = running_sum
for p, (left, right) in zip(probabilities, turnbull_intervals):
df.loc[left, label_upper] = running_sum
df.loc[left, label_lower] = running_sum
if left != right:
df.loc[right, label_upper] = running_sum
df.loc[right, label_lower] = running_sum - p
running_sum -= p
full_dataframe = pd.DataFrame(index=timeline, columns=df.columns)
# First backfill at events between known observations
# Second fill all events _outside_ known obs with running_sum
return full_dataframe.combine_first(df).bfill().fillna(running_sum).clip(lower=0.0)
def npmle_compute_confidence_intervals(left, right, mle_, alpha=0.05, samples=1000):
"""
uses basic bootstrap
"""
left, right = np.asarray(left, dtype=float), np.asarray(right, dtype=float)
all_times = np.unique(np.concatenate((left, right, [0])))
N = left.shape[0]
bootstrapped_samples = np.empty((all_times.shape[0], samples))
for i in range(samples):
ix = np.random.randint(low=0, high=N, size=N)
left_ = left[ix]
right_ = right[ix]
bootstrapped_samples[:, i] = reconstruct_survival_function(*npmle(left_, right_), all_times).values[:, 0]
return (
2 * mle_.squeeze() - pd.Series(np.percentile(bootstrapped_samples, (alpha / 2) * 100, axis=1), index=all_times),
2 * mle_.squeeze() - pd.Series(np.percentile(bootstrapped_samples, (1 - alpha / 2) * 100, axis=1), index=all_times),
)
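# Minimal usage sketch (illustrative values): fit the NPMLE on a few
# interval-censored observations -- a pair with left == right is an exactly
# observed event -- and rebuild the survival function on a small timeline.
if __name__ == "__main__":
    example_left = [1, 2, 3, 4]
    example_right = [2, 4, 3, 6]
    probs_, turnbull_ints = npmle(example_left, example_right)
    sf = reconstruct_survival_function(probs_, turnbull_ints, timeline=np.arange(0, 7))
    print(sf)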
|
mit
|
ndingwall/scikit-learn
|
examples/miscellaneous/plot_isotonic_regression.py
|
17
|
2629
|
"""
===================
Isotonic Regression
===================
An illustration of the isotonic regression on generated data (non-linear
monotonic trend with homoscedastic uniform noise).
The isotonic regression algorithm finds a non-decreasing approximation of a
function while minimizing the mean squared error on the training data. The
benefit of such a non-parametric model is that it does not assume any shape for
the target function besides monotonicity. For comparison a linear regression is
also presented.
The plot on the right-hand side shows the model prediction function that
results from the linear interpolation of threshold points. The threshold
points are a subset of the training input observations and their matching
target values are computed by the isotonic non-parametric fit.
"""
print(__doc__)
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from sklearn.linear_model import LinearRegression
from sklearn.isotonic import IsotonicRegression
from sklearn.utils import check_random_state
n = 100
x = np.arange(n)
rs = check_random_state(0)
y = rs.randint(-50, 50, size=(n,)) + 50. * np.log1p(np.arange(n))
# %%
# Fit IsotonicRegression and LinearRegression models:
ir = IsotonicRegression(out_of_bounds="clip")
y_ = ir.fit_transform(x, y)
lr = LinearRegression()
lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression
# %%
# Plot results:
segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)]
lc = LineCollection(segments, zorder=0)
lc.set_array(np.ones(len(y)))
lc.set_linewidths(np.full(n, 0.5))
fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(12, 6))
ax0.plot(x, y, 'C0.', markersize=12)
ax0.plot(x, y_, 'C1.-', markersize=12)
ax0.plot(x, lr.predict(x[:, np.newaxis]), 'C2-')
ax0.add_collection(lc)
ax0.legend(('Training data', 'Isotonic fit', 'Linear fit'), loc='lower right')
ax0.set_title('Isotonic regression fit on noisy data (n=%d)' % n)
x_test = np.linspace(-10, 110, 1000)
ax1.plot(x_test, ir.predict(x_test), 'C1-')
ax1.plot(ir.X_thresholds_, ir.y_thresholds_, 'C1.', markersize=12)
ax1.set_title("Prediction function (%d thresholds)" % len(ir.X_thresholds_))
plt.show()
# %%
# Note that we explicitly passed `out_of_bounds="clip"` to the constructor of
# `IsotonicRegression` to control the way the model extrapolates outside of the
# range of data observed in the training set. This "clipping" extrapolation can
# be seen on the plot of the prediction function on the right-hand side.
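# %%
# As an illustrative check (the query points below are arbitrary), predictions
# outside the training range [0, 99] are clamped to the fitted values at the
# boundary thresholds, so the first and last outputs repeat the edge predictions:
print(ir.predict([-10, 0, 99, 110]))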
|
bsd-3-clause
|
boada/wmh
|
mkTournament.py
|
1
|
5496
|
import pandas as pd
from elo import pwin, new_elo
import numpy as np
from optparse import OptionParser
def init_tournament(df, initFrom):
''' This function initializes the elo scores using the ending score from
the previous year's tournament. If the player didn't play in the previous
year then they get the default value of 1500.
@type df: pandas.DataFrame
@param df: Current tournament dataframe
    @type initFrom: int
    @param initFrom: Previous year to use for the initialization
    @rtype: pandas.DataFrame
@return: The initialized dataframe for the current tournament.
'''
old_df = pd.read_json('wtc{}_results_elo.json'.format(initFrom))
# now get a sorted list of unique players
players = old_df.player.unique()
# get the last rounds elo rating
rounds = old_df['round'].max()
# get the mean elo from the previous tournament
# we'll correct by this going into the new tournament
elo_mean = old_df.elo_new.loc[(old_df['round'] == rounds)].mean()
# move the ratings into the init spot of the new tournament
for p in players:
elo = old_df.elo_new.loc[(old_df.player == p) &
(old_df['round'] == rounds)].values[0]
# correct this elo rating for the new tournament
# move toward the mean by 1/3 of the difference
# this is taken from fivethirtyeight
elo_c = elo - (elo - elo_mean) / 3
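        # e.g. an elo rating of 1600 with elo_mean = 1500 becomes 1600 - 100 / 3 ≈ 1566.7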
df.loc[(df.player == p) & (df['round'] == 1), 'elo_current'] = elo_c
return df
def score_tournament(df):
''' Does all of the elo scoring for the entire tournament. Handles all of
the updating of the dataframe. The final elo standings for the players are
defined as the 'elo_current' for the final round. It is this value that
will be used to initialize a future year's tournament if that is desired.
@type df: pandas.DataFrame
@param df: The current tournament for which we are running the scoring.
@rtype: pandas.DataFrame
@return: The scored dataframe for the current tournament.
'''
for i in np.sort(df.match.unique()):
match = df.loc[df.match == i]
player1 = df.loc[df.player == match.player.iloc[0]]
player2 = df.loc[df.player == match.player.iloc[1]]
# get the previous round number
round_number = match['round'].iloc[0]
if round_number == 1:
pround_number = 1
else:
pround_number = round_number - 1
p1_pwin = pwin(player1.loc[player1['round'] == pround_number,
'elo_current'].values[0],
player2.loc[player2['round'] == pround_number,
'elo_current'].values[0])
p2_pwin = pwin(player2.loc[player2['round'] == pround_number,
'elo_current'].values[0],
player1.loc[player1['round'] == pround_number,
'elo_current'].values[0])
if round_number == 1:
p1_elo_old = player1.loc[player1['round'] == pround_number,
'elo_current'].values[0]
p1_elo_new = new_elo(p1_elo_old, p1_pwin,
player1.loc[player1['round'] == round_number,
'win'].values[0])
p2_elo_old = player2.loc[player2['round'] == pround_number,
'elo_current'].values[0]
p2_elo_new = new_elo(p2_elo_old, p2_pwin,
player2.loc[player2['round'] == round_number,
'win'].values[0])
else:
p1_elo_old = player1.loc[player1['round'] == pround_number,
'elo_new'].values[0]
p1_elo_new = new_elo(p1_elo_old, p1_pwin,
player1.loc[player1['round'] == round_number,
'win'].values[0])
p2_elo_old = player2.loc[player2['round'] == pround_number,
'elo_new'].values[0]
p2_elo_new = new_elo(p2_elo_old, p2_pwin,
player2.loc[player2['round'] == round_number,
'win'].values[0])
df.loc[df.match == i, 'elo_current'] = [p1_elo_old, p2_elo_old]
df.loc[df.match == i, 'pwin'] = [p1_pwin, p2_pwin]
df.loc[df.match == i, 'elo_new'] = [p1_elo_new, p2_elo_new]
return df
if __name__ == "__main__":
# Read in the command line options
USAGE = '''usage:\t %prog <year> [options]
i.e.: %prog 2013'''
parser = OptionParser(usage=USAGE)
parser.add_option("--init-from",
action="store",
dest="initFrom",
default=0,
help='Previous year from which to initialize the new '
'tournament')
(opt, args) = parser.parse_args()
# read the data from the desired year
df = pd.read_json('wtc_data/wtc{}_results.json'.format(args[0]))
# create the columns and fill with initial data
df['pwin'] = pd.Series(np.ones(len(df)))
df['elo_current'] = pd.Series(1500 * np.ones(len(df)))
df['elo_new'] = pd.Series(1500 * np.ones(len(df)))
if opt.initFrom:
        df = init_tournament(df, opt.initFrom)
df = score_tournament(df)
|
mit
|
CVML/scikit-learn
|
examples/gaussian_process/gp_diabetes_dataset.py
|
223
|
1976
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
========================================================================
Gaussian Processes regression: goodness-of-fit on the 'diabetes' dataset
========================================================================
In this example, we fit a Gaussian Process model onto the diabetes
dataset.
We determine the correlation parameters with maximum likelihood
estimation (MLE). We use an anisotropic absolute exponential
correlation model with a constant regression model. We also use a
nugget of 1e-2 to account for the (strong) noise in the targets.
We compute a cross-validation estimate of the coefficient of
determination (R2) without re-performing MLE, using the set of correlation
parameters found on the whole dataset.
"""
print(__doc__)
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# Licence: BSD 3 clause
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcess
from sklearn.cross_validation import cross_val_score, KFold
# Load the dataset from scikit's data sets
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# Instantiate a GP model
gp = GaussianProcess(regr='constant', corr='absolute_exponential',
theta0=[1e-4] * 10, thetaL=[1e-12] * 10,
thetaU=[1e-2] * 10, nugget=1e-2, optimizer='Welch')
# Fit the GP model to the data performing maximum likelihood estimation
gp.fit(X, y)
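# Illustrative inspection of the correlation parameters found by MLE on the whole
# dataset; these are the values reused below once MLE is deactivated
print("MLE correlation parameters: %s" % gp.theta_)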
# Deactivate maximum likelihood estimation for the cross-validation loop
gp.theta0 = gp.theta_ # Given correlation parameter = MLE
gp.thetaL, gp.thetaU = None, None # None bounds deactivate MLE
# Perform a cross-validation estimate of the coefficient of determination
# using the cross_validation module (here with a single job, n_jobs=1)
K = 20 # folds
R2 = cross_val_score(gp, X, y=y, cv=KFold(y.size, K), n_jobs=1).mean()
print("The %d-Folds estimate of the coefficient of determination is R2 = %s"
% (K, R2))
|
bsd-3-clause
|
Djabbz/scikit-learn
|
examples/hetero_feature_union.py
|
288
|
6236
|
"""
=============================================
Feature Union with Heterogeneous Data Sources
=============================================
Datasets can often contain components that require different feature
extraction and processing pipelines. This scenario might occur when:
1. Your dataset consists of heterogeneous data types (e.g. raster images and
text captions)
2. Your dataset is stored in a Pandas DataFrame and different columns
require different processing pipelines.
This example demonstrates how to use
:class:`sklearn.feature_extraction.FeatureUnion` on a dataset containing
different types of features. We use the 20-newsgroups dataset and compute
standard bag-of-words features for the subject line and body in separate
pipelines as well as ad hoc features on the body. We combine them (with
weights) using a FeatureUnion and finally train a classifier on the combined
set of features.
The choice of features is not particularly helpful, but serves to illustrate
the technique.
"""
# Author: Matt Terry <matt.terry@gmail.com>
#
# License: BSD 3 clause
from __future__ import print_function
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_footer
from sklearn.datasets.twenty_newsgroups import strip_newsgroup_quoting
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC
class ItemSelector(BaseEstimator, TransformerMixin):
"""For data grouped by feature, select subset of data at a provided key.
The data is expected to be stored in a 2D data structure, where the first
index is over features and the second is over samples. i.e.
>> len(data[key]) == n_samples
Please note that this is the opposite convention to sklearn feature
    matrices (where the first index corresponds to sample).
ItemSelector only requires that the collection implement getitem
(data[key]). Examples include: a dict of lists, 2D numpy array, Pandas
DataFrame, numpy record array, etc.
>> data = {'a': [1, 5, 2, 5, 2, 8],
'b': [9, 4, 1, 4, 1, 3]}
>> ds = ItemSelector(key='a')
>> data['a'] == ds.transform(data)
ItemSelector is not designed to handle data grouped by sample. (e.g. a
list of dicts). If your data is structured this way, consider a
transformer along the lines of `sklearn.feature_extraction.DictVectorizer`.
Parameters
----------
key : hashable, required
The key corresponding to the desired value in a mappable.
"""
def __init__(self, key):
self.key = key
def fit(self, x, y=None):
return self
def transform(self, data_dict):
return data_dict[self.key]
class TextStats(BaseEstimator, TransformerMixin):
"""Extract features from each document for DictVectorizer"""
def fit(self, x, y=None):
return self
def transform(self, posts):
return [{'length': len(text),
'num_sentences': text.count('.')}
for text in posts]
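    # e.g. TextStats().transform(['Hi. Bye.']) -> [{'length': 8, 'num_sentences': 2}]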
class SubjectBodyExtractor(BaseEstimator, TransformerMixin):
"""Extract the subject & body from a usenet post in a single pass.
Takes a sequence of strings and produces a dict of sequences. Keys are
`subject` and `body`.
"""
def fit(self, x, y=None):
return self
def transform(self, posts):
features = np.recarray(shape=(len(posts),),
dtype=[('subject', object), ('body', object)])
for i, text in enumerate(posts):
headers, _, bod = text.partition('\n\n')
bod = strip_newsgroup_footer(bod)
bod = strip_newsgroup_quoting(bod)
features['body'][i] = bod
prefix = 'Subject:'
sub = ''
for line in headers.split('\n'):
if line.startswith(prefix):
sub = line[len(prefix):]
break
features['subject'][i] = sub
return features
pipeline = Pipeline([
# Extract the subject & body
('subjectbody', SubjectBodyExtractor()),
# Use FeatureUnion to combine the features from subject and body
('union', FeatureUnion(
transformer_list=[
# Pipeline for pulling features from the post's subject line
('subject', Pipeline([
('selector', ItemSelector(key='subject')),
('tfidf', TfidfVectorizer(min_df=50)),
])),
# Pipeline for standard bag-of-words model for body
('body_bow', Pipeline([
('selector', ItemSelector(key='body')),
('tfidf', TfidfVectorizer()),
('best', TruncatedSVD(n_components=50)),
])),
# Pipeline for pulling ad hoc features from post's body
('body_stats', Pipeline([
('selector', ItemSelector(key='body')),
('stats', TextStats()), # returns a list of dicts
('vect', DictVectorizer()), # list of dicts -> feature matrix
])),
],
# weight components in FeatureUnion
transformer_weights={
'subject': 0.8,
'body_bow': 0.5,
'body_stats': 1.0,
},
)),
# Use a SVC classifier on the combined features
('svc', SVC(kernel='linear')),
])
# Limit the list of categories to make running this example faster.
categories = ['alt.atheism', 'talk.religion.misc']
train = fetch_20newsgroups(random_state=1,
subset='train',
categories=categories,
)
test = fetch_20newsgroups(random_state=1,
subset='test',
categories=categories,
)
pipeline.fit(train.data, train.target)
y = pipeline.predict(test.data)
print(classification_report(test.target, y))
|
bsd-3-clause
|
466152112/scikit-learn
|
sklearn/neighbors/tests/test_approximate.py
|
142
|
18692
|
"""
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
    # Accuracies should be non-decreasing as n_candidates increases
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
lshf.fit(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
    # Accuracies should be non-decreasing as n_estimators increases
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
    # Checks whether returned distances are less than `radius`.
    # At least one point should be returned when `radius` is set to the
    # mean distance from the query point to the other points in the
    # dataset.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
lshf.fit(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)]
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)]
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [1., 0.]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
    # The second point forms an angle of 45 degrees with the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
    # The third point is orthogonal to the query vector, hence at a distance
    # of exactly one:
assert_almost_equal(dists[2], 1)
    # The last point is almost collinear with the query but with opposite
    # sign, therefore it has a cosine 'distance' very close to the maximum
    # possible value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
    # If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
lshf.fit(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)]
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
lshf.fit(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
    # Checks whether the inserted array is consistent with the fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
lshf.partial_fit(X)
assert_array_equal(X, lshf._fit_X)
lshf.fit(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
lshf.partial_fit(X_partial_fit)
    # size of _input_array = n_samples + n_samples_partial_fit after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
    # size of original_indices_[0] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
    # size of trees_[1] = n_samples + n_samples_partial_fit
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
# should be different from flattened array of hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
lshf.fit(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
lshf.fit(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
lshf.fit(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
|
bsd-3-clause
|
ahaberlie/MetPy
|
src/metpy/xarray.py
|
1
|
42403
|
# Copyright (c) 2018,2019 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Provide accessors to enhance interoperability between xarray and MetPy.
MetPy relies upon the `CF Conventions <http://cfconventions.org/>`_. to provide helpful
attributes and methods on xarray DataArrays and Dataset for working with
coordinate-related metadata. Also included are several attributes and methods for unit
operations.
These accessors will be activated with any import of MetPy. Do not use the
``MetPyDataArrayAccessor`` or ``MetPyDatasetAccessor`` classes directly, instead, utilize the
applicable properties and methods via the ``.metpy`` attribute on an xarray DataArray or
Dataset.
See Also: :doc:`xarray with MetPy Tutorial </tutorials/xarray_tutorial>`.
"""
import functools
import logging
import re
import warnings
import cartopy.crs as ccrs
import numpy as np
import xarray as xr
from ._vendor.xarray import either_dict_or_kwargs, expanded_indexer, is_dict_like
from .units import DimensionalityError, UndefinedUnitError, units
__all__ = []
metpy_axes = ['time', 'vertical', 'y', 'latitude', 'x', 'longitude']
# Define the criteria for coordinate matches
coordinate_criteria = {
'standard_name': {
'time': 'time',
'vertical': {'air_pressure', 'height', 'geopotential_height', 'altitude',
'model_level_number', 'atmosphere_ln_pressure_coordinate',
'atmosphere_sigma_coordinate',
'atmosphere_hybrid_sigma_pressure_coordinate',
'atmosphere_hybrid_height_coordinate', 'atmosphere_sleve_coordinate',
'height_above_geopotential_datum', 'height_above_reference_ellipsoid',
'height_above_mean_sea_level'},
'y': 'projection_y_coordinate',
'latitude': 'latitude',
'x': 'projection_x_coordinate',
'longitude': 'longitude'
},
'_CoordinateAxisType': {
'time': 'Time',
'vertical': {'GeoZ', 'Height', 'Pressure'},
'y': 'GeoY',
'latitude': 'Lat',
'x': 'GeoX',
'longitude': 'Lon'
},
'axis': {
'time': 'T',
'vertical': 'Z',
'y': 'Y',
'x': 'X'
},
'positive': {
'vertical': {'up', 'down'}
},
'units': {
'vertical': {
'match': 'dimensionality',
'units': 'Pa'
},
'latitude': {
'match': 'name',
'units': {'degree_north', 'degree_N', 'degreeN', 'degrees_north', 'degrees_N',
'degreesN'}
},
'longitude': {
'match': 'name',
'units': {'degree_east', 'degree_E', 'degreeE', 'degrees_east', 'degrees_E',
'degreesE'}
},
},
'regular_expression': {
'time': r'time[0-9]*',
'vertical': (r'(lv_|bottom_top|sigma|h(ei)?ght|altitude|depth|isobaric|pres|'
r'isotherm)[a-z_]*[0-9]*'),
'y': r'y',
'latitude': r'x?lat[a-z0-9]*',
'x': r'x',
'longitude': r'x?lon[a-z0-9]*'
}
}
log = logging.getLogger(__name__)
_axis_identifier_error = ('Given axis is not valid. Must be an axis number, a dimension '
'coordinate name, or a standard axis type.')
@xr.register_dataarray_accessor('metpy')
class MetPyDataArrayAccessor:
r"""Provide custom attributes and methods on xarray DataArrays for MetPy functionality.
This accessor provides several convenient attributes and methods through the `.metpy`
attribute on a DataArray. For example, MetPy can identify the coordinate corresponding
    to a particular axis (given sufficient metadata):
>>> import xarray as xr
>>> temperature = xr.DataArray([[0, 1], [2, 3]], dims=('lat', 'lon'),
... coords={'lat': [40, 41], 'lon': [-105, -104]},
... attrs={'units': 'degC'})
>>> temperature.metpy.x
<xarray.DataArray 'lon' (lon: 2)>
array([-105, -104])
Coordinates:
* lon (lon) int64 -105 -104
Attributes:
_metpy_axis: x,longitude
"""
def __init__(self, data_array): # noqa: D107
# Initialize accessor with a DataArray. (Do not use directly).
self._data_array = data_array
self._units = self._data_array.attrs.get('units', 'dimensionless')
@property
def units(self):
"""Return the units of this DataArray as a `pint.Quantity`."""
if self._units != '%':
return units(self._units)
else:
return units('percent')
@property
def unit_array(self):
"""Return the data values of this DataArray as a `pint.Quantity`."""
return self._data_array.values * self.units
@unit_array.setter
def unit_array(self, values):
"""Set data values from a `pint.Quantity`."""
self._data_array.values = values.magnitude
self._units = self._data_array.attrs['units'] = str(values.units)
def convert_units(self, units):
"""Convert the data values to different units in-place."""
self.unit_array = self.unit_array.to(units)
return self._data_array # allow method chaining
@property
def crs(self):
"""Return the coordinate reference system (CRS) as a CFProjection object."""
if 'crs' in self._data_array.coords:
return self._data_array.coords['crs'].item()
raise AttributeError('crs attribute is not available.')
@property
def cartopy_crs(self):
"""Return the coordinate reference system (CRS) as a cartopy object."""
return self.crs.to_cartopy()
@property
def cartopy_globe(self):
"""Return the globe belonging to the coordinate reference system (CRS)."""
return self.crs.cartopy_globe
def _fixup_coordinate_map(self, coord_map):
"""Ensure sure we have coordinate variables in map, not coordinate names."""
for axis in coord_map:
if coord_map[axis] is not None and not isinstance(coord_map[axis], xr.DataArray):
coord_map[axis] = self._data_array[coord_map[axis]]
return coord_map
def assign_coordinates(self, coordinates):
"""Assign the given coordinates to the given MetPy axis types.
Parameters
----------
coordinates : dict or None
Mapping from axis types ('time', 'vertical', 'y', 'latitude', 'x', 'longitude') to
coordinates of this DataArray. Coordinates can either be specified directly or by
their name. If ``None``, clears the `_metpy_axis` attribute on all coordinates,
which will trigger reparsing of all coordinates on next access.
"""
if coordinates:
# Assign the _metpy_axis attributes according to supplied mapping
coordinates = self._fixup_coordinate_map(coordinates)
for axis in coordinates:
if coordinates[axis] is not None:
_assign_axis(coordinates[axis].attrs, axis)
else:
# Clear _metpy_axis attribute on all coordinates
for coord_var in self._data_array.coords.values():
coord_var.attrs.pop('_metpy_axis', None)
return self._data_array # allow method chaining
def _generate_coordinate_map(self):
"""Generate a coordinate map via CF conventions and other methods."""
coords = self._data_array.coords.values()
# Parse all the coordinates, attempting to identify x, longitude, y, latitude,
# vertical, time
coord_lists = {'time': [], 'vertical': [], 'y': [], 'latitude': [], 'x': [],
'longitude': []}
for coord_var in coords:
# Identify the coordinate type using check_axis helper
for axis in coord_lists:
if check_axis(coord_var, axis):
coord_lists[axis].append(coord_var)
# Fill in x/y with longitude/latitude if x/y not otherwise present
for geometric, graticule in (('y', 'latitude'), ('x', 'longitude')):
if len(coord_lists[geometric]) == 0 and len(coord_lists[graticule]) > 0:
coord_lists[geometric] = coord_lists[graticule]
# Filter out multidimensional coordinates where not allowed
require_1d_coord = ['time', 'vertical', 'y', 'x']
for axis in require_1d_coord:
coord_lists[axis] = [coord for coord in coord_lists[axis] if coord.ndim <= 1]
# Resolve any coordinate type duplication
axis_duplicates = [axis for axis in coord_lists if len(coord_lists[axis]) > 1]
for axis in axis_duplicates:
self._resolve_axis_duplicates(axis, coord_lists)
# Collapse the coord_lists to a coord_map
return {axis: (coord_lists[axis][0] if len(coord_lists[axis]) > 0 else None)
for axis in coord_lists}
def _resolve_axis_duplicates(self, axis, coord_lists):
"""Handle coordinate duplication for an axis type if it arises."""
# If one and only one of the possible axes is a dimension, use it
dimension_coords = [coord_var for coord_var in coord_lists[axis] if
coord_var.name in coord_var.dims]
if len(dimension_coords) == 1:
coord_lists[axis] = dimension_coords
return
# Ambiguous axis, raise warning and do not parse
varname = (' "' + self._data_array.name + '"'
if self._data_array.name is not None else '')
warnings.warn('More than one ' + axis + ' coordinate present for variable'
+ varname + '.')
coord_lists[axis] = []
def _metpy_axis_search(self, metpy_axis):
"""Search for cached _metpy_axis attribute on the coordinates, otherwise parse."""
# Search for coord with proper _metpy_axis
coords = self._data_array.coords.values()
for coord_var in coords:
if metpy_axis in coord_var.attrs.get('_metpy_axis', '').split(','):
return coord_var
# Opportunistically parse all coordinates, and assign if not already assigned
coord_map = self._generate_coordinate_map()
for axis, coord_var in coord_map.items():
if (coord_var is not None
and not any(axis in coord.attrs.get('_metpy_axis', '').split(',')
for coord in coords)):
_assign_axis(coord_var.attrs, axis)
# Return parsed result (can be None if none found)
return coord_map[metpy_axis]
def _axis(self, axis):
"""Return the coordinate variable corresponding to the given individual axis type."""
if axis in metpy_axes:
coord_var = self._metpy_axis_search(axis)
if coord_var is not None:
return coord_var
else:
raise AttributeError(axis + ' attribute is not available.')
else:
raise AttributeError("'" + axis + "' is not an interpretable axis.")
def coordinates(self, *args):
"""Return the coordinate variables corresponding to the given axes types.
Parameters
----------
args : str
Strings describing the axes type(s) to obtain. Currently understood types are
'time', 'vertical', 'y', 'latitude', 'x', and 'longitude'.
Notes
-----
This method is designed for use with multiple coordinates; it returns a generator. To
access a single coordinate, use the appropriate attribute on the accessor, or use tuple
unpacking.
"""
for arg in args:
yield self._axis(arg)
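        # Illustrative usage with a hypothetical DataArray ``da``: x, y = da.metpy.coordinates('x', 'y')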
@property
def time(self):
"""Return the time coordinate."""
return self._axis('time')
@property
def vertical(self):
"""Return the vertical coordinate."""
return self._axis('vertical')
@property
def y(self):
"""Return the y coordinate."""
return self._axis('y')
@property
def latitude(self):
"""Return the latitude coordinate (if it exists)."""
return self._axis('latitude')
@property
def x(self):
"""Return the x coordinate."""
return self._axis('x')
@property
def longitude(self):
"""Return the longitude coordinate (if it exists)."""
return self._axis('longitude')
def coordinates_identical(self, other):
"""Return whether or not the coordinates of other match this DataArray's."""
        # If the number of coordinates does not match, we know they can't match.
if len(self._data_array.coords) != len(other.coords):
return False
# If same length, iterate over all of them and check
for coord_name, coord_var in self._data_array.coords.items():
if coord_name not in other.coords or not other[coord_name].identical(coord_var):
return False
# Otherwise, they match.
return True
@property
def time_deltas(self):
"""Return the time difference of the data in seconds (to microsecond precision)."""
return (np.diff(self._data_array.values).astype('timedelta64[us]').astype('int64')
/ 1e6 * units.s)
def find_axis_name(self, axis):
"""Return the name of the axis corresponding to the given identifier.
Parameters
----------
axis : str or int
Identifier for an axis. Can be an axis number (integer), dimension coordinate
name (string) or a standard axis type (string).
"""
if isinstance(axis, int):
# If an integer, use the corresponding dimension
return self._data_array.dims[axis]
elif axis not in self._data_array.dims and axis in metpy_axes:
# If not a dimension name itself, but a valid axis type, get the name of the
# coordinate corresponding to that axis type
return self._axis(axis).name
elif axis in self._data_array.dims and axis in self._data_array.coords:
# If this is a dimension coordinate name, use it directly
return axis
else:
# Otherwise, not valid
raise ValueError(_axis_identifier_error)
def find_axis_number(self, axis):
"""Return the dimension number of the axis corresponding to the given identifier.
Parameters
----------
axis : str or int
Identifier for an axis. Can be an axis number (integer), dimension coordinate
name (string) or a standard axis type (string).
"""
if isinstance(axis, int):
# If an integer, use it directly
return axis
elif axis in self._data_array.dims:
# Simply index into dims
return self._data_array.dims.index(axis)
elif axis in metpy_axes:
# If not a dimension name itself, but a valid axis type, first determine if this
# standard axis type is present as a dimension coordinate
try:
name = self._axis(axis).name
return self._data_array.dims.index(name)
except AttributeError as exc:
# If x or y requested, but x or y not available, attempt to interpret dim
# names using regular expressions from coordinate parsing to allow for
# multidimensional lat/lon without y/x dimension coordinates
if axis in ('y', 'x'):
for i, dim in enumerate(self._data_array.dims):
if re.match(coordinate_criteria['regular_expression'][axis],
dim.lower()):
return i
raise exc
except ValueError:
# Intercept ValueError when axis type found but not dimension coordinate
raise AttributeError(f'Requested {axis} dimension coordinate but {axis} '
f'coordinate {name} is not a dimension')
else:
# Otherwise, not valid
raise ValueError(_axis_identifier_error)
class _LocIndexer:
"""Provide the unit-wrapped .loc indexer for data arrays."""
def __init__(self, data_array):
self.data_array = data_array
def expand(self, key):
"""Parse key using xarray utils to ensure we have dimension names."""
if not is_dict_like(key):
labels = expanded_indexer(key, self.data_array.ndim)
key = dict(zip(self.data_array.dims, labels))
return key
def __getitem__(self, key):
key = _reassign_quantity_indexer(self.data_array, self.expand(key))
return self.data_array.loc[key]
def __setitem__(self, key, value):
key = _reassign_quantity_indexer(self.data_array, self.expand(key))
self.data_array.loc[key] = value
@property
def loc(self):
"""Wrap DataArray.loc with an indexer to handle units and coordinate types."""
return self._LocIndexer(self._data_array)
def sel(self, indexers=None, method=None, tolerance=None, drop=False, **indexers_kwargs):
"""Wrap DataArray.sel to handle units and coordinate types."""
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, 'sel')
indexers = _reassign_quantity_indexer(self._data_array, indexers)
return self._data_array.sel(indexers, method=method, tolerance=tolerance, drop=drop)
def assign_crs(self, cf_attributes=None, **kwargs):
"""Assign a CRS to this DataArray based on CF projection attributes.
Parameters
----------
cf_attributes : dict, optional
Dictionary of CF projection attributes
kwargs : optional
CF projection attributes specified as keyword arguments
Returns
-------
`xarray.DataArray`
New xarray DataArray with CRS coordinate assigned
Notes
-----
CF projection arguments should be supplied as a dictionary or collection of kwargs,
but not both.
"""
return _assign_crs(self._data_array, cf_attributes, kwargs)
def assign_latitude_longitude(self, force=False):
"""Assign latitude and longitude coordinates derived from y and x coordinates.
Parameters
----------
force : bool, optional
If force is true, overwrite latitude and longitude coordinates if they exist,
otherwise, raise a RuntimeError if such coordinates exist.
Returns
-------
`xarray.DataArray`
            New xarray DataArray with latitude and longitude auxiliary coordinates assigned.
Notes
-----
A valid CRS coordinate must be present. Cartopy is used for the coordinate
transformations.
"""
# Check for existing latitude and longitude coords
        if (not force and (self._metpy_axis_search('latitude') is not None
                           or self._metpy_axis_search('longitude') is not None)):
raise RuntimeError('Latitude/longitude coordinate(s) are present. If you wish to '
'overwrite these, specify force=True.')
# Build new latitude and longitude DataArrays
latitude, longitude = _build_latitude_longitude(self._data_array)
# Assign new coordinates, refresh MetPy's parsed axis attribute, and return result
new_dataarray = self._data_array.assign_coords(latitude=latitude, longitude=longitude)
return new_dataarray.metpy.assign_coordinates(None)
def assign_y_x(self, force=False, tolerance=None):
"""Assign y and x dimension coordinates derived from 2D latitude and longitude.
Parameters
----------
force : bool, optional
If force is true, overwrite y and x coordinates if they exist, otherwise, raise a
RuntimeError if such coordinates exist.
tolerance : `pint.Quantity`
Maximum range tolerated when collapsing projected y and x coordinates from 2D to
1D. Defaults to 1 meter.
Returns
-------
`xarray.DataArray`
New xarray DataArray with y and x dimension coordinates assigned.
Notes
-----
A valid CRS coordinate must be present. Cartopy is used for the coordinate
transformations.
"""
# Check for existing latitude and longitude coords
        if (not force and (self._metpy_axis_search('y') is not None
                           or self._metpy_axis_search('x') is not None)):
raise RuntimeError('y/x coordinate(s) are present. If you wish to overwrite '
'these, specify force=True.')
# Build new y and x DataArrays
y, x = _build_y_x(self._data_array, tolerance)
# Assign new coordinates, refresh MetPy's parsed axis attribute, and return result
new_dataarray = self._data_array.assign_coords(**{y.name: y, x.name: x})
return new_dataarray.metpy.assign_coordinates(None)
@xr.register_dataset_accessor('metpy')
class MetPyDatasetAccessor:
"""Provide custom attributes and methods on XArray Datasets for MetPy functionality.
This accessor provides parsing of CF metadata and unit-/coordinate-type-aware selection.
>>> import xarray as xr
>>> from metpy.testing import get_test_data
>>> ds = xr.open_dataset(get_test_data('narr_example.nc', False)).metpy.parse_cf()
>>> print(ds['crs'].item())
Projection: lambert_conformal_conic
"""
def __init__(self, dataset): # noqa: D107
# Initialize accessor with a Dataset. (Do not use directly).
self._dataset = dataset
def parse_cf(self, varname=None, coordinates=None):
"""Parse Climate and Forecasting (CF) convention metadata.
Parameters
----------
varname : str or iterable of str, optional
Name of the variable(s) to extract from the dataset while parsing for CF metadata.
Defaults to all variables.
coordinates : dict, optional
Dictionary mapping CF axis types to coordinates of the variable(s). Only specify
if you wish to override MetPy's automatic parsing of some axis type(s).
Returns
-------
`xarray.DataArray` or `xarray.Dataset`
Parsed DataArray (if varname is a string) or Dataset
"""
from .cbook import iterable
from .plots.mapping import CFProjection
if varname is None:
# If no varname is given, parse all variables in the dataset
varname = list(self._dataset.data_vars)
if iterable(varname) and not isinstance(varname, str):
# If non-string iterable is given, apply recursively across the varnames
subset = xr.merge([self.parse_cf(single_varname, coordinates=coordinates)
for single_varname in varname])
subset.attrs = self._dataset.attrs
return subset
var = self._dataset[varname]
# Assign coordinates if the coordinates argument is given
if coordinates is not None:
var.metpy.assign_coordinates(coordinates)
# Attempt to build the crs coordinate
crs = None
if 'grid_mapping' in var.attrs:
# Use given CF grid_mapping
proj_name = var.attrs['grid_mapping']
try:
proj_var = self._dataset.variables[proj_name]
except KeyError:
log.warning(
'Could not find variable corresponding to the value of '
'grid_mapping: {}'.format(proj_name))
else:
crs = CFProjection(proj_var.attrs)
if crs is None and not check_axis(var, 'latitude', 'longitude'):
# This isn't a lat or lon coordinate itself, so determine if we need to fall back
# to creating a latitude_longitude CRS. We do so if there exists valid coordinates
# for latitude and longitude, even if they are not the dimension coordinates of
# the variable.
def _has_coord(coord_type):
return any(check_axis(coord_var, coord_type)
for coord_var in var.coords.values())
if _has_coord('latitude') and _has_coord('longitude'):
crs = CFProjection({'grid_mapping_name': 'latitude_longitude'})
log.warning('Found valid latitude/longitude coordinates, assuming '
'latitude_longitude for projection grid_mapping variable')
# Rebuild the coordinates of the dataarray, and return
coords = dict(self._rebuild_coords(var, crs))
if crs is not None:
coords['crs'] = crs
return var.assign_coords(**coords)
def _rebuild_coords(self, var, crs):
"""Clean up the units on the coordinate variables."""
for coord_name, coord_var in var.coords.items():
if (check_axis(coord_var, 'x', 'y')
and not check_axis(coord_var, 'longitude', 'latitude')):
try:
# Cannot modify an index inplace, so use copy
yield coord_name, coord_var.copy().metpy.convert_units('meters')
except DimensionalityError:
# Radians! Attempt to use perspective point height conversion
if crs is not None:
new_coord_var = coord_var.copy()
height = crs['perspective_point_height']
scaled_vals = new_coord_var.metpy.unit_array * (height * units.meters)
new_coord_var.metpy.unit_array = scaled_vals.to('meters')
yield coord_name, new_coord_var
else:
# Do nothing
yield coord_name, coord_var
class _LocIndexer:
"""Provide the unit-wrapped .loc indexer for datasets."""
def __init__(self, dataset):
self.dataset = dataset
def __getitem__(self, key):
parsed_key = _reassign_quantity_indexer(self.dataset, key)
return self.dataset.loc[parsed_key]
@property
def loc(self):
"""Wrap Dataset.loc with an indexer to handle units and coordinate types."""
return self._LocIndexer(self._dataset)
def sel(self, indexers=None, method=None, tolerance=None, drop=False, **indexers_kwargs):
"""Wrap Dataset.sel to handle units."""
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, 'sel')
indexers = _reassign_quantity_indexer(self._dataset, indexers)
return self._dataset.sel(indexers, method=method, tolerance=tolerance, drop=drop)
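# A minimal usage sketch for the unit-aware .sel wrapper; the coordinate name
# 'isobaric' and the level value are illustrative assumptions:
#
#     ds.metpy.sel(isobaric=500. * units.hPa)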
def assign_crs(self, cf_attributes=None, **kwargs):
"""Assign a CRS to this Dataset based on CF projection attributes.
Parameters
----------
cf_attributes : dict, optional
Dictionary of CF projection attributes
kwargs : optional
CF projection attributes specified as keyword arguments
Returns
-------
`xarray.Dataset`
New xarray Dataset with CRS coordinate assigned
Notes
-----
CF projection arguments should be supplied as a dictionary or collection of kwargs,
but not both.
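Examples
--------
A minimal sketch, assuming a dataset whose file lacks a CF grid mapping
variable (the attribute values below are illustrative)::
    ds = ds.metpy.assign_crs(grid_mapping_name='latitude_longitude',
                             earth_radius=6371229.0)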
"""
return _assign_crs(self._dataset, cf_attributes, kwargs)
def assign_latitude_longitude(self, force=False):
"""Assign latitude and longitude coordinates derived from y and x coordinates.
Parameters
----------
force : bool, optional
If force is true, overwrite latitude and longitude coordinates if they exist,
otherwise, raise a RuntimeError if such coordinates exist.
Returns
-------
`xarray.Dataset`
New xarray Dataset with latitude and longitude coordinates assigned to all
variables with y and x coordinates.
Notes
-----
A valid CRS coordinate must be present. Cartopy is used for the coordinate
transformations.
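Examples
--------
A minimal sketch, assuming ``ds`` already carries a valid ``crs`` coordinate
and projected y/x coordinates::
    ds = ds.metpy.assign_latitude_longitude()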
"""
# Determine if there is a valid grid prototype from which to compute the coordinates,
# while also checking for existing lat/lon coords
grid_prototype = None
for data_var in self._dataset.data_vars.values():
if hasattr(data_var.metpy, 'y') and hasattr(data_var.metpy, 'x'):
if grid_prototype is None:
grid_prototype = data_var
if (not force and (hasattr(data_var.metpy, 'latitude')
or hasattr(data_var.metpy, 'longitude'))):
raise RuntimeError('Latitude/longitude coordinate(s) are present. If you '
'wish to overwrite these, specify force=True.')
# Calculate latitude and longitude from grid_prototype, if it exists, and assign
if grid_prototype is None:
warnings.warn('No latitude and longitude assigned since horizontal coordinates '
'were not found')
return self._dataset
else:
latitude, longitude = _build_latitude_longitude(grid_prototype)
return self._dataset.assign_coords(latitude=latitude, longitude=longitude)
def assign_y_x(self, force=False, tolerance=None):
"""Assign y and x dimension coordinates derived from 2D latitude and longitude.
Parameters
----------
force : bool, optional
If force is true, overwrite y and x coordinates if they exist, otherwise, raise a
RuntimeError if such coordinates exist.
tolerance : `pint.Quantity`
Maximum range tolerated when collapsing projected y and x coordinates from 2D to
1D. Defaults to 1 meter.
Returns
-------
`xarray.Dataset`
New xarray Dataset with y and x dimension coordinates assigned to all variables
with valid latitude and longitude coordinates.
Notes
-----
A valid CRS coordinate must be present. Cartopy is used for the coordinate
transformations.
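Examples
--------
A minimal sketch, assuming ``ds`` carries a valid ``crs`` coordinate and 2D
latitude/longitude coordinates (the tolerance value is illustrative)::
    ds = ds.metpy.assign_y_x(tolerance=10 * units.m)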
"""
# Determine if there is a valid grid prototype from which to compute the coordinates,
# while also checking for existing y and x coords
grid_prototype = None
for data_var in self._dataset.data_vars.values():
if hasattr(data_var.metpy, 'latitude') and hasattr(data_var.metpy, 'longitude'):
if grid_prototype is None:
grid_prototype = data_var
if (not force and (hasattr(data_var.metpy, 'y')
or hasattr(data_var.metpy, 'x'))):
raise RuntimeError('y/x coordinate(s) are present. If you wish to '
'overwrite these, specify force=True.')
# Calculate y and x from grid_prototype, if it exists, and assign
if grid_prototype is None:
warnings.warn('No y and x coordinates assigned since horizontal coordinates '
'were not found')
return self._dataset
else:
y, x = _build_y_x(grid_prototype, tolerance)
return self._dataset.assign_coords(**{y.name: y, x.name: x})
def update_attribute(self, attribute, mapping):
"""Update attribute of all Dataset variables.
Parameters
----------
attribute : str
Name of attribute to update
mapping : dict or callable
Either a dict, with keys as variable names and values as attribute values to set,
or a callable, which must accept one positional argument (variable name) and
arbitrary keyword arguments (all existing variable attributes). If a variable name
is not present in the dict, or the callable returns None, the attribute will not be updated.
Returns
-------
`xarray.Dataset`
Dataset with attribute updated (modified in place, and returned to allow method
chaining)
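Examples
--------
Two equivalent sketches for setting a ``units`` attribute; the variable name
``temperature`` is an illustrative assumption::
    ds.metpy.update_attribute('units', {'temperature': 'kelvin'})
    ds.metpy.update_attribute(
        'units', lambda name, **attrs: 'kelvin' if name == 'temperature' else None)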
"""
# Make mapping uniform
if callable(mapping):
mapping_func = mapping
else:
def mapping_func(varname, **kwargs):
return mapping.get(varname, None)
# Apply across all variables
for varname in list(self._dataset.data_vars) + list(self._dataset.coords):
value = mapping_func(varname, **self._dataset[varname].attrs)
if value is not None:
self._dataset[varname].attrs[attribute] = value
return self._dataset
def _assign_axis(attributes, axis):
"""Assign the given axis to the _metpy_axis attribute."""
existing_axes = attributes.get('_metpy_axis', '').split(',')
if ((axis == 'y' and 'latitude' in existing_axes)
or (axis == 'latitude' and 'y' in existing_axes)):
# Special case for combined y/latitude handling
attributes['_metpy_axis'] = 'y,latitude'
elif ((axis == 'x' and 'longitude' in existing_axes)
or (axis == 'longitude' and 'x' in existing_axes)):
# Special case for combined x/longitude handling
attributes['_metpy_axis'] = 'x,longitude'
else:
# Simply add it/overwrite past value
attributes['_metpy_axis'] = axis
return attributes
def check_axis(var, *axes):
"""Check if the criteria for any of the given axes are satisfied.
Parameters
----------
var : `xarray.DataArray`
DataArray belonging to the coordinate to be checked
axes : str
Axis type(s) to check for. Currently can check for 'time', 'vertical', 'y', 'latitude',
'x', and 'longitude'.
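Examples
--------
A minimal sketch; ``coord_var`` stands for any coordinate DataArray of the
dataset (an illustrative assumption)::
    is_vertical = check_axis(coord_var, 'vertical')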
"""
for axis in axes:
# Check for
# - standard name (CF option)
# - _CoordinateAxisType (from THREDDS)
# - axis (CF option)
# - positive (CF standard for non-pressure vertical coordinate)
for criterion in ('standard_name', '_CoordinateAxisType', 'axis', 'positive'):
if (var.attrs.get(criterion, 'absent') in
coordinate_criteria[criterion].get(axis, set())):
return True
# Check for units, either by dimensionality or name
try:
if (axis in coordinate_criteria['units'] and (
(
coordinate_criteria['units'][axis]['match'] == 'dimensionality'
and (units.get_dimensionality(var.attrs.get('units'))
== units.get_dimensionality(
coordinate_criteria['units'][axis]['units']))
) or (
coordinate_criteria['units'][axis]['match'] == 'name'
and var.attrs.get('units')
in coordinate_criteria['units'][axis]['units']
))):
return True
except UndefinedUnitError:
pass
# Check if name matches regular expression (non-CF failsafe)
if re.match(coordinate_criteria['regular_expression'][axis], var.name.lower()):
return True
# If no match has been made, return False (rather than None)
return False
def _assign_crs(xarray_object, cf_attributes, cf_kwargs):
from .plots.mapping import CFProjection
# Handle argument options
if cf_attributes is not None and len(cf_kwargs) > 0:
raise ValueError('Cannot specify both attribute dictionary and kwargs.')
elif cf_attributes is None and len(cf_kwargs) == 0:
raise ValueError('Must specify either attribute dictionary or kwargs.')
attrs = cf_attributes if cf_attributes is not None else cf_kwargs
# Assign crs coordinate to xarray object
return xarray_object.assign_coords(crs=CFProjection(attrs))
def _build_latitude_longitude(da):
"""Build latitude/longitude coordinates from DataArray's y/x coordinates."""
y, x = da.metpy.coordinates('y', 'x')
xx, yy = np.meshgrid(x.values, y.values)
lonlats = ccrs.Geodetic(globe=da.metpy.cartopy_globe).transform_points(
da.metpy.cartopy_crs, xx, yy)
longitude = xr.DataArray(lonlats[..., 0], dims=(y.name, x.name),
coords={y.name: y, x.name: x},
attrs={'units': 'degrees_east', 'standard_name': 'longitude'})
latitude = xr.DataArray(lonlats[..., 1], dims=(y.name, x.name),
coords={y.name: y, x.name: x},
attrs={'units': 'degrees_north', 'standard_name': 'latitude'})
return latitude, longitude
def _build_y_x(da, tolerance):
"""Build y/x coordinates from DataArray's latitude/longitude coordinates."""
# Initial sanity checks
latitude, longitude = da.metpy.coordinates('latitude', 'longitude')
if latitude.dims != longitude.dims:
raise ValueError('Latitude and longitude must have same dimensionality')
elif latitude.ndim != 2:
raise ValueError('To build 1D y/x coordinates via assign_y_x, latitude/longitude '
'must be 2D')
# Convert to projected y/x
xxyy = da.metpy.cartopy_crs.transform_points(ccrs.Geodetic(da.metpy.cartopy_globe),
longitude.values,
latitude.values)
# Handle tolerance
tolerance = 1 if tolerance is None else tolerance.m_as('m')
# If within tolerance, take median to collapse to 1D
try:
y_dim = latitude.metpy.find_axis_number('y')
x_dim = latitude.metpy.find_axis_number('x')
except AttributeError:
warnings.warn('y and x dimensions unable to be identified. Assuming [..., y, x] '
'dimension order.')
y_dim, x_dim = 0, 1
if (np.all(np.ptp(xxyy[..., 0], axis=y_dim) < tolerance)
and np.all(np.ptp(xxyy[..., 1], axis=x_dim) < tolerance)):
x = np.median(xxyy[..., 0], axis=y_dim)
y = np.median(xxyy[..., 1], axis=x_dim)
x = xr.DataArray(x, name=latitude.dims[x_dim], dims=(latitude.dims[x_dim],),
coords={latitude.dims[x_dim]: x},
attrs={'units': 'meter', 'standard_name': 'projection_x_coordinate'})
y = xr.DataArray(y, name=latitude.dims[y_dim], dims=(latitude.dims[y_dim],),
coords={latitude.dims[y_dim]: y},
attrs={'units': 'meter', 'standard_name': 'projection_y_coordinate'})
return y, x
else:
raise ValueError('Projected y and x coordinates cannot be collapsed to 1D within '
'tolerance. Verify that your latitude and longitude coordinates '
'correspond to your CRS coordinate.')
def preprocess_xarray(func):
"""Decorate a function to convert all DataArray arguments to pint.Quantities.
This uses the metpy xarray accessors to do the actual conversion.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
args = tuple(a.metpy.unit_array if isinstance(a, xr.DataArray) else a for a in args)
kwargs = {name: (v.metpy.unit_array if isinstance(v, xr.DataArray) else v)
for name, v in kwargs.items()}
return func(*args, **kwargs)
return wrapper
def check_matching_coordinates(func):
"""Decorate a function to make sure all given DataArrays have matching coordinates."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
data_arrays = ([a for a in args if isinstance(a, xr.DataArray)]
+ [a for a in kwargs.values() if isinstance(a, xr.DataArray)])
if len(data_arrays) > 1:
first = data_arrays[0]
for other in data_arrays[1:]:
if not first.metpy.coordinates_identical(other):
raise ValueError('Input DataArray arguments must be on same coordinates.')
return func(*args, **kwargs)
return wrapper
# If DatetimeAccessor does not have a strftime (xarray <0.12.2), monkey patch one in
try:
from xarray.core.accessors import DatetimeAccessor
if not hasattr(DatetimeAccessor, 'strftime'):
def strftime(self, date_format):
"""Format time as a string."""
import pandas as pd
values = self._obj.data
values_as_series = pd.Series(values.ravel())
strs = values_as_series.dt.strftime(date_format)
return strs.values.reshape(values.shape)
DatetimeAccessor.strftime = strftime
except ImportError:
pass
def _reassign_quantity_indexer(data, indexers):
"""Reassign a units.Quantity indexer to units of relevant coordinate."""
def _to_magnitude(val, unit):
try:
return val.to(unit).m
except AttributeError:
return val
# Update indexers keys for axis type -> coord name replacement
indexers = {(key if not isinstance(data, xr.DataArray) or key in data.dims
or key not in metpy_axes else
next(data.metpy.coordinates(key)).name): indexers[key]
for key in indexers}
# Update indexers to handle quantities and slices of quantities
reassigned_indexers = {}
for coord_name in indexers:
coord_units = data[coord_name].metpy.units
if isinstance(indexers[coord_name], slice):
# Handle slices of quantities
start = _to_magnitude(indexers[coord_name].start, coord_units)
stop = _to_magnitude(indexers[coord_name].stop, coord_units)
step = _to_magnitude(indexers[coord_name].step, coord_units)
reassigned_indexers[coord_name] = slice(start, stop, step)
else:
# Handle quantities
reassigned_indexers[coord_name] = _to_magnitude(indexers[coord_name], coord_units)
return reassigned_indexers
__all__ = ('MetPyDataArrayAccessor', 'MetPyDatasetAccessor')
|
bsd-3-clause
|
ruacon35/ns3-wireless-planning.ns-3
|
examples/flowmon/wifi-olsr-flowmon.py
|
4
|
6914
|
# -*- Mode: Python; -*-
# Copyright (c) 2009 INESC Porto
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Gustavo Carneiro <gjc@inescporto.pt>
import sys
import ns3
DISTANCE = 150 # (m)
NUM_NODES_SIDE = 3
def main(argv):
cmd = ns3.CommandLine()
cmd.NumNodesSide = None
cmd.AddValue("NumNodesSide", "Grid side number of nodes (total number of nodes will be this number squared)")
cmd.Results = None
cmd.AddValue("Results", "Write XML results to file")
cmd.Plot = None
cmd.AddValue("Plot", "Plot the results using the matplotlib python module")
cmd.Parse(argv)
wifi = ns3.WifiHelper.Default()
wifiMac = ns3.NqosWifiMacHelper.Default()
wifiPhy = ns3.YansWifiPhyHelper.Default()
wifiChannel = ns3.YansWifiChannelHelper.Default()
wifiPhy.SetChannel(wifiChannel.Create())
ssid = ns3.Ssid("wifi-default")
wifi.SetRemoteStationManager("ns3::ArfWifiManager")
wifiMac.SetType ("ns3::AdhocWifiMac", "Ssid", ns3.SsidValue(ssid))
internet = ns3.InternetStackHelper()
list_routing = ns3.Ipv4ListRoutingHelper()
olsr_routing = ns3.OlsrHelper()
static_routing = ns3.Ipv4StaticRoutingHelper()
list_routing.Add(static_routing, 0)
list_routing.Add(olsr_routing, 100)
internet.SetRoutingHelper(list_routing)
ipv4Addresses = ns3.Ipv4AddressHelper()
ipv4Addresses.SetBase(ns3.Ipv4Address("10.0.0.0"), ns3.Ipv4Mask("255.255.255.0"))
port = 9 # Discard port(RFC 863)
onOffHelper = ns3.OnOffHelper("ns3::UdpSocketFactory",
ns3.Address(ns3.InetSocketAddress(ns3.Ipv4Address("10.0.0.1"), port)))
onOffHelper.SetAttribute("DataRate", ns3.DataRateValue(ns3.DataRate("100kbps")))
onOffHelper.SetAttribute("OnTime", ns3.RandomVariableValue(ns3.ConstantVariable(1)))
onOffHelper.SetAttribute("OffTime", ns3.RandomVariableValue(ns3.ConstantVariable(0)))
addresses = []
nodes = []
if cmd.NumNodesSide is None:
num_nodes_side = NUM_NODES_SIDE
else:
num_nodes_side = int(cmd.NumNodesSide)
for xi in range(num_nodes_side):
for yi in range(num_nodes_side):
node = ns3.Node()
nodes.append(node)
internet.Install(ns3.NodeContainer(node))
mobility = ns3.ConstantPositionMobilityModel()
mobility.SetPosition(ns3.Vector(xi*DISTANCE, yi*DISTANCE, 0))
node.AggregateObject(mobility)
devices = wifi.Install(wifiPhy, wifiMac, node)
ipv4_interfaces = ipv4Addresses.Assign(devices)
addresses.append(ipv4_interfaces.GetAddress(0))
for i, node in enumerate(nodes):
destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)]
#print i, destaddr
onOffHelper.SetAttribute("Remote", ns3.AddressValue(ns3.InetSocketAddress(destaddr, port)))
app = onOffHelper.Install(ns3.NodeContainer(node))
app.Start(ns3.Seconds(ns3.UniformVariable(20, 30).GetValue()))
#internet.EnablePcapAll("wifi-olsr")
flowmon_helper = ns3.FlowMonitorHelper()
#flowmon_helper.SetMonitorAttribute("StartTime", ns3.TimeValue(ns3.Seconds(31)))
monitor = flowmon_helper.InstallAll()
monitor.SetAttribute("DelayBinWidth", ns3.DoubleValue(0.001))
monitor.SetAttribute("JitterBinWidth", ns3.DoubleValue(0.001))
monitor.SetAttribute("PacketSizeBinWidth", ns3.DoubleValue(20))
ns3.Simulator.Stop(ns3.Seconds(44.0))
ns3.Simulator.Run()
def print_stats(os, st):
print >> os, " Tx Bytes: ", st.txBytes
print >> os, " Rx Bytes: ", st.rxBytes
print >> os, " Tx Packets: ", st.txPackets
print >> os, " Rx Packets: ", st.rxPackets
print >> os, " Lost Packets: ", st.lostPackets
if st.rxPackets > 0:
print >> os, " Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets)
print >> os, " Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1))
print >> os, " Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1
if 0:
print >> os, "Delay Histogram"
for i in range(st.delayHistogram.GetNBins () ):
print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \
st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i)
print >> os, "Jitter Histogram"
for i in range(st.jitterHistogram.GetNBins () ):
print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \
st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i)
print >> os, "PacketSize Histogram"
for i in range(st.packetSizeHistogram.GetNBins () ):
print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \
st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i)
for reason, drops in enumerate(st.packetsDropped):
print " Packets dropped by reason %i: %i" % (reason, drops)
#for reason, drops in enumerate(st.bytesDropped):
# print "Bytes dropped by reason %i: %i" % (reason, drops)
monitor.CheckForLostPackets()
classifier = flowmon_helper.GetClassifier()
if cmd.Results is None:
for flow_id, flow_stats in monitor.GetFlowStats():
t = classifier.FindFlow(flow_id)
proto = {6: 'TCP', 17: 'UDP'} [t.protocol]
print "FlowID: %i (%s %s/%s --> %s/%i)" % \
(flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort)
print_stats(sys.stdout, flow_stats)
else:
print monitor.SerializeToXmlFile(cmd.Results, True, True)
if cmd.Plot is not None:
import pylab
delays = []
for flow_id, flow_stats in monitor.GetFlowStats():
tupl = classifier.FindFlow(flow_id)
if tupl.protocol == 17 and tupl.sourcePort == 698:
continue
delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets)
pylab.hist(delays, 20)
pylab.xlabel("Delay (s)")
pylab.ylabel("Number of Flows")
pylab.show()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
gpl-2.0
|
vincent-noel/libSigNetSim
|
libsignetsim/data/Experiment.py
|
1
|
4359
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2017 Vincent Noel (vincent.noel@butantan.gov.br)
#
# This file is part of libSigNetSim.
#
# libSigNetSim is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# libSigNetSim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with libSigNetSim. If not, see <http://www.gnu.org/licenses/>.
"""
This file defines the Experiment class: a named collection of experimental conditions, with NuML read/write support and plotting helpers.
"""
from libsignetsim.data.ExperimentalCondition import ExperimentalCondition
from libsignetsim.numl.NuMLDocument import NuMLDocument
from libsignetsim.figure.SigNetSimFigure import SigNetSimFigure
from matplotlib.pyplot import show
class Experiment(object):
def __init__ (self, name=""):
self.listOfConditions = {}
self.currentId = 0
self.name = name
self.notes = ""
def createCondition(self, name=""):
condition = ExperimentalCondition(name)
self.addCondition(condition)
return condition
def addCondition(self, condition):
self.listOfConditions.update({self.currentId: condition})
self.currentId += 1
def readNuMLFromFile(self, filename):
numl_doc = NuMLDocument()
numl_doc.readNuMLFromFile(filename)
result = numl_doc.listOfResultComponents[0]
data = result.getDimensions()[0]
self.name = data.getIndexValue()
self.notes = data.getNotes()
for data_condition in data.getContents():
condition = self.createCondition(data_condition.getIndexValue())
condition.readNuML(data_condition)
def writeNuMLToFile(self, filename):
numl_doc = NuMLDocument()
time_term = numl_doc.listOfOntologyTerms.createOntologyTerm()
time_term.defineAsTime()
concentration_term = numl_doc.listOfOntologyTerms.createOntologyTerm()
concentration_term.defineAsConcentration()
result = numl_doc.listOfResultComponents.createResultComponent()
self.writeNuMLDescription(result)
experiment = result.createCompositeValue(result.getDimensionsDescriptions()[0], self.name)
if self.notes is not None and len(self.notes) > 0:
experiment.setNotes(self.notes)
for condition in list(self.listOfConditions.values()):
t_condition = experiment.createCompositeValue(experiment.getDescription().getContent(), condition.name)
condition.writeNuML(t_condition)
numl_doc.writeNuMLToFile(filename)
def writeNuMLDescription(self, result_component):
desc_experiment = result_component.createCompositeDescription("Experiment", "string")
desc_condition = desc_experiment.createCompositeDescription("Condition", "string")
desc_type = desc_condition.createCompositeDescription("Data type", "string")
desc_time = desc_type.createCompositeDescription("Time", "double")
desc_species = desc_time.createCompositeDescription("Species", "xpath")
desc_values = desc_species.createTupleDescription()
desc_values.createAtomicDescription("Concentration", "double")
desc_values.createAtomicDescription("Standard deviation", "double")
def getMaxTime(self):
max_time = 0
for condition in list(self.listOfConditions.values()):
if condition.getMaxTime() > max_time:
max_time = condition.getMaxTime()
return max_time
def getTimes(self):
times = []
for condition in list(self.listOfConditions.values()):
times += condition.getTimes()
return list(set(times))
def getTreatedVariables(self):
species = []
for condition in list(self.listOfConditions.values()):
species += condition.getTreatedVariables()
if len(species) > 1:
return list(set(species))
else:
return species
def getVariables(self):
species = []
for condition in list(self.listOfConditions.values()):
species += condition.getVariables()
if len(species) > 1:
return list(set(species))
else:
return species
def plot(self, figure=None, suffix="", marker="-"):
if figure is None:
figure = SigNetSimFigure()
plots = []
for condition in list(self.listOfConditions.values()):
plots.append(condition.plot(figure, suffix=suffix, marker=marker))
show()
return plots
|
gpl-3.0
|
gergopokol/renate-od
|
cherab_demos/object_construction.py
|
1
|
4560
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.constants import electron_mass, atomic_mass
from raysect.core import Point3D, Vector3D, translate, rotate_basis
from raysect.primitive import Box
from raysect.optical import World
from cherab.core.math import Interpolate1DCubic, sample1d, sample3d, ConstantVector3D
from cherab.core.math.mappers import Xto3D
from cherab.core import Species, Maxwellian, Plasma, Beam
from cherab.core.atomic import hydrogen
from cherab.core.model import SingleRayAttenuator
from cherab.tools.plasmas.slab import build_slab_plasma
from cherab.openadas import OpenADAS
# create atomic data source
adas = OpenADAS(permit_extrapolation=True)
world = World()
# PLASMA ----------------------------------------------------------------------
plasma = build_slab_plasma(peak_density=5e19, world=world)
integration_step = 0.0025
beam_transform = translate(-0.000001, 0.0, 0) * rotate_basis(Vector3D(1, 0, 0), Vector3D(0, 0, 1))
beam_full = Beam(parent=world, transform=beam_transform)
beam_full.plasma = plasma
beam_full.atomic_data = adas
beam_full.energy = 100000
beam_full.power = 3e6
beam_full.element = hydrogen
beam_full.sigma = 0.05
beam_full.divergence_x = 0.5
beam_full.divergence_y = 0.5
beam_full.length = 3.0
beam_full.attenuator = SingleRayAttenuator(clamp_to_zero=True)
beam_full.integrator.step = integration_step
beam_full.integrator.min_samples = 10
############################
# Try converting to Renate #
import pandas as pd
from lxml import etree
from crm_solver.beamlet import Beamlet
# build species specifications, starting with electrons
charges = [-1]
charges.extend([s.charge for s in plasma.composition if not s.charge == 0])
nuclear_charges = [0]
nuclear_charges.extend([s.element.atomic_number for s in plasma.composition if not s.charge == 0])
atomic_weights = [0]
atomic_weights.extend([int(s.element.atomic_weight) for s in plasma.composition if not s.charge == 0])
index = ['electrons']
index.extend(['ion{}'.format(i+1) for i in range(len(atomic_weights) - 1)])
components = pd.DataFrame(data={'q': charges, 'Z': nuclear_charges, 'A': atomic_weights}, index=index)
beam_axis = np.linspace(0, 5, num=500)
e_densities = [plasma.electron_distribution.density(x, 0, 0) for x in beam_axis]
e_temps = [plasma.electron_distribution.effective_temperature(x, 0, 0) for x in beam_axis]
h1 = plasma.composition[hydrogen, 1]
h1_densities = [h1.distribution.density(x, 0, 0) for x in beam_axis]
h1_temps = [h1.distribution.effective_temperature(x, 0, 0) for x in beam_axis]
profiles_data = np.zeros((5, 500))
profiles_data[0, :] = beam_axis
profiles_data[1, :] = e_densities
profiles_data[2, :] = e_temps
profiles_data[3, :] = h1_densities
profiles_data[4, :] = h1_temps
profiles_data = np.swapaxes(profiles_data, 0, 1)
row_index = [i for i in range(500)]
column_index = pd.MultiIndex.from_arrays([['beamlet grid', 'electron', 'electron', 'ion1', 'ion1'],
['distance', 'density', 'temperature', 'density', 'temperature'],
['m', 'm-3', 'eV', 'm-3', 'eV']],
names=['type', 'property', 'unit'])
profiles = pd.DataFrame(data=profiles_data, columns=column_index, index=row_index)
# construct beam param specification
xml = etree.Element('xml')
head = etree.SubElement(xml, 'head')
id_tag = etree.SubElement(head, 'id')
id_tag.text = 'beamlet_test'
body_tag = etree.SubElement(xml, 'body')
beamlet_energy = etree.SubElement(body_tag, 'beamlet_energy', {'unit': 'keV'})
beamlet_energy.text = '100'
beamlet_species = etree.SubElement(body_tag, 'beamlet_species')
beamlet_species.text = 'H' # Li
beamlet_source = etree.SubElement(body_tag, 'beamlet_source')
beamlet_source.text = 'beamlet/test_impurity.h5'
beamlet_current = etree.SubElement(body_tag, 'beamlet_current', {'unit': 'A'})
beamlet_current.text = '0.001'
beamlet_mass = etree.SubElement(body_tag, 'beamlet_mass', {'unit': 'kg'})
beamlet_mass.text = '1.15258e-026'
beamlet_velocity = etree.SubElement(body_tag, 'beamlet_velocity', {'unit': 'm/s'})
beamlet_velocity.text = '1291547.1348855693'
beamlet_profiles = etree.SubElement(body_tag, 'beamlet_profiles', {})
beamlet_profiles.text = './beamlet_test.h5'
param = etree.ElementTree(element=xml)
b = Beamlet(param=param, profiles=profiles, components=components)
b.compute_linear_emission_density()
b.compute_linear_density_attenuation()
b.compute_relative_populations()
plt.plot(b.profiles['beamlet grid'], b.profiles['linear_emission_density'])
|
lgpl-3.0
|
raincoatrun/ThinkStats2
|
code/hypothesis.py
|
75
|
10162
|
"""This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import nsfg
import nsfg2
import first
import thinkstats2
import thinkplot
import copy
import random
import numpy as np
import matplotlib.pyplot as pyplot
class CoinTest(thinkstats2.HypothesisTest):
"""Tests the hypothesis that a coin is fair."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
heads, tails = data
test_stat = abs(heads - tails)
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
heads, tails = self.data
n = heads + tails
sample = [random.choice('HT') for _ in range(n)]
hist = thinkstats2.Hist(sample)
data = hist['H'], hist['T']
return data
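# A minimal usage sketch (mirrors main() below; the head/tail counts are
# illustrative):
#
#     ct = CoinTest((140, 110))
#     pvalue = ct.PValue()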
class DiffMeansPermute(thinkstats2.HypothesisTest):
"""Tests a difference in means by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
group1, group2 = data
test_stat = abs(group1.mean() - group2.mean())
return test_stat
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
group1, group2 = self.data
self.n, self.m = len(group1), len(group2)
self.pool = np.hstack((group1, group2))
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
np.random.shuffle(self.pool)
data = self.pool[:self.n], self.pool[self.n:]
return data
class DiffMeansOneSided(DiffMeansPermute):
"""Tests a one-sided difference in means by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
group1, group2 = data
test_stat = group1.mean() - group2.mean()
return test_stat
class DiffStdPermute(DiffMeansPermute):
"""Tests a one-sided difference in standard deviation by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
group1, group2 = data
test_stat = group1.std() - group2.std()
return test_stat
class CorrelationPermute(thinkstats2.HypothesisTest):
"""Tests correlations by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: tuple of xs and ys
"""
xs, ys = data
test_stat = abs(thinkstats2.Corr(xs, ys))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
xs, ys = self.data
xs = np.random.permutation(xs)
return xs, ys
class DiceTest(thinkstats2.HypothesisTest):
"""Tests whether a six-sided die is fair."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: list of frequencies
"""
observed = data
n = sum(observed)
expected = np.ones(6) * n / 6
test_stat = sum(abs(observed - expected))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
n = sum(self.data)
values = [1,2,3,4,5,6]
rolls = np.random.choice(values, n, replace=True)
hist = thinkstats2.Hist(rolls)
freqs = hist.Freqs(values)
return freqs
class DiceChiTest(DiceTest):
"""Tests a six-sided die using a chi-squared statistic."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: list of frequencies
"""
observed = data
n = sum(observed)
expected = np.ones(6) * n / 6
test_stat = sum((observed - expected)**2 / expected)
return test_stat
class PregLengthTest(thinkstats2.HypothesisTest):
"""Tests difference in pregnancy length using a chi-squared statistic."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: pair of lists of pregnancy lengths
"""
firsts, others = data
stat = self.ChiSquared(firsts) + self.ChiSquared(others)
return stat
def ChiSquared(self, lengths):
"""Computes the chi-squared statistic.
lengths: sequence of lengths
returns: float
"""
hist = thinkstats2.Hist(lengths)
observed = np.array(hist.Freqs(self.values))
expected = self.expected_probs * len(lengths)
stat = sum((observed - expected)**2 / expected)
return stat
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
firsts, others = self.data
self.n = len(firsts)
self.pool = np.hstack((firsts, others))
pmf = thinkstats2.Pmf(self.pool)
self.values = range(35, 44)
self.expected_probs = np.array(pmf.Probs(self.values))
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
np.random.shuffle(self.pool)
data = self.pool[:self.n], self.pool[self.n:]
return data
def RunDiceTest():
"""Tests whether a die is fair.
"""
data = [8, 9, 19, 5, 8, 11]
dt = DiceTest(data)
print('dice test', dt.PValue(iters=10000))
dt = DiceChiTest(data)
print('dice chi test', dt.PValue(iters=10000))
def FalseNegRate(data, num_runs=1000):
"""Computes the chance of a false negative based on resampling.
data: pair of sequences
num_runs: how many experiments to simulate
returns: float false negative rate
"""
group1, group2 = data
count = 0
for i in range(num_runs):
sample1 = thinkstats2.Resample(group1)
sample2 = thinkstats2.Resample(group2)
ht = DiffMeansPermute((sample1, sample2))
p_value = ht.PValue(iters=101)
if p_value > 0.05:
count += 1
return count / num_runs
def PrintTest(p_value, ht):
"""Prints results from a hypothesis test.
p_value: float
ht: HypothesisTest
"""
print('p-value =', p_value)
print('actual =', ht.actual)
print('ts max =', ht.MaxTestStat())
def RunTests(data, iters=1000):
"""Runs several tests on the given data.
data: pair of sequences
iters: number of iterations to run
"""
# test the difference in means
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=iters)
print('\nmeans permute two-sided')
PrintTest(p_value, ht)
ht.PlotCdf()
thinkplot.Save(root='hypothesis1',
title='Permutation test',
xlabel='difference in means (weeks)',
ylabel='CDF',
legend=False)
# test the difference in means one-sided
ht = DiffMeansOneSided(data)
p_value = ht.PValue(iters=iters)
print('\nmeans permute one-sided')
PrintTest(p_value, ht)
# test the difference in std
ht = DiffStdPermute(data)
p_value = ht.PValue(iters=iters)
print('\nstd permute one-sided')
PrintTest(p_value, ht)
def ReplicateTests():
"""Replicates tests with the new NSFG data."""
live, firsts, others = nsfg2.MakeFrames()
# compare pregnancy lengths
print('\nprglngth2')
data = firsts.prglngth.values, others.prglngth.values
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('means permute two-sided')
PrintTest(p_value, ht)
print('\nbirth weight 2')
data = (firsts.totalwgt_lb.dropna().values,
others.totalwgt_lb.dropna().values)
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('means permute two-sided')
PrintTest(p_value, ht)
# test correlation
live2 = live.dropna(subset=['agepreg', 'totalwgt_lb'])
data = live2.agepreg.values, live2.totalwgt_lb.values
ht = CorrelationPermute(data)
p_value = ht.PValue()
print('\nage weight correlation 2')
PrintTest(p_value, ht)
# compare pregnancy lengths (chi-squared)
data = firsts.prglngth.values, others.prglngth.values
ht = PregLengthTest(data)
p_value = ht.PValue()
print('\npregnancy length chi-squared 2')
PrintTest(p_value, ht)
def main():
thinkstats2.RandomSeed(17)
# run the coin test
ct = CoinTest((140, 110))
pvalue = ct.PValue()
print('coin test p-value', pvalue)
# compare pregnancy lengths
print('\nprglngth')
live, firsts, others = first.MakeFrames()
data = firsts.prglngth.values, others.prglngth.values
RunTests(data)
# compare birth weights
print('\nbirth weight')
data = (firsts.totalwgt_lb.dropna().values,
others.totalwgt_lb.dropna().values)
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('means permute two-sided')
PrintTest(p_value, ht)
# test correlation
live2 = live.dropna(subset=['agepreg', 'totalwgt_lb'])
data = live2.agepreg.values, live2.totalwgt_lb.values
ht = CorrelationPermute(data)
p_value = ht.PValue()
print('\nage weight correlation')
print('n=', len(live2))
PrintTest(p_value, ht)
# run the dice test
RunDiceTest()
# compare pregnancy lengths (chi-squared)
data = firsts.prglngth.values, others.prglngth.values
ht = PregLengthTest(data)
p_value = ht.PValue()
print('\npregnancy length chi-squared')
PrintTest(p_value, ht)
# compute the false negative rate for difference in pregnancy length
data = firsts.prglngth.values, others.prglngth.values
neg_rate = FalseNegRate(data)
print('false neg rate', neg_rate)
# run the tests with new nsfg data
ReplicateTests()
if __name__ == "__main__":
main()
|
gpl-3.0
|
madjelan/scikit-learn
|
sklearn/feature_selection/tests/test_from_model.py
|
244
|
1593
|
import numpy as np
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
iris = load_iris()
def test_transform_linear_model():
for clf in (LogisticRegression(C=0.1),
LinearSVC(C=0.01, dual=False),
SGDClassifier(alpha=0.001, n_iter=50, shuffle=True,
random_state=0)):
for thresh in (None, ".09*mean", "1e-5 * median"):
for func in (np.array, sp.csr_matrix):
X = func(iris.data)
clf.set_params(penalty="l1")
clf.fit(X, iris.target)
X_new = clf.transform(X, thresh)
if isinstance(clf, SGDClassifier):
assert_true(X_new.shape[1] <= X.shape[1])
else:
assert_less(X_new.shape[1], X.shape[1])
clf.set_params(penalty="l2")
clf.fit(X_new, iris.target)
pred = clf.predict(X_new)
assert_greater(np.mean(pred == iris.target), 0.7)
def test_invalid_input():
clf = SGDClassifier(alpha=0.1, n_iter=10, shuffle=True, random_state=None)
clf.fit(iris.data, iris.target)
assert_raises(ValueError, clf.transform, iris.data, "gobbledigook")
assert_raises(ValueError, clf.transform, iris.data, ".5 * gobbledigook")
|
bsd-3-clause
|
ssaeger/scikit-learn
|
sklearn/gaussian_process/tests/test_gpc.py
|
24
|
6079
|
"""Testing for Gaussian process classification """
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Licence: BSD 3 clause
import numpy as np
from scipy.optimize import approx_fprime
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF, ConstantKernel as C
from sklearn.utils.testing import (assert_true, assert_greater,
assert_almost_equal, assert_array_equal)
def f(x):
return np.sin(x)
X = np.atleast_2d(np.linspace(0, 10, 30)).T
X2 = np.atleast_2d([2., 4., 5.5, 6.5, 7.5]).T
y = np.array(f(X).ravel() > 0, dtype=int)
fX = f(X).ravel()
y_mc = np.empty(y.shape, dtype=int) # multi-class
y_mc[fX < -0.35] = 0
y_mc[(fX >= -0.35) & (fX < 0.35)] = 1
y_mc[fX > 0.35] = 2
fixed_kernel = RBF(length_scale=1.0, length_scale_bounds="fixed")
kernels = [RBF(length_scale=0.1), fixed_kernel,
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3)),
C(1.0, (1e-2, 1e2)) *
RBF(length_scale=1.0, length_scale_bounds=(1e-3, 1e3))]
def test_predict_consistent():
""" Check that the binary predict decision agrees with a predicted probability above 0.5.
"""
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_array_equal(gpc.predict(X),
gpc.predict_proba(X)[:, 1] >= 0.5)
def test_lml_improving():
""" Test that hyperparameter-tuning improves log-marginal likelihood. """
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(kernel.theta))
def test_lml_precomputed():
""" Test that lml of optimized kernel is stored correctly. """
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
assert_almost_equal(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(), 7)
def test_converged_to_local_maximum():
""" Test that we are in local maximum after hyperparameter-optimization."""
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
lml, lml_gradient = \
gpc.log_marginal_likelihood(gpc.kernel_.theta, True)
assert_true(np.all((np.abs(lml_gradient) < 1e-4) |
(gpc.kernel_.theta == gpc.kernel_.bounds[:, 0]) |
(gpc.kernel_.theta == gpc.kernel_.bounds[:, 1])))
def test_lml_gradient():
""" Compare analytic and numeric gradient of log marginal likelihood. """
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel).fit(X, y)
lml, lml_gradient = gpc.log_marginal_likelihood(kernel.theta, True)
lml_gradient_approx = \
approx_fprime(kernel.theta,
lambda theta: gpc.log_marginal_likelihood(theta,
False),
1e-10)
assert_almost_equal(lml_gradient, lml_gradient_approx, 3)
def test_random_starts():
"""
Test that an increasing number of random-starts of GP fitting only
increases the log marginal likelihood of the chosen theta.
"""
n_samples, n_features = 25, 2
np.random.seed(0)
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features) * 2 - 1
y = (np.sin(X).sum(axis=1) + np.sin(3 * X).sum(axis=1)) > 0
kernel = C(1.0, (1e-2, 1e2)) \
* RBF(length_scale=[1e-3] * n_features,
length_scale_bounds=[(1e-4, 1e+2)] * n_features)
last_lml = -np.inf
for n_restarts_optimizer in range(5):
gp = GaussianProcessClassifier(
kernel=kernel, n_restarts_optimizer=n_restarts_optimizer,
random_state=0).fit(X, y)
lml = gp.log_marginal_likelihood(gp.kernel_.theta)
assert_greater(lml, last_lml - np.finfo(np.float32).eps)
last_lml = lml
def test_custom_optimizer():
""" Test that GPC can use externally defined optimizers. """
# Define a dummy optimizer that simply tests 50 random hyperparameters
def optimizer(obj_func, initial_theta, bounds):
rng = np.random.RandomState(0)
theta_opt, func_min = \
initial_theta, obj_func(initial_theta, eval_gradient=False)
for _ in range(50):
theta = np.atleast_1d(rng.uniform(np.maximum(-2, bounds[:, 0]),
np.minimum(1, bounds[:, 1])))
f = obj_func(theta, eval_gradient=False)
if f < func_min:
theta_opt, func_min = theta, f
return theta_opt, func_min
for kernel in kernels:
if kernel == fixed_kernel:
continue
gpc = GaussianProcessClassifier(kernel=kernel, optimizer=optimizer)
gpc.fit(X, y_mc)
# Checks that optimizer improved marginal likelihood
assert_greater(gpc.log_marginal_likelihood(gpc.kernel_.theta),
gpc.log_marginal_likelihood(kernel.theta))
def test_multi_class():
""" Test GPC for multi-class classification problems. """
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y_mc)
y_prob = gpc.predict_proba(X2)
assert_almost_equal(y_prob.sum(1), 1)
y_pred = gpc.predict(X2)
assert_array_equal(np.argmax(y_prob, 1), y_pred)
def test_multi_class_n_jobs():
""" Test that multi-class GPC produces identical results with n_jobs>1. """
for kernel in kernels:
gpc = GaussianProcessClassifier(kernel=kernel)
gpc.fit(X, y_mc)
gpc_2 = GaussianProcessClassifier(kernel=kernel, n_jobs=2)
gpc_2.fit(X, y_mc)
y_prob = gpc.predict_proba(X2)
y_prob_2 = gpc_2.predict_proba(X2)
assert_almost_equal(y_prob, y_prob_2)
|
bsd-3-clause
|
geopandas/geopandas
|
doc/source/_static/code/buffer.py
|
2
|
1130
|
"""
Create an illustrative figure for different kwargs
in buffer method.
"""
import geopandas
import matplotlib.pyplot as plt
from shapely.geometry import Point, LineString, Polygon
s = geopandas.GeoSeries(
[
Point(0, 0),
LineString([(1, -1), (1, 0), (2, 0), (2, 1)]),
Polygon([(3, -1), (4, 0), (3, 1)]),
]
)
fix, axs = plt.subplots(
3, 2, figsize=(12, 12), sharex=True, sharey=True
)
for ax in axs.flatten():
s.plot(ax=ax)
ax.set(xticks=[], yticks=[])
s.buffer(0.2).plot(ax=axs[0, 0], alpha=0.6)
axs[0, 0].set_title("s.buffer(0.2)")
s.buffer(0.2, resolution=2).plot(ax=axs[0, 1], alpha=0.6)
axs[0, 1].set_title("s.buffer(0.2, resolution=2)")
s.buffer(0.2, cap_style=2).plot(ax=axs[1, 0], alpha=0.6)
axs[1, 0].set_title("s.buffer(0.2, cap_style=2)")
s.buffer(0.2, cap_style=3).plot(ax=axs[1, 1], alpha=0.6)
axs[1, 1].set_title("s.buffer(0.2, cap_style=3)")
s.buffer(0.2, join_style=2).plot(ax=axs[2, 0], alpha=0.6)
axs[2, 0].set_title("s.buffer(0.2, join_style=2)")
s.buffer(0.2, join_style=3).plot(ax=axs[2, 1], alpha=0.6)
axs[2, 1].set_title("s.buffer(0.2, join_style=3)")
|
bsd-3-clause
|
dpaiton/OpenPV
|
pv-core/analysis/python/plot_reconstruction.py
|
1
|
2441
|
"""
Plot a reconstruction of the retina image from the l1 activity and patches
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import matplotlib.cm as cm
import PVReadSparse as rs
import PVConversions as conv
import PVReadWeights as rw
import math
if len(sys.argv) < 7:
print "usage: plot_avg_activity activity-filename [end_time step_time begin_time] w4, w5, activity-test"
sys.exit()
extended = False
a1 = rs.PVReadSparse(sys.argv[1], extended)
end = int(sys.argv[2])
step = int(sys.argv[3])
begin = int(sys.argv[4])
w = rw.PVReadWeights(sys.argv[5])
wOff = rw.PVReadWeights(sys.argv[6])
atest = rs.PVReadSparse(sys.argv[7], extended)
coord = 1
endtest = 2000
steptest = 1999
begintest = 0
anX = 32
anY = 32
nx = w.nx
ny = w.ny
nxp = w.nxp
nyp = w.nyp
numpat = w.numPatches
space = 1
slPre = -math.log(4.0,2)
slPost = -math.log(1.0,2)
pa = []
grayp = []
graypo = []
grau = []
for i in range((32*32)):
grayp.append(0.5)
graypo.append(0.5)
grau.append(0.5)
grayp = np.reshape(grayp, (32, 32))
grau = np.reshape(grau, (32, 32))
graypo = np.reshape(graypo, (32, 32))
for endtest in range(begintest+steptest, endtest, steptest):
Atest = atest.avg_activity(begintest, endtest)
lenofo = len(Atest)
for i in range(lenofo):
for j in range(lenofo):
pa = np.append(pa, Atest[i,j])
amax = np.max(pa)
nxp2 = 1.0
count = 0
for end in range(begin+step, end, step):
A1 = a1.avg_activity(begin, end)
lenofo = len(A1)
lenofb = lenofo * lenofo
#print "a1 = ", np.shape(A1)
#print "a2 = ", np.shape(A2)
for j in range(lenofo):
for i in range(lenofo):
ix = conv.zPatchHead(i, nxp2, slPre, slPost)
jy = conv.zPatchHead(j, nxp2, slPre, slPost)
p = w.next_patch()
pOff = wOff.next_patch()
grayp[ix, jy] = grayp[ix, jy] + (np.sum(((A1[i, j]/amax)*p)) / (16*2))
graypo[ix, jy] = graypo[ix, jy] - (np.sum(((A1[i, j]/amax)*pOff)) / (16*2))
fig = plt.figure()
ax = fig.add_subplot(211)
ax.set_xlabel('Kx')
ax.set_ylabel('Ky')
ax.set_title('On Reconstruction')
ax.imshow(grayp, cmap=cm.binary, interpolation='nearest', vmin=0, vmax=1)
ax = fig.add_subplot(212)
ax.set_xlabel('Kx')
ax.set_ylabel('Ky')
ax.set_title('Off Reconstruction')
ax.imshow(graypo, cmap=cm.binary, interpolation='nearest', vmin=0, vmax=1)
plt.show()
#end fig loop
|
epl-1.0
|
q1ang/scikit-learn
|
sklearn/utils/setup.py
|
296
|
2884
|
import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
bsd-3-clause
|
maheshakya/scikit-learn
|
examples/datasets/plot_random_dataset.py
|
348
|
2254
|
"""
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
|
bsd-3-clause
|
siutanwong/scikit-learn
|
examples/model_selection/plot_validation_curve.py
|
229
|
1823
|
"""
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
|
bsd-3-clause
|
CforED/Machine-Learning
|
sklearn/utils/graph.py
|
289
|
6239
|
"""
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <hagberg@lanl.gov>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
next_level = [source] # dict of nodes to check at next level
while next_level:
this_level = next_level # advance to next level
next_level = set() # and start a new list (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchhoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
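Examples
--------
A minimal sketch on a small undirected path graph (the adjacency values are
illustrative)::
    import numpy as np
    graph = np.array([[0, 1, 0],
                      [1, 0, 1],
                      [0, 1, 0]])
    lap = graph_laplacian(graph)  # node degrees on the diagonal, -1 off it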
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
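

###############################################################################
# Usage sketch (illustrative addition, not part of the original module; it
# assumes only the module-level numpy import used above). For the undirected
# path graph 0-1-2 the Laplacian carries the node degrees on its diagonal and
# -1 for each edge, so every row sums to zero.
if __name__ == '__main__':
    demo_graph = np.array([[0., 1., 0.],
                           [1., 0., 1.],
                           [0., 1., 0.]])
    demo_lap, demo_degrees = graph_laplacian(demo_graph, return_diag=True)
    print(demo_lap)               # degrees on the diagonal, -1 on the edges
    print(demo_lap.sum(axis=1))   # [ 0.  0.  0.]
    print(demo_degrees)           # [ 1.  2.  1.]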
|
bsd-3-clause
|
sinhrks/scikit-learn
|
examples/datasets/plot_random_multilabel_dataset.py
|
278
|
3402
|
"""
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the `datasets.make_multilabel_classification` dataset
generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
=====  =====  =====  ======
  1      2      3    Color
=====  =====  =====  ======
  Y      N      N    Red
  N      Y      N    Blue
  N      N      Y    Yellow
  Y      Y      N    Purple
  Y      N      Y    Orange
  N      Y      Y    Green
  Y      Y      Y    Brown
=====  =====  =====  ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
COLORS = np.array(['!',
'#FF3333', # red
'#0198E1', # blue
'#BF5FFF', # purple
'#FCD116', # yellow
'#FF7216', # orange
'#4DBD33', # green
'#87421F' # brown
])
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
n_classes=n_classes, n_labels=n_labels,
length=length, allow_unlabeled=False,
return_distributions=True,
random_state=RANDOM_SEED)
ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]
).sum(axis=1)),
marker='.')
ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
marker='*', linewidth=.5, edgecolor='black',
s=20 + 1500 * p_c ** 2,
color=COLORS.take([1, 2, 4]))
ax.set_xlabel('Feature 0 count')
return p_c, p_w_c
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
|
bsd-3-clause
|
rdipietro/tensorflow
|
tensorflow/examples/learn/iris.py
|
25
|
1649
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn
def main(unused_argv):
# Load dataset.
iris = learn.datasets.load_dataset('iris')
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = learn.infer_real_valued_columns_from_input(x_train)
classifier = learn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
|
apache-2.0
|
abhishekgahlot/pybrain
|
examples/supervised/evolino/superimposed_sine.py
|
4
|
3443
|
#!/usr/bin/env python
__author__ = 'Michael Isik'
from pylab import plot, show, ion, cla, subplot, title, figlegend, draw
import numpy
from pybrain.structure.modules.evolinonetwork import EvolinoNetwork
from pybrain.supervised.trainers.evolino import EvolinoTrainer
from lib.data_generator import generateSuperimposedSineData
print
print "=== Learning to extrapolate 5 superimposed sine waves ==="
print
sinefreqs = ( 0.2, 0.311, 0.42, 0.51, 0.74 )
# sinefreqs = ( 0.2, 0.311, 0.42, 0.51, 0.74, 0.81 )
metascale = 8.
scale = 0.5 * metascale
stepsize = 0.1 * metascale
# === create training dataset
# the sequences must be stored in the target field
# the input field will be ignored
print "creating training data"
trnInputSpace = numpy.arange( 0*scale , 190*scale , stepsize )
trnData = generateSuperimposedSineData(sinefreqs, trnInputSpace)
# === create testing dataset
print "creating test data"
tstInputSpace = numpy.arange( 400*scale , 540*scale , stepsize)
tstData = generateSuperimposedSineData(sinefreqs, tstInputSpace)
# === create the evolino-network
print "creating EvolinoNetwork"
net = EvolinoNetwork( trnData.outdim, 40 )
wtRatio = 1./3.
# === instantiate an evolino trainer
# it will train our network through evolutionary algorithms
print "creating EvolinoTrainer"
trainer = EvolinoTrainer(
net,
dataset=trnData,
subPopulationSize = 20,
nParents = 8,
nCombinations = 1,
initialWeightRange = ( -0.01 , 0.01 ),
# initialWeightRange = ( -0.1 , 0.1 ),
# initialWeightRange = ( -0.5 , -0.2 ),
backprojectionFactor = 0.001,
mutationAlpha = 0.001,
# mutationAlpha = 0.0000001,
nBurstMutationEpochs = numpy.Infinity,
wtRatio = wtRatio,
verbosity = 2)
# === prepare sequences for extrapolation and plotting
trnSequence = trnData.getField('target')
separatorIdx = int(len(trnSequence)*wtRatio)
trnSequenceWashout = trnSequence[0:separatorIdx]
trnSequenceTarget = trnSequence[separatorIdx:]
tstSequence = tstData.getField('target')
separatorIdx = int(len(tstSequence)*wtRatio)
tstSequenceWashout = tstSequence[0:separatorIdx]
tstSequenceTarget = tstSequence[separatorIdx:]
ion() # switch matplotlib to interactive mode
for i in range(3000):
print "======================"
print "====== NEXT RUN ======"
print "======================"
print "=== TRAINING"
# train the network for 1 epoch
trainer.trainEpochs( 1 )
print "=== PLOTTING\n"
# calculate the nets output for train and the test data
trnSequenceOutput = net.extrapolate(trnSequenceWashout, len(trnSequenceTarget))
tstSequenceOutput = net.extrapolate(tstSequenceWashout, len(tstSequenceTarget))
# plot training data
sp = subplot(211) # switch to the first subplot
cla() # clear the subplot
title("Training Set") # set the subplot's title
sp.set_autoscale_on( True ) # enable autoscaling
targetline = plot(trnSequenceTarget,"r-") # plot the targets
sp.set_autoscale_on( False ) # disable autoscaling
outputline = plot(trnSequenceOutput,"b-") # plot the actual output
# plot test data
sp = subplot(212)
cla()
title("Test Set")
sp.set_autoscale_on( True )
plot(tstSequenceTarget,"r-")
sp.set_autoscale_on( False )
plot(tstSequenceOutput,"b-")
# create a legend
figlegend((targetline, outputline),('target','output'),('upper right'))
# draw everything
draw()
show()
|
bsd-3-clause
|
PredictiveScienceLab/inverse-bgo
|
demos/catalysis/model_1.py
|
2
|
5523
|
"""
Implements the catalysis model as found in Yiannis' paper.
"""
from system import *
def make_A(kappa):
"""
Make the matrix of the dynamical system from ``kappa``.
"""
A = np.array([[-kappa[0], 0, 0, 0, 0, 0],
[kappa[0], -kappa[1]-kappa[3]-kappa[4], 0, 0, 0, 0],
[0, kappa[1], -kappa[2], 0, 0, 0],
[0, 0, kappa[2], 0, 0, 0],
[0, kappa[4], 0, 0, 0, 0],
[0, kappa[3], 0, 0, 0, 0]])
# The derivative of A with respect to kappa
d = A.shape[0]
s = kappa.shape[0]
dA = np.zeros((d, d, s))
dA[0, 0, 0] = -1.
dA[1, 0, 0] = 1.
dA[1, 1, 1] = -1.
dA[1, 1, 4] = -1.
dA[1, 1, 3] = -1.
dA[2, 1, 1] = 1.
dA[2, 2, 2] = -1.
dA[3, 2, 2] = 1.
dA[4, 1, 4] = 1.
dA[5, 1, 3] = 1.
return A, dA
def make_full_A(kappa):
"""
Make the matrix of the dynamical system from ``kappa``.
"""
assert kappa.shape[0] == 36
A = kappa.reshape((6,6))
dA = np.zeros((36,36))
for i in xrange(36):
dA[i,i] = 1
dA = dA.reshape((6,6,36))
return A, dA
def f(kappa, y0, t):
"""
Evaluate the model at ``kappa`` and at times ``t`` with initial
conditions ``y0``.
    It returns a flattened version of the system, i.e.:
y_1(t_1)
...
y_d(t_1)
...
y_1(t_K)
...
y_d(t_K)
"""
A = make_A(kappa)[0]
r = ode(f0)
r.set_initial_value(y0, 0).set_f_params(A)
y_m = [y0[None, :]]
for tt in t[1:]:
r.integrate(tt)
y_m.append(r.y[None, :])
y_m = np.vstack(y_m)
return y_m.flatten()
def f_full(kappa, y0, t):
"""
Evaluate the model at ``kappa`` and at times ``t`` with initial
conditions ``y0``.
    It returns a flattened version of the system, i.e.:
y_1(t_1)
...
y_d(t_1)
...
y_1(t_K)
...
y_d(t_K)
"""
A = make_full_A(kappa)[0]
r = ode(f0)
r.set_initial_value(y0, 0).set_f_params(A)
y_m = [y0[None, :]]
for tt in t[1:]:
r.integrate(tt)
y_m.append(r.y[None, :])
y_m = np.vstack(y_m)
return y_m.flatten()
def df(kappa, y0, t):
"""
    Evaluate the derivatives of the model at ``kappa`` and at times ``t``
with initial conditions ``y0``.
This returns a matrix of the following form:
dy_1(t_1) / dkappa_1 ... dy_1(t_1) / dkappa_s
...
dy_d(t_1) / dkappa_1 ... dy_d(t_1) / dkappa_s
...
    dy_1(t_K) / dkappa_1 ... dy_1(t_K) / dkappa_s
...
dy_d(t_K) / dkappa_1 ... dy_d(t_K) / dkappa_s
"""
A, dA = make_A(kappa)
d = A.shape[0]
r = ode(f1) # Look at system.py: this should be the f of the adjoint
r.set_initial_value(np.hstack([y0, np.zeros((d ** 3, ))])).set_f_params(A)
y_m = [r.y]
for tt in t[1:]:
r.integrate(tt)
y_m.append(r.y[None, :])
y_m = np.vstack(y_m)
# This is the Jacobian with respect to the full matrix A
J = y_m[:, d:].reshape((t.shape[0], d, d, d))
# Now we apply the chain rule to compute the jacobian wrt kappa
J_kappa = np.einsum('ijkl,klr', J, dA)
return J_kappa.reshape((d * t.shape[0], kappa.shape[0]))
def df_full(kappa, y0, t):
"""
    Evaluate the derivatives of the model at ``kappa`` and at times ``t``
with initial conditions ``y0``.
This returns a matrix of the following form:
dy_1(t_1) / dkappa_1 ... dy_1(t_1) / dkappa_s
...
dy_d(t_1) / dkappa_1 ... dy_d(t_1) / dkappa_s
...
    dy_1(t_K) / dkappa_1 ... dy_1(t_K) / dkappa_s
...
dy_d(t_K) / dkappa_1 ... dy_d(t_K) / dkappa_s
"""
A, dA = make_full_A(kappa)
d = A.shape[0]
r = ode(f1) # Look at system.py: this should be the f of the adjoint
r.set_initial_value(np.hstack([y0, np.zeros((d ** 3, ))])).set_f_params(A)
y_m = [r.y]
for tt in t[1:]:
r.integrate(tt)
y_m.append(r.y[None, :])
y_m = np.vstack(y_m)
# This is the Jacobian with respect to the full matrix A
J = y_m[:, d:].reshape((t.shape[0], d, d, d))
# Now we apply the chain rule to compute the jacobian wrt kappa
J_kappa = np.einsum('ijkl,klr', J, dA)
return J_kappa.reshape((d * t.shape[0], kappa.shape[0]))
def ndf(kappa0, y0, t, h=1e-3):
"""
Numerically compute the Jacobian of f at x0.
"""
y = f(kappa0, y0, t)
kappa = kappa0.copy()
J = np.zeros((y.shape[0], kappa0.shape[0]))
for i in xrange(kappa0.shape[0]):
kappa[i] += h
py = f(kappa, y0, t)
kappa[i] -= h
J[:, i] = (py - y) / h
return J
if __name__ == '__main__':
    # Test if what we are doing makes sense
import matplotlib.pyplot as plt
# Some kappas to evaluate the model at:
kappa = np.array([0.0216, 0.0292, 0.0219, 0.0021, 0.0048])
# The observed data (to read the initial conditions):
obs_data = np.loadtxt('obs_data.txt')
# Remember that the column of the data that has all zeros contains X
# which is not observed. This is the 4th column.
# The observed times
t = obs_data[:, 0]
# The intial points
y0 = obs_data[0, 1:]
# Here is how we evaluate the model
y = f(kappa, y0, t)
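    # Illustrative step (not part of the original demo): the flattened output
    # stacks all species at every time point, so reshaping it back to
    # (n_times, n_species) gives one row of concentrations per observation
    # time.
    y_states = y.reshape((t.shape[0], y0.shape[0]))
    assert y_states.shape == (t.shape[0], y0.shape[0])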
# Here is how we evaluate the derivatives of the model
dy = df(kappa, y0, t)
# Now evaluate the numerical derivatives at kappa
J = ndf(kappa, y0, t)
for i in xrange(kappa.shape[0]):
plt.plot(dy[:, i], 'r', linewidth=2)
plt.plot(J[:, i], '--g', linewidth=2)
plt.legend(['Adjoint derivative', 'Numerical Derivative'])
plt.show()
|
mit
|
iproduct/course-social-robotics
|
11-dnn-keras/venv/Lib/site-packages/pandas/tests/extension/arrow/arrays.py
|
2
|
5291
|
"""
Rudimentary Apache Arrow-backed ExtensionArray.
At the moment, just a boolean array / type is implemented.
Eventually, we'll want to parametrize the type and support
multiple dtypes. Not all methods are implemented yet, and the
current implementation is not efficient.
"""
import copy
import itertools
import operator
from typing import Type
import numpy as np
import pyarrow as pa
import pandas as pd
from pandas.api.extensions import (
ExtensionArray,
ExtensionDtype,
register_extension_dtype,
take,
)
from pandas.core.arraylike import OpsMixin
@register_extension_dtype
class ArrowBoolDtype(ExtensionDtype):
type = np.bool_
kind = "b"
name = "arrow_bool"
na_value = pa.NULL
@classmethod
def construct_array_type(cls) -> Type["ArrowBoolArray"]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return ArrowBoolArray
@property
def _is_boolean(self) -> bool:
return True
@register_extension_dtype
class ArrowStringDtype(ExtensionDtype):
type = str
kind = "U"
name = "arrow_string"
na_value = pa.NULL
@classmethod
def construct_array_type(cls) -> Type["ArrowStringArray"]:
"""
Return the array type associated with this dtype.
Returns
-------
type
"""
return ArrowStringArray
class ArrowExtensionArray(OpsMixin, ExtensionArray):
_data: pa.ChunkedArray
@classmethod
def from_scalars(cls, values):
arr = pa.chunked_array([pa.array(np.asarray(values))])
return cls(arr)
@classmethod
def from_array(cls, arr):
assert isinstance(arr, pa.Array)
return cls(pa.chunked_array([arr]))
@classmethod
def _from_sequence(cls, scalars, dtype=None, copy=False):
return cls.from_scalars(scalars)
def __repr__(self):
return f"{type(self).__name__}({repr(self._data)})"
def __getitem__(self, item):
if pd.api.types.is_scalar(item):
return self._data.to_pandas()[item]
else:
vals = self._data.to_pandas()[item]
return type(self).from_scalars(vals)
def __len__(self):
return len(self._data)
def astype(self, dtype, copy=True):
# needed to fix this astype for the Series constructor.
if isinstance(dtype, type(self.dtype)) and dtype == self.dtype:
if copy:
return self.copy()
return self
return super().astype(dtype, copy)
@property
def dtype(self):
return self._dtype
def _logical_method(self, other, op):
if not isinstance(other, type(self)):
raise NotImplementedError()
result = op(np.array(self._data), np.array(other._data))
return ArrowBoolArray(
pa.chunked_array([pa.array(result, mask=pd.isna(self._data.to_pandas()))])
)
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
return self._logical_method(other, operator.eq)
@property
def nbytes(self) -> int:
return sum(
x.size
for chunk in self._data.chunks
for x in chunk.buffers()
if x is not None
)
def isna(self):
nas = pd.isna(self._data.to_pandas())
return type(self).from_scalars(nas)
def take(self, indices, allow_fill=False, fill_value=None):
data = self._data.to_pandas()
if allow_fill and fill_value is None:
fill_value = self.dtype.na_value
result = take(data, indices, fill_value=fill_value, allow_fill=allow_fill)
return self._from_sequence(result, dtype=self.dtype)
def copy(self):
return type(self)(copy.copy(self._data))
@classmethod
def _concat_same_type(cls, to_concat):
chunks = list(itertools.chain.from_iterable(x._data.chunks for x in to_concat))
arr = pa.chunked_array(chunks)
return cls(arr)
def __invert__(self):
return type(self).from_scalars(~self._data.to_pandas())
def _reduce(self, name: str, *, skipna: bool = True, **kwargs):
if skipna:
arr = self[~self.isna()]
else:
arr = self
try:
op = getattr(arr, name)
except AttributeError as err:
raise TypeError from err
return op(**kwargs)
def any(self, axis=0, out=None):
# Explicitly return a plain bool to reproduce GH-34660
return bool(self._data.to_pandas().any())
def all(self, axis=0, out=None):
# Explicitly return a plain bool to reproduce GH-34660
return bool(self._data.to_pandas().all())
class ArrowBoolArray(ArrowExtensionArray):
def __init__(self, values):
if not isinstance(values, pa.ChunkedArray):
raise ValueError
assert values.type == pa.bool_()
self._data = values
self._dtype = ArrowBoolDtype()
class ArrowStringArray(ArrowExtensionArray):
def __init__(self, values):
if not isinstance(values, pa.ChunkedArray):
raise ValueError
assert values.type == pa.string()
self._data = values
self._dtype = ArrowStringDtype()
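

if __name__ == "__main__":
    # Minimal usage sketch (illustrative addition, not part of the original
    # test helpers): build a boolean Arrow-backed array from Python scalars,
    # wrap it in a Series and exercise a couple of the methods defined above.
    demo = ArrowBoolArray.from_scalars([True, False, True])
    ser = pd.Series(demo)
    print(ser.dtype)      # arrow_bool
    print(demo.nbytes)    # total size of the underlying Arrow buffers
    print((~demo)._data)  # inverted values, still backed by pyarrow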
|
gpl-2.0
|
lekshmideepu/nest-simulator
|
pynest/examples/intrinsic_currents_subthreshold.py
|
8
|
8242
|
# -*- coding: utf-8 -*-
#
# intrinsic_currents_subthreshold.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Intrinsic currents subthreshold
-------------------------------
This example illustrates how to record from a model with multiple
intrinsic currents and visualize the results. This is illustrated
using the ``ht_neuron`` which has four intrinsic currents: ``I_NaP``,
``I_KNa``, ``I_T``, and ``I_h``. It is a slightly simplified implementation of
the neuron model proposed in [1]_.
The neuron is driven by DC current, which alternates
between depolarizing and hyperpolarizing. The hyperpolarization
intervals become progressively longer.
References
~~~~~~~~~~
.. [1] Hill and Tononi (2005) Modeling Sleep and Wakefulness in the
Thalamocortical System J Neurophysiol 93:1671
http://dx.doi.org/10.1152/jn.00915.2004.
See Also
~~~~~~~~
:doc:`intrinsic_currents_spiking`
"""
###############################################################################
# We import all necessary modules for simulation, analysis and plotting.
import nest
import matplotlib.pyplot as plt
###############################################################################
# Additionally, we set the verbosity using ``set_verbosity`` to suppress info
# messages. We also reset the kernel to be sure to start with a clean NEST.
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
###############################################################################
# We define simulation parameters:
#
# - The length of depolarization intervals
# - The length of hyperpolarization intervals
# - The amplitude for de- and hyperpolarizing currents
# - The end of the time window to plot
n_blocks = 5
t_block = 20.
t_dep = [t_block] * n_blocks
t_hyp = [t_block * 2 ** n for n in range(n_blocks)]
I_dep = 10.
I_hyp = -5.
t_end = 500.
###############################################################################
# We create a single neuron instance and a DC current generator, and store
# the returned handles.
nrn = nest.Create('ht_neuron')
dc = nest.Create('dc_generator')
###############################################################################
# We create a multimeter to record
#
# - membrane potential ``V_m``
# - threshold value ``theta``
# - intrinsic currents ``I_NaP``, ``I_KNa``, ``I_T``, ``I_h``
#
# by passing these names in the ``record_from`` list.
#
# To find out which quantities can be recorded from a given neuron,
# run::
#
# nest.GetDefaults('ht_neuron')['recordables']
#
# The result will contain an entry like::
#
# <SLILiteral: V_m>
#
# for each recordable quantity. You need to pass the value of the
# ``SLILiteral``, in this case ``V_m`` in the ``record_from`` list.
#
# We want to record values with 0.1 ms resolution, so we set the
# recording interval as well; the default recording resolution is 1 ms.
# create multimeter and configure it to record all information
# we want at 0.1 ms resolution
mm = nest.Create('multimeter',
params={'interval': 0.1,
'record_from': ['V_m', 'theta',
'I_NaP', 'I_KNa', 'I_T', 'I_h']}
)
###############################################################################
# We connect the DC generator and the multimeter to the neuron. Note that
# the multimeter, just like the voltmeter, is connected to the neuron,
# not the neuron to the multimeter.
nest.Connect(dc, nrn)
nest.Connect(mm, nrn)
###############################################################################
# We are ready to simulate. We alternate between driving the neuron with
# depolarizing and hyperpolarizing currents. Before each simulation
# interval, we set the amplitude of the DC generator to the correct value.
for t_sim_dep, t_sim_hyp in zip(t_dep, t_hyp):
dc.amplitude = I_dep
nest.Simulate(t_sim_dep)
dc.amplitude = I_hyp
nest.Simulate(t_sim_hyp)
###############################################################################
# We now fetch the data recorded by the multimeter. The data are returned as
# a dictionary with entry ``times`` containing timestamps for all recorded
# data, plus one entry per recorded quantity.
#
# All data is contained in the ``events`` entry of the status dictionary
# returned by the multimeter.
data = mm.events
t = data['times']
###############################################################################
# The next step is to plot the results. We create a new figure, add a single
# subplot and plot at first membrane potential and threshold.
fig = plt.figure()
Vax = fig.add_subplot(111)
Vax.plot(t, data['V_m'], 'b-', lw=2, label=r'$V_m$')
Vax.plot(t, data['theta'], 'g-', lw=2, label=r'$\Theta$')
Vax.set_ylim(-80., 0.)
Vax.set_ylabel('Voltage [mV]')
Vax.set_xlabel('Time [ms]')
###############################################################################
# To plot the input current, we need to create an input current trace. We
# construct it from the durations of the de- and hyperpolarizing inputs and
# add the delay in the connection between DC generator and neuron:
#
# * We find the delay by checking the status of the dc->nrn connection.
# * We find the resolution of the simulation from the kernel status.
# * Each current interval begins one time step after the previous interval,
#   is delayed by the connection delay and remains in effect for the given duration.
# * We build the time axis incrementally. We only add the delay when adding
# the first time point after t=0. All subsequent points are then
# automatically shifted by the delay.
conns = nest.GetConnections(dc, nrn)
delay = conns.delay
dt = nest.GetKernelStatus('resolution')
t_dc, I_dc = [0], [0]
for td, th in zip(t_dep, t_hyp):
t_prev = t_dc[-1]
t_start_dep = t_prev + dt
if t_prev == 0:
t_start_dep += delay
t_end_dep = t_start_dep + td
t_start_hyp = t_end_dep + dt
t_end_hyp = t_start_hyp + th
t_dc.extend([t_start_dep, t_end_dep, t_start_hyp, t_end_hyp])
I_dc.extend([I_dep, I_dep, I_hyp, I_hyp])
###############################################################################
# The following function turns a name such as ``I_NaP`` into proper TeX code
# :math:`I_{\mathrm{NaP}}` for a pretty label.
def texify_name(name):
return r'${}_{{\mathrm{{{}}}}}$'.format(*name.split('_'))
###############################################################################
# Next, we add a right vertical axis and plot the currents with respect to
# that axis.
Iax = Vax.twinx()
Iax.plot(t_dc, I_dc, 'k-', lw=2, label=texify_name('I_DC'))
for iname, color in (('I_h', 'maroon'), ('I_T', 'orange'),
('I_NaP', 'crimson'), ('I_KNa', 'aqua')):
Iax.plot(t, data[iname], color=color, lw=2, label=texify_name(iname))
Iax.set_xlim(0, t_end)
Iax.set_ylim(-10., 15.)
Iax.set_ylabel('Current [pA]')
Iax.set_title('ht_neuron driven by DC current')
###############################################################################
# We need to make a little extra effort to combine the lines from the two axes
# into one legend.
lines_V, labels_V = Vax.get_legend_handles_labels()
lines_I, labels_I = Iax.get_legend_handles_labels()
try:
Iax.legend(lines_V + lines_I, labels_V + labels_I, fontsize='small')
except TypeError:
# work-around for older Matplotlib versions
Iax.legend(lines_V + lines_I, labels_V + labels_I)
###############################################################################
# Note that ``I_KNa`` is not activated in this example because the neuron does
# not spike. ``I_T`` has only a very small amplitude.
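###############################################################################
# When the example is run as a standalone script (outside the documentation
# build), an explicit call to ``plt.show()`` may be required to open the
# figure window; this closing call is an illustrative addition.
plt.show()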
|
gpl-2.0
|
XianliangJ/collections
|
Bufferbloat/plot_queue.py
|
2
|
2224
|
'''
Plot queue occupancy over time
'''
from helper import *
import plot_defaults
from matplotlib.ticker import MaxNLocator
from pylab import figure
parser = argparse.ArgumentParser()
parser.add_argument('--files', '-f',
help="Queue timeseries output to one plot",
required=True,
action="store",
nargs='+',
dest="files")
parser.add_argument('--legend', '-l',
help="Legend to use if there are multiple plots. File names used as default.",
action="store",
nargs="+",
default=None,
dest="legend")
parser.add_argument('--out', '-o',
help="Output png file for the plot.",
default=None, # Will show the plot
dest="out")
parser.add_argument('--labels',
help="Labels for x-axis if summarising; defaults to file names",
required=False,
default=[],
nargs="+",
dest="labels")
parser.add_argument('--every',
help="If the plot has a lot of data points, plot one of every EVERY (x,y) point (default 1).",
default=1,
type=int)
args = parser.parse_args()
if args.legend is None:
args.legend = []
for file in args.files:
args.legend.append(file)
to_plot=[]
def get_style(i):
if i == 0:
return {'color': 'red'}
else:
return {'color': 'black', 'ls': '-.'}
m.rc('figure', figsize=(16, 6))
fig = figure()
ax = fig.add_subplot(111)
for i, f in enumerate(args.files):
data = read_list(f)
xaxis = map(float, col(0, data))
start_time = xaxis[0]
xaxis = map(lambda x: x - start_time, xaxis)
qlens = map(float, col(1, data))
xaxis = xaxis[::args.every]
qlens = qlens[::args.every]
ax.plot(xaxis, qlens, label=args.legend[i], lw=2, **get_style(i))
ax.xaxis.set_major_locator(MaxNLocator(4))
plt.ylabel("Packets")
plt.grid(True)
plt.xlabel("Seconds")
if args.out:
print 'saving to', args.out
plt.savefig(args.out)
else:
plt.show()
|
gpl-3.0
|
schoolie/bokeh
|
bokeh/sampledata/project_funding.py
|
12
|
3025
|
"""Provides convenient access to data viz challenge data.
Source: https://github.com/localytics/data-viz-challenge
This dataset is excellent for testing and demonstrating data
viz capabilities because it contains numerous categorical
columns, with both high and low cardinality, columns with NaN
values, dates and locations. This is a very good example of
the kind of data that you might see from an information system,
where the analyst might be simply helping visualize the data
(business intelligence), or trying to understand how to exploit
the data for better system performance.
The JSON data are downloaded only the first time this module is imported;
the data are then loaded and cleaned up into a pandas DataFrame.
The resulting DataFrame has the following dtypes:
age               object
amount            float64
category          object
client_time       datetime64[ns]
device            object
event_name        object
gender            object
city              object
latitude          float64
longitude         float64
state             object
zip_code          int64
marital_status    object
session_id        object
"""
from __future__ import absolute_import
from bokeh.util.dependencies import import_required
pd = import_required('pandas',
'project_funding sample data requires Pandas (http://pandas.pydata.org) to be installed')
import os
from six.moves.urllib.request import URLopener
from bokeh.charts.utils import df_from_json
DATA_URL = "https://raw.githubusercontent.com/localytics/data-viz-challenge/master/data.json"
DOWNLOAD_NAME = 'project_funding.json'
CSV_NAME = 'project_funding.csv'
# Get absolute path relative to script
data_dir = os.path.dirname(os.path.realpath(__file__))
json_file_path = os.path.join(data_dir, DOWNLOAD_NAME)
csv_file_path = os.path.join(data_dir, CSV_NAME)
def download_project_funding():
if not os.path.isfile(json_file_path):
print('Downloading project funding source data.')
json_data = URLopener()
json_data.retrieve(DATA_URL, json_file_path)
print('Download complete!')
def load_project_funding():
project_funding = df_from_json(json_file_path)
# cleanup column names
cols = project_funding.columns
flat_cols = [col.split('.')[1] if '.' in col else col for col in cols]
project_funding.columns = flat_cols
# convert to dates
project_funding['client_time'] = pd.to_datetime(project_funding['client_time'], unit='s')
return project_funding
def load_cached_funding():
if not os.path.isfile(csv_file_path):
project_funding = load_project_funding()
project_funding.to_csv(csv_file_path, index=False)
else:
project_funding = pd.read_csv(csv_file_path, parse_dates=['client_time'])
return project_funding
download_project_funding()
project_funding = load_cached_funding()
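

if __name__ == '__main__':
    # Quick sanity check when the module is run as a script (illustrative
    # addition, not part of the original sample-data loader): report the
    # column dtypes and show the first few events.
    print(project_funding.dtypes)
    print(project_funding.head())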
|
bsd-3-clause
|
MycChiu/tensorflow
|
tensorflow/contrib/learn/python/learn/estimators/estimator_test.py
|
6
|
36350
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import json
import os
import tempfile
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import learn
from tensorflow.contrib import lookup
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.contrib.testing.python.framework import util_test
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import monitored_session
from tensorflow.python.training import queue_runner_impl
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
_BOSTON_INPUT_DIM = 13
_IRIS_INPUT_DIM = 4
def boston_input_fn(num_epochs=None):
boston = base.load_boston()
features = input_lib.limit_epochs(
array_ops.reshape(
constant_op.constant(boston.data), [-1, _BOSTON_INPUT_DIM]),
num_epochs=num_epochs)
labels = array_ops.reshape(constant_op.constant(boston.target), [-1, 1])
return features, labels
def boston_input_fn_with_queue(num_epochs=None):
features, labels = boston_input_fn(num_epochs=num_epochs)
# Create a minimal queue runner.
fake_queue = data_flow_ops.FIFOQueue(30, dtypes.int32)
queue_runner = queue_runner_impl.QueueRunner(fake_queue,
[constant_op.constant(0)])
queue_runner_impl.add_queue_runner(queue_runner)
return features, labels
def iris_input_fn():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = array_ops.reshape(constant_op.constant(iris.target), [-1])
return features, labels
def iris_input_fn_labels_dict():
iris = base.load_iris()
features = array_ops.reshape(
constant_op.constant(iris.data), [-1, _IRIS_INPUT_DIM])
labels = {
'labels': array_ops.reshape(constant_op.constant(iris.target), [-1])
}
return features, labels
def boston_eval_fn():
boston = base.load_boston()
n_examples = len(boston.target)
features = array_ops.reshape(
constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
labels = array_ops.reshape(
constant_op.constant(boston.target), [n_examples, 1])
return array_ops.concat([features, features], 0), array_ops.concat(
[labels, labels], 0)
def extract(data, key):
if isinstance(data, dict):
assert key in data
return data[key]
else:
return data
def linear_model_params_fn(features, labels, mode, params):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss,
variables.get_global_step(),
optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
def linear_model_fn(features, labels, mode):
features = extract(features, 'input')
labels = extract(labels, 'labels')
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
if isinstance(features, dict):
(_, features), = features.items()
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return prediction, loss, train_op
def linear_model_fn_with_model_fn_ops(features, labels, mode):
"""Same as linear_model_fn, but returns `ModelFnOps`."""
assert mode in (model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
model_fn.ModeKeys.INFER)
prediction, loss = (models.linear_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return model_fn.ModelFnOps(
mode=mode, predictions=prediction, loss=loss, train_op=train_op)
def logistic_model_no_mode_fn(features, labels):
features = extract(features, 'input')
labels = extract(labels, 'labels')
labels = array_ops.one_hot(labels, 3, 1, 0)
prediction, loss = (models.logistic_regression_zero_init(features, labels))
train_op = optimizers.optimize_loss(
loss, variables.get_global_step(), optimizer='Adagrad', learning_rate=0.1)
return {
'class': math_ops.argmax(prediction, 1),
'prob': prediction
}, loss, train_op
VOCAB_FILE_CONTENT = 'emerson\nlake\npalmer\n'
EXTRA_FILE_CONTENT = 'kermit\npiggy\nralph\n'
def _build_estimator_for_export_tests(tmpdir):
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_columns = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
est = linear.LinearRegressor(feature_columns)
est.fit(input_fn=_input_fn, steps=20)
feature_spec = feature_column_lib.create_feature_spec_for_parsing(
feature_columns)
serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(feature_spec)
# hack in an op that uses an asset, in order to test asset export.
# this is not actually valid, of course.
def serving_input_fn_with_asset():
features, labels, inputs = serving_input_fn()
vocab_file_name = os.path.join(tmpdir, 'my_vocab_file')
vocab_file = gfile.GFile(vocab_file_name, mode='w')
vocab_file.write(VOCAB_FILE_CONTENT)
vocab_file.close()
hashtable = lookup.HashTable(
lookup.TextFileStringTableInitializer(vocab_file_name), 'x')
features['bogus_lookup'] = hashtable.lookup(
math_ops.to_int64(features['feature']))
return input_fn_utils.InputFnOps(features, labels, inputs)
return est, serving_input_fn_with_asset
class CheckCallsMonitor(monitors_lib.BaseMonitor):
def __init__(self, expect_calls):
super(CheckCallsMonitor, self).__init__()
self.begin_calls = None
self.end_calls = None
self.expect_calls = expect_calls
def begin(self, max_steps):
self.begin_calls = 0
self.end_calls = 0
def step_begin(self, step):
self.begin_calls += 1
return {}
def step_end(self, step, outputs):
self.end_calls += 1
return False
def end(self):
assert (self.end_calls == self.expect_calls and
self.begin_calls == self.expect_calls)
class EstimatorTest(test.TestCase):
def testExperimentIntegration(self):
exp = experiment.Experiment(
estimator=estimator.Estimator(model_fn=linear_model_fn),
train_input_fn=boston_input_fn,
eval_input_fn=boston_input_fn)
exp.test()
def testModelFnArgs(self):
expected_param = {'some_param': 'some_value'}
expected_config = run_config.RunConfig()
expected_config.i_am_test = True
def _argument_checker(features, labels, mode, params, config):
_, _ = features, labels
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertTrue(config.i_am_test)
return constant_op.constant(0.), constant_op.constant(
0.), constant_op.constant(0.)
est = estimator.Estimator(
model_fn=_argument_checker,
params=expected_param,
config=expected_config)
est.fit(input_fn=boston_input_fn, steps=1)
def testModelFnWithModelDir(self):
expected_param = {'some_param': 'some_value'}
expected_model_dir = tempfile.mkdtemp()
def _argument_checker(features, labels, mode, params, config=None,
model_dir=None):
_, _, _ = features, labels, config
self.assertEqual(model_fn.ModeKeys.TRAIN, mode)
self.assertEqual(expected_param, params)
self.assertEqual(model_dir, expected_model_dir)
return constant_op.constant(0.), constant_op.constant(
0.), constant_op.constant(0.)
est = estimator.Estimator(model_fn=_argument_checker,
params=expected_param,
model_dir=expected_model_dir)
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_train_op(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
return None, loss, None
est = estimator.Estimator(model_fn=_invalid_model_fn)
with self.assertRaisesRegexp(ValueError, 'Missing training_op'):
est.fit(input_fn=boston_input_fn, steps=1)
def testInvalidModelFn_no_loss(self):
def _invalid_model_fn(features, labels, mode):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
predictions = loss
if mode == model_fn.ModeKeys.EVAL:
loss = None
return predictions, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing loss'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
def testInvalidModelFn_no_prediction(self):
def _invalid_model_fn(features, labels):
# pylint: disable=unused-argument
w = variables_lib.Variable(42.0, 'weight')
loss = 100.0 - w
train_op = w.assign_add(loss / 100.0)
return None, loss, train_op
est = estimator.Estimator(model_fn=_invalid_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.evaluate(input_fn=boston_eval_fn, steps=1)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(input_fn=boston_input_fn)
with self.assertRaisesRegexp(ValueError, 'Missing prediction'):
est.predict(
input_fn=functools.partial(
boston_input_fn, num_epochs=1),
as_iterable=True)
def testModelFnScaffold(self):
self.is_init_fn_called = False
def _init_fn(scaffold, session):
_, _ = scaffold, session
self.is_init_fn_called = True
def _model_fn_scaffold(features, labels, mode):
_, _ = features, labels
return model_fn.ModelFnOps(
mode=mode,
predictions=constant_op.constant(0.),
loss=constant_op.constant(0.),
train_op=constant_op.constant(0.),
scaffold=monitored_session.Scaffold(init_fn=_init_fn))
est = estimator.Estimator(model_fn=_model_fn_scaffold)
est.fit(input_fn=boston_input_fn, steps=1)
self.assertTrue(self.is_init_fn_called)
def testCheckpointSaverHookSuppressesTheDefaultOne(self):
saver_hook = test.mock.Mock(
spec=basic_session_run_hooks.CheckpointSaverHook)
saver_hook.before_run.return_value = None
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1, monitors=[saver_hook])
    # Test that nothing is saved, due to suppressing the default saver.
with self.assertRaises(learn.NotFittedError):
est.evaluate(input_fn=boston_input_fn, steps=1)
def testCustomConfig(self):
test_random_seed = 5783452
class TestInput(object):
def __init__(self):
self.random_seed = 0
def config_test_input_fn(self):
self.random_seed = ops.get_default_graph().seed
return constant_op.constant([[1.]]), constant_op.constant([1.])
config = run_config.RunConfig(tf_random_seed=test_random_seed)
test_input = TestInput()
est = estimator.Estimator(model_fn=linear_model_fn, config=config)
est.fit(input_fn=test_input.config_test_input_fn, steps=1)
# If input_fn ran, it will have given us the random seed set on the graph.
self.assertEquals(test_random_seed, test_input.random_seed)
def testRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config)
self.assertEqual('test_dir', est.config.model_dir)
def testModelDirAndRunConfigModelDir(self):
config = run_config.RunConfig(model_dir='test_dir')
est = estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='test_dir')
self.assertEqual('test_dir', est.config.model_dir)
with self.assertRaises(ValueError):
estimator.Estimator(model_fn=linear_model_fn,
config=config,
model_dir='different_dir')
def testCheckInputs(self):
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
    # Lambdas so we have two different objects to compare
right_features = lambda: np.ones(shape=[7, 8], dtype=np.float32)
right_labels = lambda: np.ones(shape=[7, 10], dtype=np.int32)
est.fit(right_features(), right_labels(), steps=1)
# TODO(wicke): This does not fail for np.int32 because of data_feeder magic.
wrong_type_features = np.ones(shape=[7, 8], dtype=np.int64)
wrong_size_features = np.ones(shape=[7, 10])
wrong_type_labels = np.ones(shape=[7, 10], dtype=np.float32)
wrong_size_labels = np.ones(shape=[7, 11])
est.fit(x=right_features(), y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_type_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=wrong_size_features, y=right_labels(), steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_type_labels, steps=1)
with self.assertRaises(ValueError):
est.fit(x=right_features(), y=wrong_size_labels, steps=1)
def testBadInput(self):
est = estimator.Estimator(model_fn=linear_model_fn)
self.assertRaisesRegexp(
ValueError,
'Either x or input_fn must be provided.',
est.fit,
x=None,
input_fn=None,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
x='X',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and x or y',
est.fit,
y='Y',
input_fn=iris_input_fn,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Can not provide both input_fn and batch_size',
est.fit,
input_fn=iris_input_fn,
batch_size=100,
steps=1)
self.assertRaisesRegexp(
ValueError,
'Inputs cannot be tensors. Please provide input_fn.',
est.fit,
x=constant_op.constant(1.),
steps=1)
def testUntrained(self):
boston = base.load_boston()
est = estimator.SKCompat(estimator.Estimator(model_fn=linear_model_fn))
with self.assertRaises(learn.NotFittedError):
_ = est.score(x=boston.data, y=boston.target.astype(np.float64))
with self.assertRaises(learn.NotFittedError):
est.predict(x=boston.data)
def testContinueTraining(self):
boston = base.load_boston()
output_dir = tempfile.mkdtemp()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
float64_labels = boston.target.astype(np.float64)
est.fit(x=boston.data, y=float64_labels, steps=50)
scores = est.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
del est
# Create another estimator object with the same output dir.
est2 = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_fn, model_dir=output_dir))
# Check we can evaluate and predict.
scores2 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertAllClose(scores['MSE'], scores2['MSE'])
predictions = np.array(list(est2.predict(x=boston.data)))
other_score = _sklearn.mean_squared_error(predictions, float64_labels)
self.assertAllClose(scores['MSE'], other_score)
# Check we can keep training.
est2.fit(x=boston.data, y=float64_labels, steps=100)
scores3 = est2.score(
x=boston.data,
y=float64_labels,
metrics={'MSE': metric_ops.streaming_mean_squared_error})
self.assertLess(scores3['MSE'], scores['MSE'])
def testEstimatorParams(self):
boston = base.load_boston()
est = estimator.SKCompat(
estimator.Estimator(
model_fn=linear_model_params_fn, params={'learning_rate': 0.01}))
est.fit(x=boston.data, y=boston.target, steps=100)
def testHooksNotChanged(self):
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
    # We pass an empty array and expect it to remain empty after calling
    # fit and evaluate. This requires the estimator internals to copy the
    # array if any hooks are added.
my_array = []
est.fit(input_fn=iris_input_fn, steps=100, monitors=my_array)
_ = est.evaluate(input_fn=iris_input_fn, steps=1, hooks=my_array)
self.assertEqual(my_array, [])
def testIrisIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = itertools.islice(iris.target, 100)
estimator.SKCompat(est).fit(x_iter, y_iter, steps=20)
eval_result = est.evaluate(input_fn=iris_input_fn, steps=1)
x_iter_eval = itertools.islice(iris.data, 100)
y_iter_eval = itertools.islice(iris.target, 100)
score_result = estimator.SKCompat(est).score(x_iter_eval, y_iter_eval)
print(score_result)
self.assertItemsEqual(eval_result.keys(), score_result.keys())
self.assertItemsEqual(['global_step', 'loss'], score_result.keys())
predictions = estimator.SKCompat(est).predict(x=iris.data)['class']
self.assertEqual(len(predictions), iris.target.shape[0])
def testIrisIteratorArray(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (np.array(x) for x in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisIteratorPlainInt(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 100)
y_iter = (v for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
_ = est.evaluate(input_fn=iris_input_fn, steps=1)
_ = six.next(est.predict(x=iris.data))['class']
def testIrisTruncatedIterator(self):
iris = base.load_iris()
est = estimator.Estimator(model_fn=logistic_model_no_mode_fn)
x_iter = itertools.islice(iris.data, 50)
y_iter = ([np.int32(v)] for v in iris.target)
est.fit(x_iter, y_iter, steps=100)
def testTrainStepsIsIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, steps=15)
self.assertEqual(25, est.get_variable_value('global_step'))
def testTrainMaxStepsIsNotIncremental(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, max_steps=10)
self.assertEqual(10, est.get_variable_value('global_step'))
est.fit(input_fn=boston_input_fn, max_steps=15)
self.assertEqual(15, est.get_variable_value('global_step'))
def testPredict(self):
est = estimator.Estimator(model_fn=linear_model_fn)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
output = list(est.predict(x=boston.data, batch_size=10))
self.assertEqual(len(output), boston.target.shape[0])
def testWithModelFnOps(self):
"""Test for model_fn that returns `ModelFnOps`."""
est = estimator.Estimator(model_fn=linear_model_fn_with_model_fn_ops)
boston = base.load_boston()
est.fit(input_fn=boston_input_fn, steps=1)
input_fn = functools.partial(boston_input_fn, num_epochs=1)
scores = est.evaluate(input_fn=input_fn, steps=1)
self.assertIn('loss', scores.keys())
output = list(est.predict(input_fn=input_fn))
self.assertEqual(len(output), boston.target.shape[0])
def testWrongInput(self):
def other_input_fn():
return {
'other': constant_op.constant([0, 0, 0])
}, constant_op.constant([0, 0, 0])
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
with self.assertRaises(ValueError):
est.fit(input_fn=other_input_fn, steps=1)
def testMonitorsForFit(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn,
steps=21,
monitors=[CheckCallsMonitor(expect_calls=21)])
def testHooksForEvaluate(self):
class CheckCallHook(session_run_hook.SessionRunHook):
def __init__(self):
self.run_count = 0
def after_run(self, run_context, run_values):
self.run_count += 1
est = learn.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=1)
hook = CheckCallHook()
est.evaluate(input_fn=boston_eval_fn, steps=3, hooks=[hook])
self.assertEqual(3, hook.run_count)
def testSummaryWriting(self):
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200)
est.evaluate(input_fn=boston_input_fn, steps=200)
loss_summary = util_test.simple_values_from_events(
util_test.latest_events(est.model_dir), ['OptimizeLoss/loss'])
self.assertEqual(1, len(loss_summary))
def testLossInGraphCollection(self):
class _LossCheckerHook(session_run_hook.SessionRunHook):
def begin(self):
self.loss_collection = ops.get_collection(ops.GraphKeys.LOSSES)
hook = _LossCheckerHook()
est = estimator.Estimator(model_fn=linear_model_fn)
est.fit(input_fn=boston_input_fn, steps=200, monitors=[hook])
self.assertTrue(hook.loss_collection)
def test_export_returns_exported_dirname(self):
expected = '/path/to/some_dir'
with test.mock.patch.object(estimator, 'export') as mock_export_module:
mock_export_module._export_estimator.return_value = expected
est = estimator.Estimator(model_fn=linear_model_fn)
actual = est.export('/path/to')
self.assertEquals(expected, actual)
def test_export_savedmodel(self):
tmpdir = tempfile.mkdtemp()
est, serving_input_fn = _build_estimator_for_export_tests(tmpdir)
extra_file_name = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_extra_file'))
extra_file = gfile.GFile(extra_file_name, mode='w')
extra_file.write(EXTRA_FILE_CONTENT)
extra_file.close()
assets_extra = {'some/sub/directory/my_extra_file': extra_file_name}
export_dir_base = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('export'))
export_dir = est.export_savedmodel(
export_dir_base, serving_input_fn, assets_extra=assets_extra)
self.assertTrue(gfile.Exists(export_dir_base))
self.assertTrue(gfile.Exists(export_dir))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes(
'saved_model.pb'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('variables'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.index'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('variables/variables.data-00000-of-00001'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets'))))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))))
self.assertEqual(
compat.as_bytes(VOCAB_FILE_CONTENT),
compat.as_bytes(
gfile.GFile(
os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets/my_vocab_file'))).read()))
expected_extra_path = os.path.join(
compat.as_bytes(export_dir),
compat.as_bytes('assets.extra/some/sub/directory/my_extra_file'))
self.assertTrue(
gfile.Exists(
os.path.join(
compat.as_bytes(export_dir), compat.as_bytes('assets.extra'))))
self.assertTrue(gfile.Exists(expected_extra_path))
self.assertEqual(
compat.as_bytes(EXTRA_FILE_CONTENT),
compat.as_bytes(gfile.GFile(expected_extra_path).read()))
expected_vocab_file = os.path.join(
compat.as_bytes(tmpdir), compat.as_bytes('my_vocab_file'))
# Restore, to validate that the export was well-formed.
with ops.Graph().as_default() as graph:
with session_lib.Session(graph=graph) as sess:
loader.load(sess, [tag_constants.SERVING], export_dir)
assets = [
x.eval()
for x in graph.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
]
self.assertItemsEqual([expected_vocab_file], assets)
graph_ops = [x.name for x in graph.get_operations()]
self.assertTrue('input_example_tensor' in graph_ops)
self.assertTrue('ParseExample/ParseExample' in graph_ops)
self.assertTrue('linear/linear/feature/matmul' in graph_ops)
# cleanup
gfile.DeleteRecursively(tmpdir)
class InferRealValuedColumnsTest(test.TestCase):
def testInvalidArgs(self):
with self.assertRaisesRegexp(ValueError, 'x or input_fn must be provided'):
estimator.infer_real_valued_columns_from_input(None)
with self.assertRaisesRegexp(ValueError, 'cannot be tensors'):
estimator.infer_real_valued_columns_from_input(constant_op.constant(1.0))
def _assert_single_feature_column(self, expected_shape, expected_dtype,
feature_columns):
self.assertEqual(1, len(feature_columns))
feature_column = feature_columns[0]
self.assertEqual('', feature_column.name)
self.assertEqual(
{
'':
parsing_ops.FixedLenFeature(
shape=expected_shape, dtype=expected_dtype)
},
feature_column.config)
def testInt32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int32))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int32), None))
self._assert_single_feature_column([8], dtypes.int32, feature_columns)
def testInt64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.int64))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testInt64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.int64), None))
self._assert_single_feature_column([8], dtypes.int64, feature_columns)
def testFloat32Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float32))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat32InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float32), None))
self._assert_single_feature_column([8], dtypes.float32, feature_columns)
def testFloat64Input(self):
feature_columns = estimator.infer_real_valued_columns_from_input(
np.ones(
shape=[7, 8], dtype=np.float64))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testFloat64InputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
lambda: (array_ops.ones(shape=[7, 8], dtype=dtypes.float64), None))
self._assert_single_feature_column([8], dtypes.float64, feature_columns)
def testBoolInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
estimator.infer_real_valued_columns_from_input(
np.array([[False for _ in xrange(8)] for _ in xrange(7)]))
def testBoolInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (constant_op.constant(False, shape=[7, 8], dtype=dtypes.bool), None))
def testStringInput(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input(
np.array([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]))
def testStringInputFn(self):
with self.assertRaisesRegexp(
ValueError, 'on integer or non floating types are not supported'):
# pylint: disable=g-long-lambda
estimator.infer_real_valued_columns_from_input_fn(
lambda: (
constant_op.constant([['%d.0' % i for i in xrange(8)] for _ in xrange(7)]),
None))
def testBostonInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
self._assert_single_feature_column([_BOSTON_INPUT_DIM], dtypes.float64,
feature_columns)
def testIrisInputFn(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
iris_input_fn)
self._assert_single_feature_column([_IRIS_INPUT_DIM], dtypes.float64,
feature_columns)
class ReplicaDeviceSetterTest(test.TestCase):
def testVariablesAreOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker', a.device)
def testVariablesAreLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('', v.device)
self.assertDeviceEqual('', v.initializer.device)
self.assertDeviceEqual('', w.device)
self.assertDeviceEqual('', w.initializer.device)
self.assertDeviceEqual('', a.device)
def testMutableHashTableIsOnPs(self):
tf_config = {'cluster': {run_config.TaskType.PS: ['fake_ps_0']}}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('/job:ps/task:0', table._table_ref.device)
self.assertDeviceEqual('/job:ps/task:0', output.device)
def testMutableHashTableIsLocal(self):
with ops.device(
estimator._get_replica_device_setter(run_config.RunConfig())):
default_val = constant_op.constant([-1, -1], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
input_string = constant_op.constant(['brain', 'salad', 'tank'])
output = table.lookup(input_string)
self.assertDeviceEqual('', table._table_ref.device)
self.assertDeviceEqual('', output.device)
def testTaskIsSetOnWorkerWhenJobNameIsSet(self):
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0']
},
'task': {
'type': run_config.TaskType.WORKER,
'index': 3
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
with ops.device(estimator._get_replica_device_setter(config)):
v = variables_lib.Variable([1, 2])
w = variables_lib.Variable([2, 1])
a = v + w
self.assertDeviceEqual('/job:ps/task:0', v.device)
self.assertDeviceEqual('/job:ps/task:0', v.initializer.device)
self.assertDeviceEqual('/job:ps/task:0', w.device)
self.assertDeviceEqual('/job:ps/task:0', w.initializer.device)
self.assertDeviceEqual('/job:worker/task:3', a.device)
if __name__ == '__main__':
test.main()
|
apache-2.0
|
yuxiang-zhou/deepmachine
|
deepmachine/contrib/training/estimator_3dmm.py
|
1
|
20805
|
# basic library
import os
import shutil
import math
import time
import menpo.io as mio
import menpo3d.io as m3io
import numpy as np
import h5py
import pandas as pd
import datetime
from menpo.shape import ColouredTriMesh, PointCloud
from menpo.image import Image
from menpo.transform import Homogeneous
from menpo3d.rasterize import rasterize_mesh
from pathlib import Path
from functools import partial
from itwmm.visualize import lambertian_shading
# deepmachine
import scipy
import tensorflow as tf
import deepmachine as dm
from deepmachine.utils import mesh as graph
from deepmachine.layers.mesh_renderer.mesh_renderer import mesh_renderer
tf.logging.set_verbosity(tf.logging.INFO)
# Component Definition
def get_data_fn(config, dataset_path, is_training=True, format='tf'):
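    # Returns an input_fn that reads TFRecord examples containing an image, a 3D mesh,
    # the mesh projected into the image, per-vertex colours and a vertex mask,
    # then shuffles, repeats and batches them for the Estimator.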
BATCH_SIZE = config.BATCH_SIZE
NUM_GPUS = config.NUM_GPUS
def data_fn_tf():
keys_to_features = dm.utils.union_dict([
dm.data.provider.features.image_feature(),
dm.data.provider.features.matrix_feature('mesh'),
dm.data.provider.features.matrix_feature('mesh/in_img'),
dm.data.provider.features.matrix_feature('mesh/colour'),
dm.data.provider.features.array_feature('mesh/mask'),
])
dataset = tf.data.TFRecordDataset(
dataset_path, num_parallel_reads=config.no_thread)
# Shuffle the dataset
dataset = dataset.shuffle(
buffer_size=BATCH_SIZE * NUM_GPUS * config.no_thread)
        # Repeat the input indefinitely
dataset = dataset.repeat()
# Generate batches
dataset = dataset.batch(BATCH_SIZE)
# example proto decode
def _parse_function(example_proto):
parsed_features = tf.parse_example(example_proto, keys_to_features)
feature_dict = {}
# parse image
def parse_single_image(feature):
m = tf.image.decode_jpeg(feature, channels=3)
m = tf.reshape(m, [256, 256, 3])
m = tf.to_float(m) / 255.
return m
feature_dict['image'] = tf.image.resize_images(
tf.map_fn(parse_single_image,
parsed_features['image'], dtype=tf.float32),
[config.INPUT_SHAPE, config.INPUT_SHAPE]
)
# parse mesh
m = tf.decode_raw(parsed_features['mesh'], tf.float32)
m = tf.to_float(tf.reshape(m, [-1, config.N_VERTICES, 3]))
feature_dict['mesh'] = m
# parse mesh in image
m = tf.decode_raw(parsed_features['mesh/in_img'], tf.float32)
m = tf.to_float(tf.reshape(m, [-1, config.N_VERTICES, 3]))
feature_dict['mesh/in_img'] = m
# parse mesh/colour
m = tf.decode_raw(parsed_features['mesh/colour'], tf.float32)
m = tf.reshape(m, [-1, config.N_VERTICES, 3])
feature_dict['mesh/colour'] = tf.to_float(m)
# create cmesh
feature_dict['cmesh'] = tf.concat(
[feature_dict['mesh'], feature_dict['mesh/colour']], axis=-1
)
# parse mask
m = tf.decode_raw(parsed_features['mesh/mask'], tf.float32)
m = tf.reshape(m, [-1, config.N_VERTICES, 1])
feature_dict['mask'] = tf.to_float(m)
return feature_dict, feature_dict
# Parse the record into tensors.
dataset = dataset.map(
_parse_function, num_parallel_calls=config.no_thread)
return dataset
return data_fn_tf
def get_model_fn(config):
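    # Builds the Estimator model_fn: an image encoder feeding a mesh decoder
    # (the 3DMM branch) plus a separate mesh autoencoder branch; both outputs are
    # rendered with a differentiable mesh renderer and the two losses are trained jointly.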
BATCH_SIZE = config.BATCH_SIZE
def model_fn(features, labels, mode, params):
input_image = features['image']
# image encoder
encoder_embedding = dm.networks.tfl.Encoder2D(input_image, config.EMBEDING)
# decoder
output_rec_cmesh = dm.networks.tfl.MeshDecoder(
encoder_embedding,
config.inputs_channels,
config.graph_laplacians,
config.adj_matrices,
config.upsamling_matrices,
polynomial_order=6,
filter_list=config.FILTERS
)
with tf.variable_scope('projection'):
output_mesh_proj = dm.layers.tfl.MeshConv(config.graph_laplacians[0], nf=3, name='proj_conv')(output_rec_cmesh[..., :3])
output_mesh_proj = tf.layers.batch_normalization(output_mesh_proj)
output_mesh_proj = dm.layers.tfl.MeshReLU1B(name='proj_relu1b')(output_mesh_proj)
# PREDICT mode
# Build estimator spec
predictions = {
'cmesh': output_rec_cmesh,
'pcmesh': tf.concat([output_mesh_proj, output_rec_cmesh[...,3:]], axis=-1)
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Build Additional Graph and Loss (for both TRAIN and EVAL modes)
## rendering config
eye = tf.constant(BATCH_SIZE * [[0.0, 0.0, -2.0]], dtype=tf.float32)
center = tf.constant(BATCH_SIZE * [[0.0, 0.0, 0.0]], dtype=tf.float32)
world_up = tf.constant(BATCH_SIZE * [[-1.0, 0.0, 0.0]], dtype=tf.float32)
ambient_colors = tf.constant(BATCH_SIZE * [[1., 1., 1.]], dtype=tf.float32) * 0.1
light_positions = tf.constant(BATCH_SIZE * [[[2.0, 0, -2.0], [-2.0, 0, -2.0]]])
light_intensities = tf.ones([BATCH_SIZE, 1, 3], dtype=tf.float32)
mesh_triangles = tf.constant(config.trilist, dtype=tf.int32)
def render_fn(cmesh):
## predicted mesh render
mesh_v = cmesh[...,:3]
mesh_c = cmesh[...,3:]
mesh_c = tf.clip_by_value(mesh_c,0,1)
mesh_v.set_shape([BATCH_SIZE, config.N_VERTICES, 3])
mesh_n = tf.nn.l2_normalize(mesh_v, axis=2)
mesh_n.set_shape([BATCH_SIZE, config.N_VERTICES, 3])
mesh_r = mesh_renderer(
mesh_v,
triangles=mesh_triangles,
normals=mesh_n,
diffuse_colors=mesh_c,
camera_position=eye,
camera_lookat=center,
camera_up=world_up,
light_positions=light_positions,
light_intensities=light_intensities,
image_width=config.INPUT_SHAPE,
image_height=config.INPUT_SHAPE,
specular_colors=None,
shininess_coefficients=None,
ambient_color=ambient_colors,
model_transform=None,
fov_y=40.0,
near_clip=0.01,
far_clip=10.0
)
return mesh_r
# mesh encoder
input_mesh = features['cmesh']
mesh_embedding = dm.networks.tfl.MeshEncoder(
input_mesh, config.EMBEDING, config.graph_laplacians, config.downsampling_matrices, filter_list=config.FILTERS)
output_rec_cmesh_ae = dm.networks.tfl.MeshDecoder(
mesh_embedding,
config.inputs_channels,
config.graph_laplacians,
config.adj_matrices,
config.upsamling_matrices,
polynomial_order=6,
filter_list=config.FILTERS,
reuse=True,
)
# in-graph rendering
gt_mesh_r = render_fn(input_mesh)
c3dmm_mesh_r = render_fn(output_rec_cmesh)
ae_mesh_r = render_fn(output_rec_cmesh_ae)
## summaries
tf.summary.image('image/input', input_image)
tf.summary.image('image/mesh/gt', gt_mesh_r)
tf.summary.image('image/mesh/pred', c3dmm_mesh_r)
tf.summary.image('image/mesh/ae', ae_mesh_r)
# define losses
## Autoencoder Losses
loss_shape = tf.reduce_mean(tf.losses.absolute_difference(
features['cmesh'][...,:3], output_rec_cmesh_ae[...,:3]
))
loss_appearance = tf.reduce_mean(tf.losses.absolute_difference(
features['cmesh'][...,3:], output_rec_cmesh_ae[...,3:], weights=labels['mask']
))
canny_weight = dm.utils.tf_canny(gt_mesh_r)
tf.summary.image('image/render/weight', canny_weight)
loss_render = tf.reduce_mean(tf.losses.absolute_difference(
gt_mesh_r, ae_mesh_r, weights=canny_weight
))
tf.summary.scalar('loss/ae/shape', loss_shape)
tf.summary.scalar('loss/ae/appearance', loss_appearance)
tf.summary.scalar('loss/ae/render', loss_render)
loss_ae = loss_shape + 0.5*loss_appearance + 0.5*loss_render
loss_ae /= 2.
tf.summary.scalar('loss/ae/total', loss_ae)
## 3DMM Losses
loss_shape = tf.reduce_mean(tf.losses.absolute_difference(
features['cmesh'][...,:3], output_rec_cmesh[...,:3]
))
loss_appearance = tf.reduce_mean(tf.losses.absolute_difference(
features['cmesh'][...,3:], output_rec_cmesh[...,3:], weights=labels['mask']
))
canny_weight = dm.utils.tf_canny(gt_mesh_r)
tf.summary.image('image/render/weight', canny_weight)
loss_render = tf.reduce_mean(tf.losses.absolute_difference(
gt_mesh_r, c3dmm_mesh_r, weights=canny_weight
))
loss_projection = tf.reduce_mean(tf.losses.absolute_difference(
features['mesh/in_img'], output_mesh_proj
))
tf.summary.scalar('loss/3dmm/shape', loss_shape)
tf.summary.scalar('loss/3dmm/appearance', loss_appearance)
tf.summary.scalar('loss/3dmm/render', loss_render)
tf.summary.scalar('loss/3dmm/projection', loss_projection)
loss_3dmm = loss_shape + 0.5*loss_appearance + 0.5*loss_render + loss_projection
loss_3dmm /= 3.
tf.summary.scalar('loss/3dmm/total', loss_3dmm)
## total loss
loss_total = loss_ae + loss_3dmm
        tf.summary.scalar('loss/total', loss_total)
## global steps
global_steps = tf.train.get_global_step()
# Configure the Training Op (for TRAIN mode)
if mode == tf.estimator.ModeKeys.TRAIN:
learning_rate = tf.train.exponential_decay(
config.LR,
global_steps,
config.EPOCH_STEPS,
config.lr_decay
)
tf.summary.scalar('lr/ae', learning_rate)
## 3dmm train op
tf.summary.scalar('lr/3dmm', learning_rate)
optimizer_3dmm = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
optimizer_3dmm = tf.contrib.estimator.clip_gradients_by_norm(
optimizer_3dmm, 5.0)
train_op_3dmm = optimizer_3dmm.minimize(
loss=loss_3dmm,
global_step=global_steps,
var_list=tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='image_encoder'
) + tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='projection'
)
)
## autoencoder train op
optimizer_mesh_ae = tf.train.RMSPropOptimizer(
learning_rate=learning_rate / 100.)
optimizer_mesh_ae = tf.contrib.estimator.clip_gradients_by_norm(
optimizer_mesh_ae, 5.0)
train_op_mesh_ae = optimizer_mesh_ae.minimize(
loss=loss_ae,
global_step=global_steps,
var_list=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='mesh_encoder') +
tf.get_collection(
tf.GraphKeys.GLOBAL_VARIABLES, scope='mesh_decoder')
)
train_op = tf.group(train_op_3dmm, train_op_mesh_ae)
return tf.estimator.EstimatorSpec(mode=mode, loss=loss_total, train_op=train_op)
# Add evaluation metrics (for EVAL mode)
eval_metric_ops = {
"Reconstruction/All": tf.metrics.mean_absolute_error(labels=features['cmesh'], predictions=predictions["cmesh"]),
"Reconstruction/Mesh": tf.metrics.mean_absolute_error(labels=features['cmesh'][...,:3], predictions=predictions["cmesh"][...,:3]),
"Reconstruction/Appearance": tf.metrics.mean_absolute_error(labels=features['cmesh'][...,3:], predictions=predictions["cmesh"][...,3:]),
"Reconstruction/Proj": tf.metrics.mean_absolute_error(labels=features['mesh/in_img'], predictions=output_mesh_proj),
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss_total, eval_metric_ops=eval_metric_ops)
return model_fn
def get_config():
# flag definitions
tf.app.flags.DEFINE_string(
'meta_path', '/vol/atlas/homes/yz4009/databases/mesh/meta', '''path to meta files''')
tf.app.flags.DEFINE_string(
'test_path', '', '''path to test files''')
tf.app.flags.DEFINE_string(
'warm_start_from', None, '''path to test files''')
tf.app.flags.DEFINE_integer('embedding', 128, '''embedding''')
from deepmachine.flags import FLAGS
class Config:
def format_folder(self, FLAGS):
post_fix = 'lr{:f}_d{:f}_emb{:03d}_b{:02d}'.format(
FLAGS.lr, FLAGS.lr_decay, FLAGS.embedding, FLAGS.batch_size
)
logdir = FLAGS.logdir if 'model_' in FLAGS.logdir else "{}/model_{}".format(
FLAGS.logdir, post_fix
)
return logdir
def __init__(self, *args, **kwargs):
# hyperparameters
self.BATCH_SIZE = FLAGS.batch_size
self.LR = FLAGS.lr
self.inputs_channels = 6
self.lr_decay = FLAGS.lr_decay
self.warm_start_from = FLAGS.warm_start_from
self.EMBEDING = FLAGS.embedding
self.LOGDIR = self.format_folder(FLAGS)
# self.N_VERTICES = 28431
self.N_VERTICES = 53215
self.INPUT_SHAPE = 112
self.FILTERS = [16, 32, 32, 64]
self.DB_SIZE = 30000
self.NUM_GPUS = len(FLAGS.gpu.split(','))
self.EPOCH_STEPS = self.DB_SIZE // (self.BATCH_SIZE * self.NUM_GPUS) * 2
self.TOTAL_EPOCH = FLAGS.n_epoch
self.no_thread = FLAGS.no_thread
self.dataset_path = FLAGS.dataset_path
self.test_path = FLAGS.test_path
self.train_format = 'tf' if FLAGS.dataset_path.endswith('.tfrecord') else 'h5py'
self.test_format = 'tf' if FLAGS.test_path.endswith('.tfrecord') else 'h5py'
self.luda_file = 'lsfm_LDUA.pkl'
self.symetric_index = np.load(FLAGS.meta_path + '/symetric_index_full.npy').astype(np.int32)
            # global constants
self.shape_model = mio.import_pickle(FLAGS.meta_path + '/all_all_all.pkl', encoding='latin1')
self.trilist = self.shape_model.instance([]).trilist
self.graph_laplacians, self.downsampling_matrices, self.upsamling_matrices, self.adj_matrices = mio.import_pickle(
FLAGS.meta_path + '/' + self.luda_file, encoding='latin1')
return Config()
def main():
config = get_config()
# configuration
strategy = tf.contrib.distribute.MirroredStrategy(num_gpus=config.NUM_GPUS) if config.NUM_GPUS > 1 else None
config_estimator = tf.estimator.RunConfig(
train_distribute=strategy,
save_checkpoints_steps=config.EPOCH_STEPS,
save_summary_steps=100,
keep_checkpoint_max=None,
)
# Set up Hooks
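    # TimeHistory records per-step wall-clock time and, every 20 global steps,
    # prints epoch progress, an estimated finishing time and images/sec.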
class TimeHistory(tf.train.SessionRunHook):
def begin(self):
self._step_tf = tf.train.get_global_step()
self.times = []
self.total_epoch = config.TOTAL_EPOCH
self.total_steps = config.EPOCH_STEPS * self.total_epoch
def before_run(self, run_context):
self.iter_time_start = time.time()
def after_run(self, run_context, run_values):
self._step = run_context.session.run(self._step_tf)
self.times.append(time.time() - self.iter_time_start)
if self._step % 20 == 0:
total_time = sum(self.times)
                avg_time_per_batch = np.mean(self.times[-20:])
estimate_finishing_time = (
self.total_steps - self._step) * avg_time_per_batch
i_batch = self._step % config.EPOCH_STEPS
i_epoch = self._step // config.EPOCH_STEPS
print("INFO: Epoch [{}/{}], Batch [{}/{}]".format(i_epoch,self.total_epoch,i_batch,config.EPOCH_STEPS))
print("INFO: Estimate Finishing time: {}".format(datetime.timedelta(seconds=estimate_finishing_time)))
print("INFO: Image/sec: {}".format(config.BATCH_SIZE*config.NUM_GPUS/avg_time_per_batch))
print("INFO: N GPUs: {}".format(config.NUM_GPUS))
time_hist = TimeHistory()
ws = None
if config.warm_start_from:
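        # Warm-start only the mesh_* variables; the name map translates the
        # layer-numbered kernel names in the checkpoint to the names used in the current graph.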
ws = tf.estimator.WarmStartSettings(
ckpt_to_initialize_from=config.warm_start_from,
vars_to_warm_start='mesh_.*',
var_name_to_prev_var_name={
"mesh_decoder/dense/bias": "mesh_decoder/dense/bias",
"mesh_decoder/dense/kernel": "mesh_decoder/dense/kernel",
"mesh_decoder_1/mesh_conv_9/kernel": "mesh_decoder/mesh_conv_4/kernel",
"mesh_decoder_1/mesh_conv_10/kernel": "mesh_decoder/mesh_conv_5/kernel",
"mesh_decoder_1/mesh_conv_11/kernel": "mesh_decoder/mesh_conv_6/kernel",
"mesh_decoder_1/mesh_conv_12/kernel": "mesh_decoder/mesh_conv_7/kernel",
"mesh_decoder_1/mesh_conv_13/kernel": "mesh_decoder/mesh_conv_8/kernel",
"mesh_decoder_1/mesh_re_l_u1b_8/kernel": "mesh_decoder/mesh_re_l_u1b_4/kernel",
"mesh_decoder_1/mesh_re_l_u1b_9/kernel": "mesh_decoder/mesh_re_l_u1b_5/kernel",
"mesh_decoder_1/mesh_re_l_u1b_10/kernel": "mesh_decoder/mesh_re_l_u1b_6/kernel",
"mesh_decoder_1/mesh_re_l_u1b_11/kernel": "mesh_decoder/mesh_re_l_u1b_7/kernel",
"mesh_decoder/mesh_conv/kernel": "mesh_decoder/mesh_conv_4/kernel",
"mesh_decoder/mesh_conv_1/kernel": "mesh_decoder/mesh_conv_5/kernel",
"mesh_decoder/mesh_conv_2/kernel": "mesh_decoder/mesh_conv_6/kernel",
"mesh_decoder/mesh_conv_3/kernel": "mesh_decoder/mesh_conv_7/kernel",
"mesh_decoder/mesh_conv_4/kernel": "mesh_decoder/mesh_conv_8/kernel",
"mesh_decoder/mesh_re_l_u1b/kernel": "mesh_decoder/mesh_re_l_u1b_4/kernel",
"mesh_decoder/mesh_re_l_u1b_1/kernel": "mesh_decoder/mesh_re_l_u1b_5/kernel",
"mesh_decoder/mesh_re_l_u1b_2/kernel": "mesh_decoder/mesh_re_l_u1b_6/kernel",
"mesh_decoder/mesh_re_l_u1b_3/kernel": "mesh_decoder/mesh_re_l_u1b_7/kernel",
"mesh_encoder/dense/bias": "mesh_encoder/dense/bias",
"mesh_encoder/dense/kernel": "mesh_encoder/dense/kernel",
"mesh_encoder/mesh_conv_5/kernel": "mesh_encoder/mesh_conv/kernel",
"mesh_encoder/mesh_conv_6/kernel": "mesh_encoder/mesh_conv_1/kernel",
"mesh_encoder/mesh_conv_7/kernel": "mesh_encoder/mesh_conv_2/kernel",
"mesh_encoder/mesh_conv_8/kernel": "mesh_encoder/mesh_conv_3/kernel",
"mesh_encoder/mesh_re_l_u1b_4/kernel": "mesh_encoder/mesh_re_l_u1b/kernel",
"mesh_encoder/mesh_re_l_u1b_5/kernel": "mesh_encoder/mesh_re_l_u1b_1/kernel",
"mesh_encoder/mesh_re_l_u1b_6/kernel": "mesh_encoder/mesh_re_l_u1b_2/kernel",
"mesh_encoder/mesh_re_l_u1b_7/kernel": "mesh_encoder/mesh_re_l_u1b_3/kernel",
}
)
# Create the Estimator
Fast_3DMM = tf.estimator.Estimator(
model_fn=get_model_fn(config),
model_dir=config.LOGDIR,
config=config_estimator,
warm_start_from=ws
)
if config.test_path:
train_spec = tf.estimator.TrainSpec(
input_fn=get_data_fn(config, config.dataset_path, format=config.train_format),
max_steps=config.EPOCH_STEPS * config.TOTAL_EPOCH,
hooks=[time_hist]
)
eval_spec = tf.estimator.EvalSpec(
input_fn=get_data_fn(config, config.test_path, is_training=False, format=config.test_format),
steps=config.EPOCH_STEPS * 5
)
tf.estimator.train_and_evaluate(Fast_3DMM, train_spec, eval_spec)
else:
Fast_3DMM.train(get_data_fn(
config, config.dataset_path, is_training=True, format=config.train_format
), steps=config.EPOCH_STEPS * config.TOTAL_EPOCH, hooks=[time_hist])
if __name__ == '__main__':
main()
|
mit
|
harry0519/nsnqt
|
nsnqtlib/strategies/moneyfundstrategy.py
|
1
|
19576
|
# -*- coding:utf-8 -*-
from nsnqtlib.strategies.strategy import basestrategy
import pandas as pd
import tushare as ts
class moneyfundstrategy(basestrategy):
    '''
    Override the buy and sell conditions.
    '''
def __init__(self,startdate=(2011, 1, 1),enddate=[],emafast=12,emaslow=26,demday=9):
self.pre_MA = False
self.curr_MA = False
self.buyprice = 0
#self.formatlistnew = ["date", "volume", "close", "high", "low", "open", "pre_close", 'actualvalue','cumulativevalue']
self.formatlistnew = ["date", "volume", "close", "high", "low", "open", "pre_close", 'actualvalue']
super(moneyfundstrategy, self).__init__(startdate, enddate)
    # Load the list of stocks to trade
def import_stocklist(self, stocklistname):
df = pd.read_csv(str(stocklistname) + '.csv')
#df = pd.read_csv(str(stocklistname) + '.csv', parse_dates=['startdate'])
df['code'] = df['code'].astype('str')
count = 0
df_len = len(df.index)
        while (count < df_len):
            stock_name = str(df.iat[count, 0])
            # left-pad stock codes with zeros to six digits (e.g. '455' -> '000455')
            df.iat[count, 0] = stock_name.zfill(6)
            count = count + 1
return df
def _getdata(self,collection="600455.SH",db="ml_security_table",out=[],isfilt=True,filt={}):
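        # Fetch price history from tushare, a local CSV, or the MongoDB store, depending on `db`.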
self.collection = collection
if db == "tushare":
#d1 = datetime.datetime.now()
#d2 = d1 + datetime.timedelta(-240)
#d1 = d1.strftime('%Y-%m-%d')
#d2 = d2.strftime('%Y-%m-%d')
#query = ts.get_hist_data(collection, start=d2, end=d1, )
query = ts.get_hist_data(collection, start='2012-01-01', end='2017-02-03')
query['date'] = query.index
query = query.sort_index(axis=0, ascending=True)
query['pre_close'] = query['close'].shift(1)
query.to_csv(collection + 'new.csv')
return query
elif db == 'local':
query = pd.read_csv(str(collection) + '.csv')
#out = self.formatlist
return query
else:
if not out: out = self.formatlist
if isfilt and not filt: filt = {"date": {"$gt": self.startdate}}
query = self.m.read_data(db, collection, filt=filt)
#query.to_csv(collection)
#df = pd.DataFrame(query)
#df.to_csv(collection+'new.csv')
print(query)
print('downloaded')
return self.formatquery(query, out)
'''
#def _getdata(self,collection="600455.SH",db="ml_security_table"):
def _getdata(self, collection="600455.SH", db="ml_security_table", out=[], isfilt=True, filt={}):
if db == "ml_security_table":
query = self.m.read_data(db,collection,filt={"date":{"$gt": self.startdate}})
query.to_csv('db_'+collection + '.csv')
out = self.formatlist
return self.formatquery(query, out)
elif db == "tushare":
query = ts.get_hist_data(collection)
#query = ts.get_hist_data(collection)
#print(collection)
query['date'] = query.index
query = query.sort_index(axis=0, ascending=True)
query['pre_close'] = query['close'].shift(1)
query['actualvalue'] = query['close'] - query['pre_close']
out = self.formatlist
#return self.formatquery(query, out)
return query
elif db == 'local':
query = pd.read_csv(str(collection) + '.csv')
#out = self.formatlist
return query
'''
'''
def _getdata(self,collection="600455.SH",db="ml_security_table"):
#query = pd.read_csv(str(collection) + '.csv', parse_dates=['date'])
#print(query)
query = self.m.read_data(db,collection,filt={"date":{"$gt": self.startdate}})
out = self.formatlist
return self.formatquery(query,out)
'''
def historyreturn(self, collection, par):
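        # Replay the buy/sell rules over one stock's history; returns the closed
        # trades and the positions still held at the end of the data.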
trading_record = []
holding_record = []
data = self._getdata(collection, 'tushare')
#print(collection)
#newdata = self.updatemoneyfund(data)
#newdata.to_csv(collection+'moneyfundstrategy_new.csv')
#print(data)
lst = [l for l in data[self.formatlistnew].fillna(0).values if l[1] != 0]
#lst.extend('dddd')
#df = pd.DataFrame(lst)
#df.to_csv(collection+'moneyfundstrategy.csv')
count = 0
for line in lst[:]:
isbuy = self.buy(lst, count, par)
for b in holding_record[:]:
issell, traderecord = self.sell(lst, count, b)
if issell:
holding_record.remove(b)
trading_record.append(traderecord)
print (traderecord)
if isbuy:
#holding_record.append((line, count, collection))
holding_record.append(([i for i in line], count, collection))
count += 1
return trading_record, holding_record
def updatemoneyfund(self, df):
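        # Rebuild a cumulative-value series: large single-day drops in 'actualvalue'
        # (likely fund payouts) are spread linearly over the preceding interval
        # instead of appearing as jumps in 'cumulativevalue'.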
count = 1
df_len = len(df.index)
yearvaluelist = []
year1end = 0
year2end = 0
df['cumulativevalue'] = df['actualvalue']
while (count < df_len):
if df['actualvalue'].iloc[count] < -1:
yearvalue = [-1 * df['actualvalue'].iloc[count],count]
yearvaluelist.append(yearvalue)
df['cumulativevalue'].iloc[count] = df['close'].iloc[count]
count = count + 1
newdf = pd.DataFrame(yearvaluelist)
#for b in yearvaluelist[:]:
#print(b[0],b[1])
count = 0
df_len = len(df.index)
newdf_count = 0
newdf_len = len(newdf.index)
while (newdf_count < newdf_len):
#print(newdf[1].iloc[newdf_count])
if newdf[1].iloc[newdf_count] < 200:
while (count < newdf[1].iloc[newdf_count]):
df['cumulativevalue'].iloc[count] = df['close'].iloc[count]
count = count + 1
elif newdf[1].iloc[newdf_count] - newdf[1].iloc[newdf_count-1] > 200:
count = newdf[1].iloc[newdf_count-1]
print(count)
endcount = newdf[1].iloc[newdf_count]
print(endcount)
increasevalue = newdf[0].iloc[newdf_count] / (endcount - count)
print(increasevalue)
df['cumulativevalue'].iloc[count] = df['close'].iloc[count]
count = count +1
while (count < endcount):
df['cumulativevalue'].iloc[count] = df['cumulativevalue'].iloc[count-1] + increasevalue
count = count + 1
if newdf_count == newdf_len-1:
count = newdf[1].iloc[newdf_count]
#increasevalue = newdf[0].iloc[newdf_count] / (df_len - newdf[1].iloc[newdf_count])
df['cumulativevalue'].iloc[count] = df['close'].iloc[count]
count = count + 1
while (count < df_len):
df['cumulativevalue'].iloc[count] = df['cumulativevalue'].iloc[count - 1] + increasevalue
count = count + 1
newdf_count = newdf_count + 1
#print(newdf)
return df
def looplist_historyreturn(self, df, actiontype="regression"):
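        # Iterate over the stock list: backtest each stock ('regression') or
        # check live quotes for trade signals ('trade').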
error_list = []
buylist = []
selllist = []
count = 0
df_len = len(df.index)
column_num = len(df.count())
while (count < df_len):
columncount = 1
par = []
while (columncount < column_num):
par.append(df.iat[count, columncount])
columncount = columncount + 1
print(par)
#stock_name = str(df.iat[count, 0])
            stock_name = str(df.loc[count, 'stock'])
try:
if actiontype == 'regression':
tr,hr = self.historyreturn(stock_name, par)
self.trading_records.extend(tr)
self.holding_records.extend(hr)
elif actiontype == 'trade':
buy, sell = self.getprocedure(isdb=True, collection=stock_name)
buylist.extend(buy)
selllist.extend(sell)
except:
error_list.append(stock_name)
count = count + 1
print(error_list)
print(buylist)
print(selllist)
return self.trading_records,self.holding_records, buylist, selllist
def buy(self, lst, count, par):
        ''' input:
            lst: [], rows of stock data; each row follows self.formatlist = ["date","volume","close","high","low","open","pre_close"]
            count: int, index of the current row counted from the first row
            par: [], extra per-stock parameters read from the stock list
        output:
            bool, True if the buy condition is met, otherwise False
        '''
vol_day = 10
price_day = 60
vol_weight = 1.2
dat = lst[count][0]
close = lst[count][2]
pre_close = lst[count][6]
#if count <= 60: return False
vol_data = [i[1] for i in lst[count - vol_day:count]]
#maxprice = max([i[3]] for i in lst[count - price_day:count])[0]
#minprice = min([i[4]] for i in lst[count - price_day:count])[0]
#maxindex = [i for i in range(count - price_day, count) if lst[i][3] == maxprice][0]
'''
if self.buy_condition1(vol, vol_data, vol_weight) and \
self.buy_condition2(close, lst[count - 1][3]) and \
self.buy_condition3(close, maxprice) and \
self.buy_condition4(close, minprice) and \
self.buy_condition5(count, maxindex):
return True
'''
#and self.condition7(close, par[0]) and self.condition9(close, pre_close)
#if self.condition10(close) and self.condition9(close, pre_close) and self.MA_condition(lst, count):
if self.moneyfundbuycondiction(close, pre_close):
#print(dat)
self.buyprice = pre_close
return True
return False
#print(self.waitbuy)
#and self.condition9(close, pre_close)
def waitforbuy(self, dat, close, par):
if self.condition6(dat, par[1]) and \
self.condition7(close, par[0]):
return True
return False
def sell(self, lst, count, buyrecord):
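        # Decide whether to close an open position; returns (should_sell, trade_record).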
currentday_high = lst[count][3]
gain_grads = 0.2
loss_grads = -0.05
dayout = 30
currentday_low = lst[count][4]
sell_date = lst[count][0]
close = lst[count][2]
high = lst[count][3]
low = lst[count][4]
buy_price = buyrecord[0][2]
hold_days = count - buyrecord[1]
buy_date = buyrecord[0][0]
collection = buyrecord[2]
'''
if self.stopgain_condition(buy_price, currentday_high, gain_grads):
self.bought = False
gain_grads = (currentday_high - buy_price) / buy_price
#sell_date = sell_date.strftime('%Y-%m-%d')
#buy_date = buy_date.strftime('%Y-%m-%d')
#sell_date = changedateformat(sell_date)
return True, [collection, buy_date, sell_date, hold_days, gain_grads, '']
elif self.stoploss_condition(buy_price, currentday_low, loss_grads):
#sell_date = sell_date.strftime('%Y-%m-%d')
#buy_date = buy_date.strftime('%Y-%m-%d')
return True, [collection, buy_date, sell_date, hold_days, (close - buy_price) / buy_price, '']
elif self.holdingtime_condition(hold_days, dayout):
#sell_date = sell_date.strftime('%Y-%m-%d')
#buy_date = buy_date.strftime('%Y-%m-%d')
return True, [collection, buy_date, sell_date, hold_days, (close - buy_price) / buy_price, '']
        elif self.Sellcondition3(close):
#sell_date = sell_date.strftime('%Y-%m-%d')
#buy_date = buy_date.strftime('%Y-%m-%d')
return True, [collection, buy_date, sell_date, hold_days, (close - buy_price) / buy_price, '']
return False, None
'''
if self.moneyfundsellcondiction(close):
#sell_date = sell_date.strftime('%Y-%m-%d')
#buy_date = buy_date.strftime('%Y-%m-%d')
return True, [collection, buy_date, sell_date, hold_days, (close - buy_price) / buy_price, '']
return False, None
'''
elif self.holdingtime_condition(hold_days, dayout):
sell_date = sell_date.strftime('%Y-%m-%d')
buy_date = buy_date.strftime('%Y-%m-%d')
return True, [collection, buy_date, sell_date, hold_days, (close - buy_price) / buy_price, '']
'''
    # Fetch real-time quotes and compare against the historical backtest data to see whether a trading opportunity exists
def getprocedure(self, filename="procedure_records.csv", isdb=False, collection="processstatus", db="etfgrid"):
'''"stock","date","data","s_ema","f_ema","diff","dem","macd","status"
'''
buy = []
sell = []
newlist = []
newdatalist = []
#out = ["stock", "date", "close", "startprice", "buytimes", "selltimes"]
'''
if isdb:
# df = self._getdata(collection, db, out=out, isfilt=False)[out]
#df = self._getdata(collection, db, out=out, isfilt=False)
df = self._getdata(collection, 'tushare')
# print(df)
else:
# df = pd.read_csv(filename)[out]
df = pd.read_csv(filename)
'''
# df.to_csv(collection)
#df_len = len(df.index)
#stock = str(df['stock'].iloc[0])
#print(stock)
#pre_close = float(df['close'].iloc[df_len-1])
#print(pre_close)
print(collection)
print('159005')
new_df = ts.get_realtime_quotes(collection)
print(new_df)
pre_close = float(new_df['pre_close'].iloc[0])
print('pre_close:' + str(pre_close))
price = float(new_df['ask'].iloc[0])
print('price:' + str(price))
high = float(new_df['high'].iloc[0])
# price = 0.89
#df_len = len(df.index) - 1
#if df_len < 200: return buy, sell
#startprice = df['startprice'].iloc[df_len]
#buynumber = df['buytimes'].iloc[df_len]
#sellnumber = df['selltimes'].iloc[df_len]
if price - pre_close < -0.1:
buy.append(collection)
# print(buy)
return buy, sell
def stopgain_condition(self, buy_price, current_price, grads=0.1):
if (current_price - buy_price) / buy_price >= grads:
return True
return False
def stoploss_condition(self, buy_price, current_price, grads=-0.05):
if (current_price - buy_price) / buy_price <= grads:
return True
return False
def holdingtime_condition(self, hold_days, dayout=10):
if hold_days >= dayout:
return True
return False
def mean_volume(self, data):
m_vol = sum(data) / len(data)
return m_vol
def buy_condition1(self, vol, vol_data, vol_weight=1.2):
if vol >= vol_weight * self.mean_volume(vol_data):
return True
return False
def buy_condition2(self, close, last_high):
if close >= last_high:
return True
return False
def buy_condition3(self, close, high, grads=0.2):
if (high - close) / high >= grads:
return True
return False
def buy_condition4(self, close, low, grads=0.05):
if (close - low) / low <= grads:
return True
return False
def buy_condition5(self, currentday, highday, grads=60):
if currentday - highday >= grads:
return True
return False
def condition6(self, dat, startdate):
newdat = pd.to_datetime(dat)
newdat = newdat.strftime('%Y-%m-%d')
newstartdate = pd.to_datetime(startdate)
newstartdate = newstartdate.strftime('%Y-%m-%d')
# print(newdat)
# print(newstartdate)
if newdat > newstartdate:
#print(newdat)
return True
return False
def condition7(self, close, cashprice):
if close < cashprice:
return True
return False
def condition8(self, close, low, pre_close):
if low > pre_close:
return True
return False
def condition9(self, close, pre_close):
if (close - pre_close) / pre_close < 0.099:
return True
return False
def condition10(self, close):
if close < 100:
return True
return False
def MA_judge_result(self, lst, count):
self.curr_MA = self.MA_condition(lst,count)
if self.pre_MA == False and self.curr_MA == True:
self.pre_MA = self.curr_MA
return True
self.pre_MA = self.curr_MA
return False
def MA_condition(self,lst,count):
if self.MA_result(lst,count,5) > self.MA_result(lst,count, 10) and \
self.MA_result(lst, count, 10) > self.MA_result(lst, count, 20) and \
self.MA_result(lst, count, 20) > self.MA_result(lst, count, 30):
#print(count)
return True
return False
def MA_result(self, lst,count, meanday):
meanlist = [i[2] for i in lst[count - meanday + 1:count + 1]]
return sum(meanlist) / meanday
def moneyfundbuycondiction(self, close, pre_close):
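        # Buy when the close has dropped by more than 0.05 but less than 1 versus the previous close.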
if close - pre_close < -0.05 and \
close - pre_close > -1:
return True
return False
def Sellcondition1(self, lst, high, count, maxday):
meanlist = [i[2] for i in lst[count - maxday + 1:count + 1]]
if high > max(meanlist):
return True
return False
def Sellcondition2(self, lst, high, low, close):
if high - low > 0:
if (close-low)/(high-close) < 0.2:
return True
return False
def Sellcondition3(self, close):
if close > 130:
return True
return False
def moneyfundsellcondiction(self, close):
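        # Sell once the close is back above the recorded buy price (by less than 1).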
if close > self.buyprice and \
close - self.buyprice < 1:
return True
return False
if __name__ == '__main__':
s = moneyfundstrategy()
df_stocklist = s.import_stocklist("moneyfundstrategy")
print(df_stocklist)
#s.setlooplist()
#getprocedure(self, filename="procedure_records.csv", isdb=False, collection="processstatus", db="etfgrid")
s.looplist_historyreturn(df_stocklist, actiontype="trade")
#s.looplist_historyreturn(df_stocklist)
s.savetrading2csv()
#s.saveholding2csv()
#report = reportforms(df)
#report.cumulative_graph()
#report.positiongain(100)
|
bsd-2-clause
|