repo_name | path | copies | size | content | license
---|---|---|---|---|---|
moonbury/notebooks | github/MasteringMLWithScikit-learn/8365OS_07_Codes/iris-pca-dimensionality-reduction.py | 3 | 1671 | """
>>> import matplotlib.pyplot as plt
>>> from sklearn.decomposition import PCA
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> y = data.target
>>> X = data.data
>>> pca = PCA(n_components=2)
>>> reduced_X = pca.fit_transform(X)
>>> red_x, red_y = [], []
>>> blue_x, blue_y = [], []
>>> green_x, green_y = [], []
>>> for i in range(len(reduced_X)):
...     if y[i] == 0:
...         red_x.append(reduced_X[i][0])
...         red_y.append(reduced_X[i][1])
...     elif y[i] == 1:
...         blue_x.append(reduced_X[i][0])
...         blue_y.append(reduced_X[i][1])
...     else:
...         green_x.append(reduced_X[i][0])
...         green_y.append(reduced_X[i][1])
>>> plt.scatter(red_x, red_y, c='r', marker='x')
>>> plt.scatter(blue_x, blue_y, c='b', marker='D')
>>> plt.scatter(green_x, green_y, c='g', marker='.')
>>> plt.show()
"""
import matplotlib
matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.datasets import load_iris
data = load_iris()
y = data.target
X = data.data
pca = PCA(n_components=2)
reduced_X = pca.fit_transform(X)
red_x, red_y = [], []
blue_x, blue_y = [], []
green_x, green_y = [], []
for i in range(len(reduced_X)):
if y[i] == 0:
red_x.append(reduced_X[i][0])
red_y.append(reduced_X[i][1])
elif y[i] == 1:
blue_x.append(reduced_X[i][0])
blue_y.append(reduced_X[i][1])
else:
green_x.append(reduced_X[i][0])
green_y.append(reduced_X[i][1])
plt.scatter(red_x, red_y, c='r', marker='x')
plt.scatter(blue_x, blue_y, c='b', marker='D')
plt.scatter(green_x, green_y, c='g', marker='.')
plt.show()
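# As a quick, optional check (not in the original script), report how much of
# the variance in the iris data the two retained components explain
# (roughly 0.92 and 0.05):
print(pca.explained_variance_ratio_)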
| gpl-3.0 |
herilalaina/scikit-learn | sklearn/semi_supervised/label_propagation.py | 4 | 18837 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. They can, however, be expensive
to run, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset given
label assignments over an initial subset. In one variant, the algorithm does
not allow for any errors in the initial assignment (hard-clamping) while
in another variant, the algorithm allows for some wiggle room for the initial
assignments, allowing them to change by a fraction alpha in each iteration
(soft-clamping).
Kernel:
A function which projects a vector into some higher dimensional space. This
implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> import numpy as np
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> rng = np.random.RandomState(42)
>>> random_unlabeled_points = rng.rand(len(iris.target)) < 0.3
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <[email protected]>
# Utkarsh Upadhyay <[email protected]>
# License: BSD
from abc import ABCMeta, abstractmethod
import warnings
import numpy as np
from scipy import sparse
from scipy.sparse import csgraph
from ..base import BaseEstimator, ClassifierMixin
from ..externals import six
from ..metrics.pairwise import rbf_kernel
from ..neighbors.unsupervised import NearestNeighbors
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import check_classification_targets
from ..utils.validation import check_X_y, check_is_fitted, check_array
from ..exceptions import ConvergenceWarning
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf', callable}
String identifier for kernel function to use or the kernel function
itself. Only 'rbf' and 'knn' strings are valid inputs. The function
passed should take two inputs, each of shape [n_samples, n_features],
and return a [n_samples, n_samples] shaped weight matrix
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : integer
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3, n_jobs=1):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
self.n_jobs = n_jobs
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors,
n_jobs=self.n_jobs).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
elif callable(self.kernel):
if y is None:
return self.kernel(X, X)
else:
return self.kernel(X, y)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" or an explicit function "
" are supported at this time." % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
X_2d = check_array(X, accept_sparse=['csc', 'csr', 'coo', 'dok',
'bsr', 'lil', 'dia'])
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
        X : array-like, shape = [n_samples, n_features]
            A matrix of shape [n_samples, n_samples] will be created from this.
        y : array_like, shape = [n_samples]
            Label targets (unlabeled points are marked as -1).
            All unlabeled samples will be transductively assigned labels.
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
check_classification_targets(y)
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
alpha = self.alpha
if self._variant == 'spreading' and \
(alpha is None or alpha <= 0.0 or alpha >= 1.0):
raise ValueError('alpha=%s is invalid: it must be inside '
'the open interval (0, 1)' % alpha)
y = np.asarray(y)
unlabeled = y == -1
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self._variant == 'propagation':
# LabelPropagation
y_static[unlabeled] = 0
else:
# LabelSpreading
y_static *= 1 - alpha
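            # soft clamping: a (1 - alpha) fraction of the initial label
            # distribution is re-injected at every iteration below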
l_previous = np.zeros((self.X_.shape[0], n_classes))
unlabeled = unlabeled[:, np.newaxis]
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
for self.n_iter_ in range(self.max_iter):
if np.abs(self.label_distributions_ - l_previous).sum() < self.tol:
break
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
if self._variant == 'propagation':
normalizer = np.sum(
self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
self.label_distributions_ = np.where(unlabeled,
self.label_distributions_,
y_static)
else:
# clamp
self.label_distributions_ = np.multiply(
alpha, self.label_distributions_) + y_static
else:
warnings.warn(
'max_iter=%d was reached without convergence.' % self.max_iter,
category=ConvergenceWarning
)
self.n_iter_ += 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf', callable}
String identifier for kernel function to use or the kernel function
itself. Only 'rbf' and 'knn' strings are valid inputs. The function
passed should take two inputs, each of shape [n_samples, n_features],
and return a [n_samples, n_samples] shaped weight matrix.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor.
.. deprecated:: 0.19
This parameter will be removed in 0.21.
'alpha' is fixed to zero in 'LabelPropagation'.
max_iter : integer
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
    >>> import numpy as np
    >>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> rng = np.random.RandomState(42)
>>> random_unlabeled_points = rng.rand(len(iris.target)) < 0.3
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
_variant = 'propagation'
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=None, max_iter=1000, tol=1e-3, n_jobs=1):
super(LabelPropagation, self).__init__(
kernel=kernel, gamma=gamma, n_neighbors=n_neighbors, alpha=alpha,
max_iter=max_iter, tol=tol, n_jobs=n_jobs)
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
def fit(self, X, y):
if self.alpha is not None:
warnings.warn(
"alpha is deprecated since 0.19 and will be removed in 0.21.",
DeprecationWarning
)
self.alpha = None
return super(LabelPropagation, self).fit(X, y)
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf', callable}
String identifier for kernel function to use or the kernel function
itself. Only 'rbf' and 'knn' strings are valid inputs. The function
passed should take two inputs, each of shape [n_samples, n_features],
and return a [n_samples, n_samples] shaped weight matrix
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
Clamping factor. A value in [0, 1] that specifies the relative amount
that an instance should adopt the information from its neighbors as
opposed to its initial label.
alpha=0 means keeping the initial label information; alpha=1 means
replacing all initial information.
max_iter : integer
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
    >>> import numpy as np
    >>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> rng = np.random.RandomState(42)
>>> random_unlabeled_points = rng.rand(len(iris.target)) < 0.3
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
_variant = 'spreading'
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3, n_jobs=1):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol,
n_jobs=n_jobs)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = csgraph.laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
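# A minimal usage sketch (not part of the original module) illustrating the
# kernel trade-off described in the module docstring: the 'knn' kernel builds a
# sparse O(k*N) graph, whereas 'rbf' builds a dense O(N^2) affinity matrix.
if __name__ == "__main__":
    from sklearn import datasets
    iris = datasets.load_iris()
    rng = np.random.RandomState(42)
    labels = np.copy(iris.target)
    labels[rng.rand(len(iris.target)) < 0.3] = -1  # mark ~30% of points unlabeled
    model = LabelSpreading(kernel='knn', n_neighbors=7, alpha=0.2)
    model.fit(iris.data, labels)
    print(model.transduction_[:10])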
| bsd-3-clause |
linsalrob/EdwardsLab | matplotlib graphs/plot_16S_coverage_kernelregression.py | 1 | 2033 | import matplotlib.pyplot as plt
from scipy.stats.kde import gaussian_kde
import numpy as np
from sklearn.grid_search import GridSearchCV
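# note: sklearn.grid_search has since been removed; in current scikit-learn,
# GridSearchCV lives in sklearn.model_selection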
from sklearn.kernel_ridge import KernelRidge
import sys
x = []
y = []
with open('/home/redwards/Desktop/genus_species_analysis/ecoli_coverage.tsv', 'r') as fin:
#with open('/home/redwards/Desktop/genus_species_analysis/pseudo_coverage.txt', 'r') as fin:
for l in fin:
p=l.strip().split("\t")
x.append(float(p[0]))
y.append(float(p[1]))
ny = np.array(y)
nx = np.array(x)
grid = GridSearchCV(
KernelRidge(kernel='rbf', gamma=1e-4),
param_grid={"alpha": [0.1, 0.01, 0.001]},
    cv=5)  # 5-fold cross-validation
# param_grid={"alpha": np.logspace(-10, 10, 10),
# "gamma": np.logspace(-4, -3, 5)},
grid.fit(nx[:, None], ny[:, None])
print(grid.best_params_)
xaxis_pred = np.linspace(min(x), max(x), 10000)[:, None]
yaxis_pred = grid.predict(xaxis_pred)
fig = plt.figure()
ax = fig.add_subplot(111)
ax2=ax.twinx()
"""
These regions come from http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2562909/
v1: 66-99
v2: 137-242
v3: 433-497
v4: 576-682
v5: 822-879
v6: 986-1043
v7: 1117-1173
v8: 1243-1294
"""
regions = [
[66,99], [137, 242],
[433, 497], [576, 682],
[822, 879], [986, 1043],
[1117, 1173], [1243, 1294]
]
illumina = [
[517, 809],
]
if 0:
maxy = max(y)
for r in illumina:
for ix in range(r[0], r[1]):
ax.bar(ix, maxy, color='lightgrey', edgecolor='lightgrey')
for r in regions:
for ix in range(r[0], r[1]):
ax.bar(ix, maxy, color='lightblue', edgecolor='lightblue')
ax.plot(x, y, color='r')
ax2.plot(xaxis_pred, yaxis_pred, color='blue')
ax2.set_ylabel("predictions")
ax2.set_ylim([0, 5000])
ax.set_xlabel("Position in the E. coli 16S gene")
ax.set_ylabel("Coverage")
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
fig.set_facecolor('white')
plt.show()
# plt.savefig(args.o)
| mit |
stevenliuit/neon | neon/layers/boltzmann.py | 13 | 3836 | # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Boltzmann distribution based layers.
"""
import logging
from neon.layers.layer import WeightLayer
from neon.util.param import opt_param
logger = logging.getLogger(__name__)
class RBMLayer(WeightLayer):
"""
CD1 training layer for RBM
"""
def initialize(self, kwargs):
super(RBMLayer, self).initialize(kwargs)
self.set_weight_shape()
self.allocate_output_bufs()
self.allocate_param_bufs()
self.p_hid_plus = self.backend.empty((self.nout, self.batch_size))
self.s_hid_plus = self.backend.empty((self.nout, self.batch_size))
self.p_hid_minus = self.backend.empty((self.nout, self.batch_size))
self.p_plus = self.backend.empty((self.nout, self.nin))
self.p_minus = self.backend.empty((self.nout, self.nin))
self.diff = self.backend.empty((self.nout, self.nin))
self.learning_rule.allocate_state(self.diff)
self.neg_pre_act = self.backend.empty((self.nin, self.batch_size))
self.x_minus = self.backend.empty((self.nin, self.batch_size))
self.output = self.backend.empty((self.nin, self.batch_size))
def set_weight_shape(self):
opt_param(self, ['weight_shape'], (self.nout, self.nin))
def positive(self, inputs):
"""
Positive / upward pass of the CD1 RBM
Arguments:
inputs (neon.datasets.dataset.Dataset): dataset upon which
to operate
"""
self.backend.dot(self.weights, inputs, out=self.pre_act)
self.activation.apply_function(self.backend, self.pre_act,
self.p_hid_plus)
self.backend.dot(self.p_hid_plus, inputs.transpose(), out=self.p_plus)
self.random_numbers = self.backend.uniform(size=self.p_hid_plus.shape)
self.backend.greater(self.p_hid_plus, self.random_numbers,
out=self.s_hid_plus)
def negative(self, inputs):
"""
Negative / downward pass of the CD1 RBM
Arguments:
inputs (neon.datasets.dataset.Dataset): dataset upon which
to operate
"""
self.backend.dot(self.weights.transpose(), self.s_hid_plus,
out=self.neg_pre_act)
self.activation.apply_function(self.backend, self.neg_pre_act,
self.x_minus)
self.backend.dot(self.weights, self.x_minus, out=self.pre_act)
self.activation.apply_function(self.backend, self.pre_act,
self.p_hid_minus)
self.output[:] = self.x_minus
def update(self, epoch):
"""
CD1 weight update
Arguments:
epoch: not used, for future compatibility
"""
self.backend.dot(self.p_hid_minus, self.x_minus.transpose(),
out=self.p_minus)
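        # CD-1 weight gradient: positive statistics <h v^T>_data minus
        # negative statistics <h v^T>_reconstruction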
self.backend.subtract(self.p_plus, self.p_minus, out=self.diff)
self.learning_rule.apply_rule([self.weights], [self.diff], epoch)
| apache-2.0 |
pravsripad/mne-python | examples/inverse/morph_volume_stc.py | 13 | 6236 | # -*- coding: utf-8 -*-
"""
.. _ex-morph-volume:
================================
Morph volumetric source estimate
================================
This example demonstrates how to morph an individual subject's
:class:`mne.VolSourceEstimate` to a common reference space. We achieve this
using :class:`mne.SourceMorph`. Data will be morphed based on
an affine transformation and a nonlinear registration method
known as Symmetric Diffeomorphic Registration (SDR) by
:footcite:`AvantsEtAl2008`.
Transformation is estimated from the subject's anatomical T1 weighted MRI
(brain) to `FreeSurfer's 'fsaverage' T1 weighted MRI (brain)
<https://surfer.nmr.mgh.harvard.edu/fswiki/FsAverage>`__.
Afterwards the transformation will be applied to the volumetric source
estimate. The result will be plotted, showing the fsaverage T1 weighted
anatomical MRI, overlaid with the morphed volumetric source estimate.
"""
# Author: Tommy Clausner <[email protected]>
#
# License: BSD-3-Clause
# %%
import os
import nibabel as nib
import mne
from mne.datasets import sample, fetch_fsaverage
from mne.minimum_norm import apply_inverse, read_inverse_operator
from nilearn.plotting import plot_glass_brain
print(__doc__)
# %%
# Setup paths
sample_dir_raw = sample.data_path()
sample_dir = os.path.join(sample_dir_raw, 'MEG', 'sample')
subjects_dir = os.path.join(sample_dir_raw, 'subjects')
fname_evoked = os.path.join(sample_dir, 'sample_audvis-ave.fif')
fname_inv = os.path.join(sample_dir, 'sample_audvis-meg-vol-7-meg-inv.fif')
fname_t1_fsaverage = os.path.join(subjects_dir, 'fsaverage', 'mri',
'brain.mgz')
fetch_fsaverage(subjects_dir) # ensure fsaverage src exists
fname_src_fsaverage = subjects_dir + '/fsaverage/bem/fsaverage-vol-5-src.fif'
# %%
# Compute example data. For reference see :ref:`ex-inverse-volume`.
#
# Load data:
evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
# Apply inverse operator
stc = apply_inverse(evoked, inverse_operator, 1.0 / 3.0 ** 2, "dSPM")
# To save time
stc.crop(0.09, 0.09)
# %%
# Get a SourceMorph object for VolSourceEstimate
# ----------------------------------------------
#
# ``subject_from`` can typically be inferred from
# :class:`src <mne.SourceSpaces>`,
# and ``subject_to`` is set to 'fsaverage' by default. ``subjects_dir`` can be
# None when set in the environment. In that case SourceMorph can be initialized
# taking ``src`` as only argument. See :class:`mne.SourceMorph` for more
# details.
#
# The default parameter setting for *zooms* will cause the reference volumes
# to be resliced before computing the transform. A value of '5' would cause
# the function to reslice to an isotropic voxel size of 5 mm. The higher this
# value the less accurate but faster the computation will be.
#
# The recommended way to use this is to morph to a specific destination source
# space so that different ``subject_from`` morphs will go to the same space.`
# A standard usage for volumetric data reads:
src_fs = mne.read_source_spaces(fname_src_fsaverage)
morph = mne.compute_source_morph(
inverse_operator['src'], subject_from='sample', subjects_dir=subjects_dir,
niter_affine=[10, 10, 5], niter_sdr=[10, 10, 5], # just for speed
src_to=src_fs, verbose=True)
# %%
# Apply morph to VolSourceEstimate
# --------------------------------
#
# The morph can be applied to the source estimate data, by giving it as the
# first argument to the :meth:`morph.apply() <mne.SourceMorph.apply>` method.
#
# .. note::
# Volumetric morphing is much slower than surface morphing because the
# volume for each time point is individually resampled and SDR morphed.
# The :meth:`mne.SourceMorph.compute_vol_morph_mat` method can be used
# to compute an equivalent sparse matrix representation by computing the
# transformation for each source point individually. This generally takes
# a few minutes to compute, but can be
# :meth:`saved <mne.SourceMorph.save>` to disk and be reused. The
# resulting sparse matrix operation is very fast (about 400× faster) to
# :meth:`apply <mne.SourceMorph.apply>`. This approach is more efficient
# when the number of time points to be morphed exceeds the number of
# source space points, which is generally in the thousands. This can
# easily occur when morphing many time points and multiple conditions.
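#
# A sketch of that reuse pattern (not executed in this example, since computing
# the matrix takes a few minutes)::
#
#     >>> morph.compute_vol_morph_mat()
#     >>> stc_fsaverage = morph.apply(stc)  # subsequent applies are much faster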
stc_fsaverage = morph.apply(stc)
# %%
# Convert morphed VolSourceEstimate into NIfTI
# --------------------------------------------
#
# We can convert our morphed source estimate into a NIfTI volume using
# :meth:`morph.apply(..., output='nifti1') <mne.SourceMorph.apply>`.
# Create mri-resolution volume of results
img_fsaverage = morph.apply(stc, mri_resolution=2, output='nifti1')
# %%
# Plot results
# ------------
# Load fsaverage anatomical image
t1_fsaverage = nib.load(fname_t1_fsaverage)
# Plot glass brain (change to plot_anat to display an overlaid anatomical T1)
display = plot_glass_brain(t1_fsaverage,
title='subject results to fsaverage',
draw_cross=False,
annotate=True)
# Add functional data as overlay
display.add_overlay(img_fsaverage, alpha=0.75)
# %%
# Reading and writing SourceMorph from and to disk
# ------------------------------------------------
#
# An instance of SourceMorph can be saved, by calling
# :meth:`morph.save <mne.SourceMorph.save>`.
#
# This methods allows for specification of a filename under which the ``morph``
# will be save in ".h5" format. If no file extension is provided, "-morph.h5"
# will be appended to the respective defined filename::
#
# >>> morph.save('my-file-name')
#
# Reading a saved source morph can be achieved by using
# :func:`mne.read_source_morph`::
#
# >>> morph = mne.read_source_morph('my-file-name-morph.h5')
#
# Once the environment is set up correctly, no information such as
# ``subject_from`` or ``subjects_dir`` must be provided, since it can be
# inferred from the data and used morph to 'fsaverage' by default, e.g.::
#
# >>> morph.apply(stc)
#
# References
# ----------
# .. footbibliography::
| bsd-3-clause |
keras-team/keras-io | examples/vision/mnist_convnet.py | 1 | 1914 | """
Title: Simple MNIST convnet
Author: [fchollet](https://twitter.com/fchollet)
Date created: 2015/06/19
Last modified: 2020/04/21
Description: A simple convnet that achieves ~99% test accuracy on MNIST.
"""
"""
## Setup
"""
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
"""
## Prepare the data
"""
# Model / data parameters
num_classes = 10
input_shape = (28, 28, 1)
# Load the data and split it between train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Scale images to the [0, 1] range
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255
# Make sure images have shape (28, 28, 1)
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
"""
## Build the model
"""
model = keras.Sequential(
[
keras.Input(shape=input_shape),
layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
layers.MaxPooling2D(pool_size=(2, 2)),
layers.Flatten(),
layers.Dropout(0.5),
layers.Dense(num_classes, activation="softmax"),
]
)
model.summary()
"""
## Train the model
"""
batch_size = 128
epochs = 15
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)
"""
## Evaluate the trained model
"""
score = model.evaluate(x_test, y_test, verbose=0)
print("Test loss:", score[0])
print("Test accuracy:", score[1])
| apache-2.0 |
christopher-beckham/weka-pyscript | wekapyscript/pyscript.py | 2 | 4849 | from __future__ import print_function
from subprocess import call
import gzip
try:
import cPickle as pickle
except ImportError:
import pickle
import os
import sys
import time
import tempfile
import shutil
import numpy as np
def load_pkl(filename):
f = gzip.open(filename)
args = pickle.load(f)
f.close()
return args
class uses(object):
def __init__(self, used_args):
self.used_args = used_args
self.defaults = set([
"X_train", "y_train", "X_test", "class_type", "relation_name",
"attributes", "attr_values", "class", "num_classes", "attr_types"
])
def __call__(self, f):
def wrapped_f(*args):
args_variable = args[0]
for var in args_variable:
if var not in self.used_args and var not in self.defaults:
raise ValueError("This classifier does not use the non-default variable: '%s'" % var)
return f(*args)
return wrapped_f
class ArffToArgs(object):
def __init__(self):
self.input = ""
self.output = ""
self.class_index = "last"
self.standardize = ""
self.binarize = ""
self.impute = ""
self.debug = ""
self.arguments = ""
def set_standardize(self, b):
assert isinstance(b, bool)
self.standardize = "-standardize" if b else ""
def set_binarize(self, b):
assert isinstance(b, bool)
self.binarize = "-binarize" if b else ""
def set_impute(self, b):
assert isinstance(b, bool)
self.impute = "-impute" if b else ""
def set_input(self, filename):
self.input = filename
def set_debug(self, b):
assert isinstance(b, bool)
self.debug = "-debug" if b else ""
def set_class_index(self, class_index):
self.class_index = class_index
def set_arguments(self, arguments):
self.arguments = arguments
def get_args(self):
if self.input == "" or self.class_index == "":
raise ValueError("Make sure you have used set_input, and set_class_index at least")
self.output = tempfile.gettempdir() + os.path.sep + "%s_%f.pkl.gz" % ( os.path.basename(self.input), time.time() )
self.output = self.output.replace("\\", "\\\\") # for windows
driver = ["java", "weka.Run", "weka.pyscript.ArffToPickle",
"-i", self.input, "-o", self.output ]
        if self.class_index is not None:
driver.append("-c")
driver.append(self.class_index)
driver.append("-args")
driver.append(self.arguments)
driver.append(self.standardize)
driver.append(self.binarize)
driver.append(self.impute)
driver.append(self.debug)
sys.stderr.write("%s\n" % " ".join(driver))
result = call(driver)
if result != 0:
raise Exception("Error - Java call returned a non-zero value")
else:
return load_pkl(self.output)
def save(self, filename):
shutil.move(self.output, filename)
def close(self):
try:
os.remove(self.output)
except OSError:
pass
def get_header(args):
relation_name = args["relation_name"]
attributes = args["attributes"]
attr_types = args["attr_types"]
attr_values = args["attr_values"]
header = []
header.append("@relation %s" % relation_name)
for attribute in attributes:
if attribute in attr_values:
header.append( "@attribute %s {%s}" % (attribute, ",".join(attr_values[attribute]) ) )
else:
header.append( "@attribute %s numeric" % attribute )
header.append("@data")
return "\n".join(header)
def instance_to_string(x, y, args):
attributes = args["attributes"]
attr_values = args["attr_values"]
string_vector = []
for i in range(0, len(x)):
if np.isnan(x[i]):
string_vector.append("?")
else:
if attributes[i] in attr_values:
string_vector.append( str(attr_values[ attributes[i] ][ int(x[i]) ] ) )
else:
string_vector.append( str( x[i] ) )
    if y is not None:  # y.shape[0] != 0
if np.isnan(y):
string_vector.append("?")
else:
if args["class_type"] == "nominal":
string_vector.append( attr_values[ args["class"] ][int(y[0])] )
else:
string_vector.append( str(y[0]) )
return ",".join(string_vector)
if __name__ == '__main__':
x = ArffToArgs()
x.set_input("../datasets/iris.arff")
#x.set_output("/tmp/iris.pkl.gz")
x.set_standardize(True)
x.set_binarize(True)
x.set_impute(True)
x.set_class_index("last")
x.set_arguments("a='\\'foo\\'';b='bar';c=0.001")
print(x.get_args().keys())
x.close()
| gpl-3.0 |
nhuntwalker/astroML | book_figures/chapter1/fig_moving_objects_multicolor.py | 4 | 4580 | """
SDSS Stripe 82 Moving Object Catalog
------------------------------------
Figure 1.12.
A multicolor scatter plot of the properties of asteroids from the SDSS Moving
Object Catalog (cf. figure 1.8). The left panel shows observational markers
of the chemical properties of the asteroids: two colors a* and i-z. The
right panel shows the orbital parameters: semimajor axis a vs. the sine of
the inclination. The color of points in the right panel reflects their
position in the left panel. This plot is similar to that used in
figures 3-4 of Parker et al 2008.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.datasets import fetch_moving_objects
from astroML.plotting.tools import devectorize_axes
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
def black_bg_subplot(*args, **kwargs):
"""Create a subplot with black background"""
kwargs['axisbg'] = 'k'
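    # note: newer matplotlib releases use 'facecolor' in place of 'axisbg'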
ax = plt.subplot(*args, **kwargs)
# set ticks and labels to white
for spine in ax.spines.values():
spine.set_color('w')
for tick in ax.xaxis.get_major_ticks() + ax.yaxis.get_major_ticks():
for child in tick.get_children():
child.set_color('w')
return ax
def compute_color(mag_a, mag_i, mag_z, a_crit=-0.1):
"""
Compute the scatter-plot color using code adapted from
TCL source used in Parker 2008.
"""
# define the base color scalings
R = np.ones_like(mag_i)
G = 0.5 * 10 ** (-2 * (mag_i - mag_z - 0.01))
B = 1.5 * 10 ** (-8 * (mag_a + 0.0))
# enhance green beyond the a_crit cutoff
G += 10. / (1 + np.exp((mag_a - a_crit) / 0.02))
# normalize color of each point to its maximum component
RGB = np.vstack([R, G, B])
RGB /= RGB.max(0)
# return an array of RGB colors, which is shape (n_points, 3)
return RGB.T
#------------------------------------------------------------
# Fetch data and extract the desired quantities
data = fetch_moving_objects(Parker2008_cuts=True)
mag_a = data['mag_a']
mag_i = data['mag_i']
mag_z = data['mag_z']
a = data['aprime']
sini = data['sin_iprime']
# dither: magnitudes are recorded only to +/- 0.01
np.random.seed(0)
mag_a += -0.005 + 0.01 * np.random.random(size=mag_a.shape)
mag_i += -0.005 + 0.01 * np.random.random(size=mag_i.shape)
mag_z += -0.005 + 0.01 * np.random.random(size=mag_z.shape)
# compute RGB color based on magnitudes
color = compute_color(mag_a, mag_i, mag_z)
#------------------------------------------------------------
# set up the plot
fig = plt.figure(figsize=(5, 2.2), facecolor='k')
fig.subplots_adjust(left=0.1, right=0.95, wspace=0.3,
bottom=0.2, top=0.93)
# plot the color-magnitude plot
ax = black_bg_subplot(121)
ax.scatter(mag_a, mag_i - mag_z,
c=color, s=0.5, lw=0)
devectorize_axes(ax, dpi=400)
ax.plot([0, 0], [-0.8, 0.6], '--w', lw=1)
ax.plot([0, 0.4], [-0.15, -0.15], '--w', lw=1)
ax.set_xlim(-0.3, 0.4)
ax.set_ylim(-0.8, 0.6)
ax.set_xlabel(r'${\rm a*}$', color='w')
ax.set_ylabel(r'${\rm i-z}$', color='w')
# plot the orbital parameters plot
ax = black_bg_subplot(122)
ax.scatter(a, sini,
c=color, s=0.5, lw=0, edgecolor='none')
devectorize_axes(ax, dpi=400)
ax.plot([2.5, 2.5], [-0.02, 0.3], '--w', lw=1)
ax.plot([2.82, 2.82], [-0.02, 0.3], '--w', lw=1)
ax.set_xlim(2.0, 3.3)
ax.set_ylim(-0.02, 0.3)
ax.set_xlabel(r'${\rm a (AU)}$', color='w')
ax.set_ylabel(r'${\rm sin(i)}$', color='w')
# label the plot
text_kwargs = dict(color='w', transform=plt.gca().transAxes,
ha='center', va='bottom')
ax.text(0.25, 1.02, 'Inner', **text_kwargs)
ax.text(0.53, 1.02, 'Mid', **text_kwargs)
ax.text(0.83, 1.02, 'Outer', **text_kwargs)
# Saving the black-background figure requires some extra arguments:
#fig.savefig('moving_objects.png',
# facecolor='black',
# edgecolor='none')
plt.show()
| bsd-2-clause |
pravsripad/mne-python | examples/inverse/resolution_metrics_eegmeg.py | 13 | 5767 | # -*- coding: utf-8 -*-
"""
.. _ex-res-metrics-meeg:
==============================================================
Compute spatial resolution metrics to compare MEG with EEG+MEG
==============================================================
Compute peak localisation error and spatial deviation for the point-spread
functions of dSPM and MNE. Plot their distributions and difference of
distributions. This example mimics some results from :footcite:`HaukEtAl2019`,
namely Figure 3 (peak localisation error for PSFs, L2-MNE vs dSPM) and Figure 4
(spatial deviation for PSFs, L2-MNE vs dSPM). It shows that combining MEG with
EEG reduces the point-spread function and increases the spatial resolution of
source imaging, especially for deeper sources.
"""
# Author: Olaf Hauk <[email protected]>
#
# License: BSD-3-Clause
# %%
import mne
from mne.datasets import sample
from mne.minimum_norm.resolution_matrix import make_inverse_resolution_matrix
from mne.minimum_norm.spatial_resolution import resolution_metrics
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path / 'subjects/'
meg_path = data_path / 'MEG' / 'sample'
fname_fwd_emeg = meg_path / 'sample_audvis-meg-eeg-oct-6-fwd.fif'
fname_cov = meg_path / 'sample_audvis-cov.fif'
fname_evo = meg_path / 'sample_audvis-ave.fif'
# read forward solution with EEG and MEG
forward_emeg = mne.read_forward_solution(fname_fwd_emeg)
# forward operator with fixed source orientations
forward_emeg = mne.convert_forward_solution(forward_emeg, surf_ori=True,
force_fixed=True)
# create a forward solution with MEG only
forward_meg = mne.pick_types_forward(forward_emeg, meg=True, eeg=False)
# noise covariance matrix
noise_cov = mne.read_cov(fname_cov)
# evoked data for info
evoked = mne.read_evokeds(fname_evo, 0)
# make inverse operator from forward solution for MEG and EEGMEG
inv_emeg = mne.minimum_norm.make_inverse_operator(
info=evoked.info, forward=forward_emeg, noise_cov=noise_cov, loose=0.,
depth=None)
inv_meg = mne.minimum_norm.make_inverse_operator(
info=evoked.info, forward=forward_meg, noise_cov=noise_cov, loose=0.,
depth=None)
# regularisation parameter
snr = 3.0
lambda2 = 1.0 / snr ** 2
# %%
# EEGMEG
# ------
# Compute resolution matrices, localization error, and spatial deviations
# for MNE:
rm_emeg = make_inverse_resolution_matrix(forward_emeg, inv_emeg,
method='MNE', lambda2=lambda2)
ple_psf_emeg = resolution_metrics(rm_emeg, inv_emeg['src'],
function='psf', metric='peak_err')
sd_psf_emeg = resolution_metrics(rm_emeg, inv_emeg['src'],
function='psf', metric='sd_ext')
del rm_emeg
# %%
# MEG
# ---
# Do the same for MEG:
rm_meg = make_inverse_resolution_matrix(forward_meg, inv_meg,
method='MNE', lambda2=lambda2)
ple_psf_meg = resolution_metrics(rm_meg, inv_meg['src'],
function='psf', metric='peak_err')
sd_psf_meg = resolution_metrics(rm_meg, inv_meg['src'],
function='psf', metric='sd_ext')
del rm_meg
# %%
# Visualization
# -------------
# Look at peak localisation error (PLE) across the whole cortex for PSF:
brain_ple_emeg = ple_psf_emeg.plot('sample', 'inflated', 'lh',
subjects_dir=subjects_dir, figure=1,
clim=dict(kind='value', lims=(0, 2, 4)))
brain_ple_emeg.add_text(0.1, 0.9, 'PLE PSF EMEG', 'title', font_size=16)
# %%
# For MEG only:
brain_ple_meg = ple_psf_meg.plot('sample', 'inflated', 'lh',
subjects_dir=subjects_dir, figure=2,
clim=dict(kind='value', lims=(0, 2, 4)))
brain_ple_meg.add_text(0.1, 0.9, 'PLE PSF MEG', 'title', font_size=16)
# %%
# Subtract the two distributions and plot this difference:
diff_ple = ple_psf_emeg - ple_psf_meg
brain_ple_diff = diff_ple.plot('sample', 'inflated', 'lh',
subjects_dir=subjects_dir, figure=3,
clim=dict(kind='value', pos_lims=(0., .5, 1.)),
smoothing_steps=20)
brain_ple_diff.add_text(0.1, 0.9, 'PLE EMEG-MEG', 'title', font_size=16)
# %%
# These plots show that with respect to peak localization error, adding EEG to
# MEG does not bring much benefit. Next let's visualise spatial deviation (SD)
# across the whole cortex for PSF:
brain_sd_emeg = sd_psf_emeg.plot('sample', 'inflated', 'lh',
subjects_dir=subjects_dir, figure=4,
clim=dict(kind='value', lims=(0, 2, 4)))
brain_sd_emeg.add_text(0.1, 0.9, 'SD PSF EMEG', 'title', font_size=16)
# %%
# For MEG only:
brain_sd_meg = sd_psf_meg.plot('sample', 'inflated', 'lh',
subjects_dir=subjects_dir, figure=5,
clim=dict(kind='value', lims=(0, 2, 4)))
brain_sd_meg.add_text(0.1, 0.9, 'SD PSF MEG', 'title', font_size=16)
# %%
# Subtract the two distributions and plot this difference:
diff_sd = sd_psf_emeg - sd_psf_meg
brain_sd_diff = diff_sd.plot('sample', 'inflated', 'lh',
subjects_dir=subjects_dir, figure=6,
clim=dict(kind='value', pos_lims=(0., .5, 1.)),
smoothing_steps=20)
brain_sd_diff.add_text(0.1, 0.9, 'SD EMEG-MEG', 'title', font_size=16)
# %%
# Adding EEG to MEG decreases the spatial extent of point-spread
# functions (lower spatial deviation, blue colors), thus increasing
# resolution, especially for deeper source locations.
#
# References
# ----------
# .. footbibliography::
| bsd-3-clause |
darionyaphet/spark | python/pyspark/ml/tuning.py | 6 | 32873 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import itertools
import sys
from multiprocessing.pool import ThreadPool
import numpy as np
from pyspark import since, keyword_only
from pyspark.ml import Estimator, Model
from pyspark.ml.common import _py2java, _java2py
from pyspark.ml.param import Params, Param, TypeConverters
from pyspark.ml.param.shared import HasCollectSubModels, HasParallelism, HasSeed
from pyspark.ml.util import *
from pyspark.ml.wrapper import JavaParams
from pyspark.sql.functions import rand
__all__ = ['ParamGridBuilder', 'CrossValidator', 'CrossValidatorModel', 'TrainValidationSplit',
'TrainValidationSplitModel']
def _parallelFitTasks(est, train, eva, validation, epm, collectSubModel):
"""
Creates a list of callables which can be called from different threads to fit and evaluate
an estimator in parallel. Each callable returns an `(index, metric)` pair.
:param est: Estimator, the estimator to be fit.
:param train: DataFrame, training data set, used for fitting.
:param eva: Evaluator, used to compute `metric`
:param validation: DataFrame, validation data set, used for evaluation.
:param epm: Sequence of ParamMap, params maps to be used during fitting & evaluation.
:param collectSubModel: Whether to collect sub model.
:return: (int, float, subModel), an index into `epm` and the associated metric value.
"""
modelIter = est.fitMultiple(train, epm)
def singleTask():
index, model = next(modelIter)
metric = eva.evaluate(model.transform(validation, epm[index]))
return index, metric, model if collectSubModel else None
return [singleTask] * len(epm)
class ParamGridBuilder(object):
r"""
Builder for a param grid used in grid search-based model selection.
>>> from pyspark.ml.classification import LogisticRegression
>>> lr = LogisticRegression()
>>> output = ParamGridBuilder() \
... .baseOn({lr.labelCol: 'l'}) \
... .baseOn([lr.predictionCol, 'p']) \
... .addGrid(lr.regParam, [1.0, 2.0]) \
... .addGrid(lr.maxIter, [1, 5]) \
... .build()
>>> expected = [
... {lr.regParam: 1.0, lr.maxIter: 1, lr.labelCol: 'l', lr.predictionCol: 'p'},
... {lr.regParam: 2.0, lr.maxIter: 1, lr.labelCol: 'l', lr.predictionCol: 'p'},
... {lr.regParam: 1.0, lr.maxIter: 5, lr.labelCol: 'l', lr.predictionCol: 'p'},
... {lr.regParam: 2.0, lr.maxIter: 5, lr.labelCol: 'l', lr.predictionCol: 'p'}]
>>> len(output) == len(expected)
True
>>> all([m in expected for m in output])
True
.. versionadded:: 1.4.0
"""
def __init__(self):
self._param_grid = {}
@since("1.4.0")
def addGrid(self, param, values):
"""
Sets the given parameters in this grid to fixed values.
param must be an instance of Param associated with an instance of Params
(such as Estimator or Transformer).
"""
if isinstance(param, Param):
self._param_grid[param] = values
else:
raise TypeError("param must be an instance of Param")
return self
@since("1.4.0")
def baseOn(self, *args):
"""
Sets the given parameters in this grid to fixed values.
Accepts either a parameter dictionary or a list of (parameter, value) pairs.
"""
if isinstance(args[0], dict):
self.baseOn(*args[0].items())
else:
for (param, value) in args:
self.addGrid(param, [value])
return self
@since("1.4.0")
def build(self):
"""
Builds and returns all combinations of parameters specified
by the param grid.
"""
keys = self._param_grid.keys()
grid_values = self._param_grid.values()
def to_key_value_pairs(keys, values):
return [(key, key.typeConverter(value)) for key, value in zip(keys, values)]
return [dict(to_key_value_pairs(keys, prod)) for prod in itertools.product(*grid_values)]
class _ValidatorParams(HasSeed):
"""
Common params for TrainValidationSplit and CrossValidator.
"""
estimator = Param(Params._dummy(), "estimator", "estimator to be cross-validated")
estimatorParamMaps = Param(Params._dummy(), "estimatorParamMaps", "estimator param maps")
evaluator = Param(
Params._dummy(), "evaluator",
"evaluator used to select hyper-parameters that maximize the validator metric")
@since("2.0.0")
def getEstimator(self):
"""
Gets the value of estimator or its default value.
"""
return self.getOrDefault(self.estimator)
@since("2.0.0")
def getEstimatorParamMaps(self):
"""
Gets the value of estimatorParamMaps or its default value.
"""
return self.getOrDefault(self.estimatorParamMaps)
@since("2.0.0")
def getEvaluator(self):
"""
Gets the value of evaluator or its default value.
"""
return self.getOrDefault(self.evaluator)
@classmethod
def _from_java_impl(cls, java_stage):
"""
Return Python estimator, estimatorParamMaps, and evaluator from a Java ValidatorParams.
"""
# Load information from java_stage to the instance.
estimator = JavaParams._from_java(java_stage.getEstimator())
evaluator = JavaParams._from_java(java_stage.getEvaluator())
epms = [estimator._transfer_param_map_from_java(epm)
for epm in java_stage.getEstimatorParamMaps()]
return estimator, epms, evaluator
def _to_java_impl(self):
"""
Return Java estimator, estimatorParamMaps, and evaluator from this Python instance.
"""
gateway = SparkContext._gateway
cls = SparkContext._jvm.org.apache.spark.ml.param.ParamMap
java_epms = gateway.new_array(cls, len(self.getEstimatorParamMaps()))
for idx, epm in enumerate(self.getEstimatorParamMaps()):
java_epms[idx] = self.getEstimator()._transfer_param_map_to_java(epm)
java_estimator = self.getEstimator()._to_java()
java_evaluator = self.getEvaluator()._to_java()
return java_estimator, java_epms, java_evaluator
class _CrossValidatorParams(_ValidatorParams):
"""
Params for :py:class:`CrossValidator` and :py:class:`CrossValidatorModel`.
.. versionadded:: 3.0.0
"""
numFolds = Param(Params._dummy(), "numFolds", "number of folds for cross validation",
typeConverter=TypeConverters.toInt)
@since("1.4.0")
def getNumFolds(self):
"""
Gets the value of numFolds or its default value.
"""
return self.getOrDefault(self.numFolds)
class CrossValidator(Estimator, _CrossValidatorParams, HasParallelism, HasCollectSubModels,
MLReadable, MLWritable):
"""
K-fold cross validation performs model selection by splitting the dataset into a set of
non-overlapping randomly partitioned folds which are used as separate training and test datasets
e.g., with k=3 folds, K-fold cross validation will generate 3 (training, test) dataset pairs,
each of which uses 2/3 of the data for training and 1/3 for testing. Each fold is used as the
test set exactly once.
>>> from pyspark.ml.classification import LogisticRegression
>>> from pyspark.ml.evaluation import BinaryClassificationEvaluator
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.tuning import CrossValidatorModel
>>> import tempfile
>>> dataset = spark.createDataFrame(
... [(Vectors.dense([0.0]), 0.0),
... (Vectors.dense([0.4]), 1.0),
... (Vectors.dense([0.5]), 0.0),
... (Vectors.dense([0.6]), 1.0),
... (Vectors.dense([1.0]), 1.0)] * 10,
... ["features", "label"])
>>> lr = LogisticRegression()
>>> grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
>>> evaluator = BinaryClassificationEvaluator()
>>> cv = CrossValidator(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator,
... parallelism=2)
>>> cvModel = cv.fit(dataset)
>>> cvModel.getNumFolds()
3
>>> cvModel.avgMetrics[0]
0.5
>>> path = tempfile.mkdtemp()
>>> model_path = path + "/model"
>>> cvModel.write().save(model_path)
>>> cvModelRead = CrossValidatorModel.read().load(model_path)
>>> cvModelRead.avgMetrics
[0.5, ...
>>> evaluator.evaluate(cvModel.transform(dataset))
0.8333...
.. versionadded:: 1.4.0
"""
@keyword_only
def __init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,
seed=None, parallelism=1, collectSubModels=False):
"""
__init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,\
seed=None, parallelism=1, collectSubModels=False)
"""
super(CrossValidator, self).__init__()
self._setDefault(numFolds=3, parallelism=1)
kwargs = self._input_kwargs
self._set(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,
seed=None, parallelism=1, collectSubModels=False):
"""
setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, numFolds=3,\
seed=None, parallelism=1, collectSubModels=False):
Sets params for cross validator.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setEstimator(self, value):
"""
Sets the value of :py:attr:`estimator`.
"""
return self._set(estimator=value)
@since("2.0.0")
def setEstimatorParamMaps(self, value):
"""
Sets the value of :py:attr:`estimatorParamMaps`.
"""
return self._set(estimatorParamMaps=value)
@since("2.0.0")
def setEvaluator(self, value):
"""
Sets the value of :py:attr:`evaluator`.
"""
return self._set(evaluator=value)
@since("1.4.0")
def setNumFolds(self, value):
"""
Sets the value of :py:attr:`numFolds`.
"""
return self._set(numFolds=value)
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
def setParallelism(self, value):
"""
Sets the value of :py:attr:`parallelism`.
"""
return self._set(parallelism=value)
def setCollectSubModels(self, value):
"""
Sets the value of :py:attr:`collectSubModels`.
"""
return self._set(collectSubModels=value)
def _fit(self, dataset):
est = self.getOrDefault(self.estimator)
epm = self.getOrDefault(self.estimatorParamMaps)
numModels = len(epm)
eva = self.getOrDefault(self.evaluator)
nFolds = self.getOrDefault(self.numFolds)
seed = self.getOrDefault(self.seed)
h = 1.0 / nFolds
randCol = self.uid + "_rand"
df = dataset.select("*", rand(seed).alias(randCol))
metrics = [0.0] * numModels
pool = ThreadPool(processes=min(self.getParallelism(), numModels))
subModels = None
collectSubModelsParam = self.getCollectSubModels()
if collectSubModelsParam:
subModels = [[None for j in range(numModels)] for i in range(nFolds)]
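        # each fold's validation set consists of the rows whose random value
        # falls in [i*h, (i+1)*h); the remaining rows are used for training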
for i in range(nFolds):
validateLB = i * h
validateUB = (i + 1) * h
condition = (df[randCol] >= validateLB) & (df[randCol] < validateUB)
validation = df.filter(condition).cache()
train = df.filter(~condition).cache()
tasks = _parallelFitTasks(est, train, eva, validation, epm, collectSubModelsParam)
for j, metric, subModel in pool.imap_unordered(lambda f: f(), tasks):
metrics[j] += (metric / nFolds)
if collectSubModelsParam:
subModels[i][j] = subModel
validation.unpersist()
train.unpersist()
if eva.isLargerBetter():
bestIndex = np.argmax(metrics)
else:
bestIndex = np.argmin(metrics)
bestModel = est.fit(dataset, epm[bestIndex])
return self._copyValues(CrossValidatorModel(bestModel, metrics, subModels))
@since("1.4.0")
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This copies creates a deep copy of
the embedded paramMap, and copies the embedded and extra parameters over.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
"""
if extra is None:
extra = dict()
newCV = Params.copy(self, extra)
if self.isSet(self.estimator):
newCV.setEstimator(self.getEstimator().copy(extra))
# estimatorParamMaps remain the same
if self.isSet(self.evaluator):
newCV.setEvaluator(self.getEvaluator().copy(extra))
return newCV
@since("2.3.0")
def write(self):
"""Returns an MLWriter instance for this ML instance."""
return JavaMLWriter(self)
@classmethod
@since("2.3.0")
def read(cls):
"""Returns an MLReader instance for this class."""
return JavaMLReader(cls)
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java CrossValidator, create and return a Python wrapper of it.
Used for ML persistence.
"""
estimator, epms, evaluator = super(CrossValidator, cls)._from_java_impl(java_stage)
numFolds = java_stage.getNumFolds()
seed = java_stage.getSeed()
parallelism = java_stage.getParallelism()
collectSubModels = java_stage.getCollectSubModels()
# Create a new instance of this stage.
py_stage = cls(estimator=estimator, estimatorParamMaps=epms, evaluator=evaluator,
numFolds=numFolds, seed=seed, parallelism=parallelism,
collectSubModels=collectSubModels)
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java CrossValidator. Used for ML persistence.
:return: Java object equivalent to this instance.
"""
estimator, epms, evaluator = super(CrossValidator, self)._to_java_impl()
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.CrossValidator", self.uid)
_java_obj.setEstimatorParamMaps(epms)
_java_obj.setEvaluator(evaluator)
_java_obj.setEstimator(estimator)
_java_obj.setSeed(self.getSeed())
_java_obj.setNumFolds(self.getNumFolds())
_java_obj.setParallelism(self.getParallelism())
_java_obj.setCollectSubModels(self.getCollectSubModels())
return _java_obj
class CrossValidatorModel(Model, _CrossValidatorParams, MLReadable, MLWritable):
"""
CrossValidatorModel contains the model with the highest average cross-validation
metric across folds and uses this model to transform input data. CrossValidatorModel
also tracks the metrics for each param map evaluated.
.. versionadded:: 1.4.0
"""
def __init__(self, bestModel, avgMetrics=[], subModels=None):
super(CrossValidatorModel, self).__init__()
#: best model from cross validation
self.bestModel = bestModel
#: Average cross-validation metrics for each paramMap in
#: CrossValidator.estimatorParamMaps, in the corresponding order.
self.avgMetrics = avgMetrics
#: sub model list from cross validation
self.subModels = subModels
def _transform(self, dataset):
return self.bestModel.transform(dataset)
@since("1.4.0")
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This copies the underlying bestModel,
creates a deep copy of the embedded paramMap, and
copies the embedded and extra parameters over.
It does not copy the extra Params into the subModels.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
"""
if extra is None:
extra = dict()
bestModel = self.bestModel.copy(extra)
avgMetrics = self.avgMetrics
subModels = self.subModels
return CrossValidatorModel(bestModel, avgMetrics, subModels)
@since("2.3.0")
def write(self):
"""Returns an MLWriter instance for this ML instance."""
return JavaMLWriter(self)
@classmethod
@since("2.3.0")
def read(cls):
"""Returns an MLReader instance for this class."""
return JavaMLReader(cls)
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java CrossValidatorModel, create and return a Python wrapper of it.
Used for ML persistence.
"""
sc = SparkContext._active_spark_context
bestModel = JavaParams._from_java(java_stage.bestModel())
avgMetrics = _java2py(sc, java_stage.avgMetrics())
estimator, epms, evaluator = super(CrossValidatorModel, cls)._from_java_impl(java_stage)
py_stage = cls(bestModel=bestModel, avgMetrics=avgMetrics)._set(estimator=estimator)
py_stage = py_stage._set(estimatorParamMaps=epms)._set(evaluator=evaluator)
if java_stage.hasSubModels():
py_stage.subModels = [[JavaParams._from_java(sub_model)
for sub_model in fold_sub_models]
for fold_sub_models in java_stage.subModels()]
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java CrossValidatorModel. Used for ML persistence.
:return: Java object equivalent to this instance.
"""
sc = SparkContext._active_spark_context
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.CrossValidatorModel",
self.uid,
self.bestModel._to_java(),
_py2java(sc, self.avgMetrics))
estimator, epms, evaluator = super(CrossValidatorModel, self)._to_java_impl()
_java_obj.set("evaluator", evaluator)
_java_obj.set("estimator", estimator)
_java_obj.set("estimatorParamMaps", epms)
if self.subModels is not None:
java_sub_models = [[sub_model._to_java() for sub_model in fold_sub_models]
for fold_sub_models in self.subModels]
_java_obj.setSubModels(java_sub_models)
return _java_obj
class _TrainValidationSplitParams(_ValidatorParams):
"""
Params for :py:class:`TrainValidationSplit` and :py:class:`TrainValidationSplitModel`.
.. versionadded:: 3.0.0
"""
trainRatio = Param(Params._dummy(), "trainRatio", "Param for ratio between train and\
validation data. Must be between 0 and 1.", typeConverter=TypeConverters.toFloat)
@since("2.0.0")
def getTrainRatio(self):
"""
Gets the value of trainRatio or its default value.
"""
return self.getOrDefault(self.trainRatio)
class TrainValidationSplit(Estimator, _TrainValidationSplitParams, HasParallelism,
HasCollectSubModels, MLReadable, MLWritable):
"""
Validation for hyper-parameter tuning. Randomly splits the input dataset into train and
    validation sets, and uses the evaluation metric on the validation set to select the best model.
Similar to :class:`CrossValidator`, but only splits the set once.
>>> from pyspark.ml.classification import LogisticRegression
>>> from pyspark.ml.evaluation import BinaryClassificationEvaluator
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.ml.tuning import TrainValidationSplitModel
>>> import tempfile
>>> dataset = spark.createDataFrame(
... [(Vectors.dense([0.0]), 0.0),
... (Vectors.dense([0.4]), 1.0),
... (Vectors.dense([0.5]), 0.0),
... (Vectors.dense([0.6]), 1.0),
... (Vectors.dense([1.0]), 1.0)] * 10,
... ["features", "label"]).repartition(1)
>>> lr = LogisticRegression()
>>> grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1]).build()
>>> evaluator = BinaryClassificationEvaluator()
>>> tvs = TrainValidationSplit(estimator=lr, estimatorParamMaps=grid, evaluator=evaluator,
... parallelism=1, seed=42)
>>> tvsModel = tvs.fit(dataset)
>>> tvsModel.getTrainRatio()
0.75
>>> tvsModel.validationMetrics
[0.5, ...
>>> path = tempfile.mkdtemp()
>>> model_path = path + "/model"
>>> tvsModel.write().save(model_path)
>>> tvsModelRead = TrainValidationSplitModel.read().load(model_path)
>>> tvsModelRead.validationMetrics
[0.5, ...
>>> evaluator.evaluate(tvsModel.transform(dataset))
0.833...
.. versionadded:: 2.0.0
"""
@keyword_only
def __init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, trainRatio=0.75,
parallelism=1, collectSubModels=False, seed=None):
"""
__init__(self, estimator=None, estimatorParamMaps=None, evaluator=None, trainRatio=0.75,\
parallelism=1, collectSubModels=False, seed=None)
"""
super(TrainValidationSplit, self).__init__()
self._setDefault(trainRatio=0.75, parallelism=1)
kwargs = self._input_kwargs
self._set(**kwargs)
@since("2.0.0")
@keyword_only
def setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, trainRatio=0.75,
parallelism=1, collectSubModels=False, seed=None):
"""
setParams(self, estimator=None, estimatorParamMaps=None, evaluator=None, trainRatio=0.75,\
parallelism=1, collectSubModels=False, seed=None):
Sets params for the train validation split.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setEstimator(self, value):
"""
Sets the value of :py:attr:`estimator`.
"""
return self._set(estimator=value)
@since("2.0.0")
def setEstimatorParamMaps(self, value):
"""
Sets the value of :py:attr:`estimatorParamMaps`.
"""
return self._set(estimatorParamMaps=value)
@since("2.0.0")
def setEvaluator(self, value):
"""
Sets the value of :py:attr:`evaluator`.
"""
return self._set(evaluator=value)
@since("2.0.0")
def setTrainRatio(self, value):
"""
Sets the value of :py:attr:`trainRatio`.
"""
return self._set(trainRatio=value)
def setSeed(self, value):
"""
Sets the value of :py:attr:`seed`.
"""
return self._set(seed=value)
def setParallelism(self, value):
"""
Sets the value of :py:attr:`parallelism`.
"""
return self._set(parallelism=value)
def setCollectSubModels(self, value):
"""
Sets the value of :py:attr:`collectSubModels`.
"""
return self._set(collectSubModels=value)
def _fit(self, dataset):
est = self.getOrDefault(self.estimator)
epm = self.getOrDefault(self.estimatorParamMaps)
numModels = len(epm)
eva = self.getOrDefault(self.evaluator)
tRatio = self.getOrDefault(self.trainRatio)
seed = self.getOrDefault(self.seed)
randCol = self.uid + "_rand"
df = dataset.select("*", rand(seed).alias(randCol))
condition = (df[randCol] >= tRatio)
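        # Rows whose random value is >= trainRatio form the validation set (an
        # expected (1 - trainRatio) fraction of the data); the remainder is used
        # for training.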
validation = df.filter(condition).cache()
train = df.filter(~condition).cache()
subModels = None
collectSubModelsParam = self.getCollectSubModels()
if collectSubModelsParam:
subModels = [None for i in range(numModels)]
tasks = _parallelFitTasks(est, train, eva, validation, epm, collectSubModelsParam)
pool = ThreadPool(processes=min(self.getParallelism(), numModels))
metrics = [None] * numModels
for j, metric, subModel in pool.imap_unordered(lambda f: f(), tasks):
metrics[j] = metric
if collectSubModelsParam:
subModels[j] = subModel
train.unpersist()
validation.unpersist()
if eva.isLargerBetter():
bestIndex = np.argmax(metrics)
else:
bestIndex = np.argmin(metrics)
bestModel = est.fit(dataset, epm[bestIndex])
return self._copyValues(TrainValidationSplitModel(bestModel, metrics, subModels))
@since("2.0.0")
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
        and some extra params. This creates a deep copy of the embedded
        paramMap and copies the embedded and extra parameters over.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
"""
if extra is None:
extra = dict()
newTVS = Params.copy(self, extra)
if self.isSet(self.estimator):
newTVS.setEstimator(self.getEstimator().copy(extra))
# estimatorParamMaps remain the same
if self.isSet(self.evaluator):
newTVS.setEvaluator(self.getEvaluator().copy(extra))
return newTVS
@since("2.3.0")
def write(self):
"""Returns an MLWriter instance for this ML instance."""
return JavaMLWriter(self)
@classmethod
@since("2.3.0")
def read(cls):
"""Returns an MLReader instance for this class."""
return JavaMLReader(cls)
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java TrainValidationSplit, create and return a Python wrapper of it.
Used for ML persistence.
"""
estimator, epms, evaluator = super(TrainValidationSplit, cls)._from_java_impl(java_stage)
trainRatio = java_stage.getTrainRatio()
seed = java_stage.getSeed()
parallelism = java_stage.getParallelism()
collectSubModels = java_stage.getCollectSubModels()
# Create a new instance of this stage.
py_stage = cls(estimator=estimator, estimatorParamMaps=epms, evaluator=evaluator,
trainRatio=trainRatio, seed=seed, parallelism=parallelism,
collectSubModels=collectSubModels)
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java TrainValidationSplit. Used for ML persistence.
:return: Java object equivalent to this instance.
"""
estimator, epms, evaluator = super(TrainValidationSplit, self)._to_java_impl()
_java_obj = JavaParams._new_java_obj("org.apache.spark.ml.tuning.TrainValidationSplit",
self.uid)
_java_obj.setEstimatorParamMaps(epms)
_java_obj.setEvaluator(evaluator)
_java_obj.setEstimator(estimator)
_java_obj.setTrainRatio(self.getTrainRatio())
_java_obj.setSeed(self.getSeed())
_java_obj.setParallelism(self.getParallelism())
_java_obj.setCollectSubModels(self.getCollectSubModels())
return _java_obj
class TrainValidationSplitModel(Model, _TrainValidationSplitParams, MLReadable, MLWritable):
"""
Model from train validation split.
.. versionadded:: 2.0.0
"""
def __init__(self, bestModel, validationMetrics=[], subModels=None):
super(TrainValidationSplitModel, self).__init__()
#: best model from train validation split
self.bestModel = bestModel
#: evaluated validation metrics
self.validationMetrics = validationMetrics
#: sub models from train validation split
self.subModels = subModels
def _transform(self, dataset):
return self.bestModel.transform(dataset)
@since("2.0.0")
def copy(self, extra=None):
"""
Creates a copy of this instance with a randomly generated uid
and some extra params. This copies the underlying bestModel,
creates a deep copy of the embedded paramMap, and
copies the embedded and extra parameters over.
        It also creates a shallow copy of the validationMetrics.
It does not copy the extra Params into the subModels.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
"""
if extra is None:
extra = dict()
bestModel = self.bestModel.copy(extra)
validationMetrics = list(self.validationMetrics)
subModels = self.subModels
return TrainValidationSplitModel(bestModel, validationMetrics, subModels)
@since("2.3.0")
def write(self):
"""Returns an MLWriter instance for this ML instance."""
return JavaMLWriter(self)
@classmethod
@since("2.3.0")
def read(cls):
"""Returns an MLReader instance for this class."""
return JavaMLReader(cls)
@classmethod
def _from_java(cls, java_stage):
"""
Given a Java TrainValidationSplitModel, create and return a Python wrapper of it.
Used for ML persistence.
"""
# Load information from java_stage to the instance.
sc = SparkContext._active_spark_context
bestModel = JavaParams._from_java(java_stage.bestModel())
validationMetrics = _java2py(sc, java_stage.validationMetrics())
estimator, epms, evaluator = super(TrainValidationSplitModel,
cls)._from_java_impl(java_stage)
# Create a new instance of this stage.
py_stage = cls(bestModel=bestModel,
validationMetrics=validationMetrics)._set(estimator=estimator)
py_stage = py_stage._set(estimatorParamMaps=epms)._set(evaluator=evaluator)
if java_stage.hasSubModels():
py_stage.subModels = [JavaParams._from_java(sub_model)
for sub_model in java_stage.subModels()]
py_stage._resetUid(java_stage.uid())
return py_stage
def _to_java(self):
"""
Transfer this instance to a Java TrainValidationSplitModel. Used for ML persistence.
:return: Java object equivalent to this instance.
"""
sc = SparkContext._active_spark_context
_java_obj = JavaParams._new_java_obj(
"org.apache.spark.ml.tuning.TrainValidationSplitModel",
self.uid,
self.bestModel._to_java(),
_py2java(sc, self.validationMetrics))
estimator, epms, evaluator = super(TrainValidationSplitModel, self)._to_java_impl()
_java_obj.set("evaluator", evaluator)
_java_obj.set("estimator", estimator)
_java_obj.set("estimatorParamMaps", epms)
if self.subModels is not None:
java_sub_models = [sub_model._to_java() for sub_model in self.subModels]
_java_obj.setSubModels(java_sub_models)
return _java_obj
if __name__ == "__main__":
import doctest
from pyspark.sql import SparkSession
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.tuning tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
sys.exit(-1)
| apache-2.0 |
ryanurbanowicz/exstracs | ExSTraCS_DataManagement.py | 1 | 25901 | """
Name: ExSTraCS_DataManagement.py
Authors: Ryan Urbanowicz - Written at Dartmouth College, Hanover, NH, USA
Contact: [email protected]
Created: April 25, 2014
Modified: August 25,2014
Description: Loads the dataset, characterizes and stores critical features of the datasets (including discrete vs. continuous attributes and phenotype), handles missing
data, and finally formats the data so that it may be conveniently utilized by ExSTraCS.
---------------------------------------------------------------------------------------------------------------------------------------------------------
ExSTraCS V2.0: Extended Supervised Tracking and Classifying System - An advanced LCS designed specifically for complex, noisy classification/data mining tasks,
such as biomedical/bioinformatics/epidemiological problem domains. This algorithm should be well suited to any supervised learning problem involving
classification, prediction, data mining, and knowledge discovery. This algorithm would NOT be suited to function approximation, behavioral modeling,
or other multi-step problems. This LCS algorithm is most closely based on the "UCS" algorithm, an LCS introduced by Ester Bernado-Mansilla and
Josep Garrell-Guiu (2003) which in turn is based heavily on "XCS", an LCS introduced by Stewart Wilson (1995).
Copyright (C) 2014 Ryan Urbanowicz
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
---------------------------------------------------------------------------------------------------------------------------------------------------------
"""
#Import Required Modules-------------------------------
import math
import random
import sys
import copy #Needed for the deepcopy operations in saveTempTurfData() and returntoFullData()
from ExSTraCS_Constants import *
#------------------------------------------------------
class DataManagement:
def __init__(self, trainFile, testFile, infoList = None):
#Set random seed if specified.-----------------------------------------------
if cons.useSeed:
random.seed(cons.randomSeed)
else:
random.seed(None)
if cons.offlineData:
#Initialize global variables-------------------------------------------------
self.numAttributes = None # Saves the number of attributes in the input file.
self.areInstanceIDs = False
self.instanceIDRef = None
self.phenotypeRef = None
self.discretePhenotype = True
self.attributeInfo = [] #Stores Discrete (0) vs. Continuous (1)
self.phenotypeList = [] #stores discrete phenotype values OR for continuous phenotype, max and min values
self.phenotypeRandomPick = None #Used to approximate random phenotype selection accuracy.
self.phenotypeRange = None
self.phenSD = None
self.labelMissingData = cons.labelMissingData
#Train/Test Specific-----------------------------------------------------------------------------
self.trainHeaderList = []
self.testHeaderList = []
self.numTrainInstances = None
self.numTestInstances = None
self.averageStateCount = None
self.discreteCount = 0
self.continuousCount = 0
self.classCount = {}
self.classPredictionWeights = {}
#Detect Features of training data--------------------------------------------------------------------------
print "----------------------------------------------------------------------------"
print "Environment: Formatting Data... "
rawTrainData = self.loadData(trainFile+'.txt', True) #Load the raw data.
self.characterizeDataset(rawTrainData) #Detect number of attributes, instances, and reference locations.
if cons.testFile == 'None': #If no testing data is available, formatting relies solely on training data.
data4Formating = rawTrainData
else:
rawTestData = self.loadData(testFile+'.txt', False) #Load the raw data.
self.compareDataset(rawTestData) #Ensure that key features are the same between training and testing datasets.
self.discriminatePhenotype(rawTrainData) #Determine if endpoint/phenotype is discrete or continuous.
if self.discretePhenotype:
self.discriminateClasses(rawTrainData) #Detect number of unique phenotype identifiers.
else:
print "DataManagement - Error: ExSTraCS 2.0 can not handle continuous endpoints."
self.discriminateAttributes(rawTrainData) #Detect whether attributes are discrete or continuous.
self.characterizeAttributes(rawTrainData) #Determine potential attribute states or ranges.
#Rule Specificity Limit (RSL) ----------------------------------------------------------------------------
if cons.RSL_Override > 0:
self.specLimit = cons.RSL_Override
else:
#Calculate Rule Specificity Limit --------------------------------------------------------------------
print "DataManagement: Estimating Classifier Specification Limit"
i = 1
uniqueCombinations = math.pow(self.averageStateCount, i)
while uniqueCombinations < self.numTrainInstances:
i += 1
uniqueCombinations = math.pow(self.averageStateCount,i)
self.specLimit = i
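                #Illustrative example (hypothetical numbers): with an average of 3 states per
                #attribute and 1000 training instances, 3^6 = 729 < 1000 <= 3^7 = 2187, so the
                #estimated specification limit would be 7.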
if self.numAttributes < self.specLimit: #Never allow the specLimit to be larger than the number of attributes in the dataset.
self.specLimit = self.numAttributes
print "DataManagement: Specification Limit = "+str(self.specLimit)
#Format and Shuffle Datasets----------------------------------------------------------------------------------------
if cons.testFile != 'None':
self.testFormatted = self.formatData(rawTestData, False) #Stores the formatted testing data set used throughout the algorithm.
self.trainFormatted = self.formatData(rawTrainData, True) #Stores the formatted training data set used throughout the algorithm.
print "----------------------------------------------------------------------------"
else:
#Initialize global variables-------------------------------------------------
self.numAttributes = infoList[0] # Saves the number of attributes in the input file.
self.areInstanceIDs = False
self.instanceIDRef = None
self.phenotypeRef = None
self.discretePhenotype = infoList[1]
self.attributeInfo = infoList[2] #Stores Discrete (0) vs. Continuous (1)
self.phenotypeList = infoList[3] #stores discrete phenotype values OR for continuous phenotype, max and min values
self.phenotypeRange = infoList[4]
self.trainHeaderList = infoList[5]
self.numTrainInstances = infoList[6]
def loadData(self, dataFile, doTrain):
""" Load the data file. """
print "DataManagement: Loading Data... " + str(dataFile)
try:
datasetList = []
f = open(dataFile,'rU')
if doTrain:
self.trainHeaderList = f.readline().rstrip('\n').split('\t') #strip off first row
else:
self.testHeaderList = f.readline().rstrip('\n').split('\t') #strip off first row
for line in f:
lineList = line.strip('\n').split('\t')
datasetList.append(lineList)
f.close()
except IOError, (errno, strerror):
print ("Could not Read File!")
print ("I/O error(%s): %s" % (errno, strerror))
raise
except ValueError:
print ("Could not convert data to an integer.")
raise
except:
print ("Unexpected error:", sys.exc_info()[0])
raise
return datasetList
def characterizeDataset(self, rawTrainData):
" Detect basic dataset parameters "
#Detect Instance ID's and save location if they occur.
if cons.labelInstanceID in self.trainHeaderList:
self.areInstanceIDs = True
self.instanceIDRef = self.trainHeaderList.index(cons.labelInstanceID)
print "DataManagement: Instance ID Column location = "+str(self.instanceIDRef)
self.numAttributes = len(self.trainHeaderList)-2 #one column for InstanceID and another for the phenotype.
else:
self.numAttributes = len(self.trainHeaderList)-1
if cons.labelPhenotype in self.trainHeaderList:
self.phenotypeRef = self.trainHeaderList.index(cons.labelPhenotype)
print "DataManagement: Phenotype Column Location = "+str(self.phenotypeRef)
else:
print "DataManagement: Error - Phenotype column not found! Check data set to ensure correct phenotype column label, or inclusion in the data."
if self.areInstanceIDs:
if self.phenotypeRef > self.instanceIDRef:
self.trainHeaderList.pop(self.phenotypeRef)
self.trainHeaderList.pop(self.instanceIDRef)
else:
self.trainHeaderList.pop(self.instanceIDRef)
self.trainHeaderList.pop(self.phenotypeRef)
else:
self.trainHeaderList.pop(self.phenotypeRef)
self.numTrainInstances = len(rawTrainData)
print "DataManagement: Number of Attributes = " + str(self.numAttributes) #DEBUG
print "DataManagement: Number of Instances = " + str(self.numTrainInstances) #DEBUG
def discriminatePhenotype(self, rawData):
""" Determine whether phenotype is Discrete(classes) or Continuous """
print "DataManagement: Analyzing Phenotype..."
inst = 0
classDict = {}
        while len(classDict.keys()) <= cons.discreteAttributeLimit and inst < self.numTrainInstances: #Heuristic check to discriminate between a discrete and a continuous phenotype
target = rawData[inst][self.phenotypeRef]
if target in classDict.keys(): #Check if we've seen this attribute state yet.
classDict[target] += 1
elif target == cons.labelMissingData: #Ignore missing data
print "DataManagement: Warning - Individual detected with missing phenotype information!"
pass
else: #New state observed
classDict[target] = 1
inst += 1
if len(classDict.keys()) > cons.discreteAttributeLimit:
self.discretePhenotype = False
self.phenotypeList = [float(target),float(target)]
print "DataManagement: Phenotype Detected as Continuous."
else:
print "DataManagement: Phenotype Detected as Discrete."
def discriminateClasses(self, rawData):
""" Determines number of classes and their identifiers. Only used if phenotype is discrete. Requires both training and testing datasets in order to standardize formatting across both. """
print "DataManagement: Detecting Classes..."
inst = 0
while inst < self.numTrainInstances:
target = rawData[inst][self.phenotypeRef]
if target in self.phenotypeList:
self.classCount[target] += 1 #NOTE: Could potentially store state frequency information to guide learning.
self.classPredictionWeights[target] += 1
else:
self.phenotypeList.append(target)
self.classCount[target] = 1
self.classPredictionWeights[target] = 1
inst += 1
print "DataManagement: Following Classes Detected:"
print self.phenotypeList
total = 0
for each in self.classCount.keys():
total += self.classCount[each]
print "Class: "+str(each)+ " count = "+ str(self.classCount[each])
for each in self.classCount.keys():
self.classPredictionWeights[each] = 1- (self.classPredictionWeights[each] /float(total))
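        #Rarer classes therefore receive larger prediction weights; e.g. (hypothetical) a
        #75/25 class frequency split yields weights of 0.25 and 0.75 respectively.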
print self.classPredictionWeights
#Random Selection Determination (Not specifically adapted for class imbalance)
self.phenotypeRandomPick = 1 / float(len(self.phenotypeList))
def compareDataset(self, rawTestData):
" Ensures that key dataset parameters are indeed the same for training and testing datasets "
if self.areInstanceIDs:
if self.phenotypeRef > self.instanceIDRef:
self.testHeaderList.pop(self.phenotypeRef)
self.testHeaderList.pop(self.instanceIDRef)
else:
self.testHeaderList.pop(self.instanceIDRef)
self.testHeaderList.pop(self.phenotypeRef)
else:
self.testHeaderList.pop(self.phenotypeRef)
if self.trainHeaderList != self.testHeaderList:
print "DataManagement: Error - Training and Testing Dataset Headers are not equivalent"
self.numTestInstances = len(rawTestData)
print "DataManagement: Number of Attributes = " + str(self.numAttributes) #DEBUG
print "DataManagement: Number of Instances = " + str(self.numTestInstances) #DEBUG
def discriminateAttributes(self, rawData):
""" Determine whether attributes are Discrete or Continuous. Requires both training and testing datasets in order to standardize formatting across both. """
print "DataManagement: Detecting Attributes..."
self.discreteCount = 0
self.continuousCount = 0
for att in range(len(rawData[0])):
if att != self.instanceIDRef and att != self.phenotypeRef: #Get just the attribute columns (ignores phenotype and instanceID columns)
attIsDiscrete = True
inst = 0
stateDict = {}
                while len(stateDict.keys()) <= cons.discreteAttributeLimit and inst < self.numTrainInstances: #Heuristic check to discriminate between discrete and continuous attributes
target = rawData[inst][att]
if target in stateDict.keys(): #Check if we've seen this attribute state yet.
stateDict[target] += 1
elif target == cons.labelMissingData: #Ignore missing data
pass
else: #New state observed
stateDict[target] = 1
inst += 1
if len(stateDict.keys()) > cons.discreteAttributeLimit:
attIsDiscrete = False
if attIsDiscrete:
self.attributeInfo.append([0,[]])
self.discreteCount += 1
else:
self.attributeInfo.append([1,[float(target),float(target)]]) #[min,max]
self.continuousCount += 1
print "DataManagement: Identified "+str(self.discreteCount)+" discrete and "+str(self.continuousCount)+" continuous attributes." #Debug
def characterizeAttributes(self, rawData):
""" Determine range or states of each attribute. Requires both training and testing datasets in order to standardize formatting across both. """
print "DataManagement: Characterizing Attributes..."
attributeID = 0
self.averageStateCount = 0
for att in range(len(rawData[0])):
if att != self.instanceIDRef and att != self.phenotypeRef: #Get just the attribute columns (ignores phenotype and instanceID columns)
for inst in range(len(rawData)):
target = rawData[inst][att]
if not self.attributeInfo[attributeID][0]: #If attribute is discrete
if target in self.attributeInfo[attributeID][1] or target == cons.labelMissingData:
pass #NOTE: Could potentially store state frequency information to guide learning.
else:
self.attributeInfo[attributeID][1].append(target)
self.averageStateCount += 1
else: #If attribute is continuous
#Find Minimum and Maximum values for the continuous attribute so we know the range.
if target == cons.labelMissingData:
pass
                        elif float(target) > self.attributeInfo[attributeID][1][1]: #New maximum observed for this attribute
self.attributeInfo[attributeID][1][1] = float(target)
elif float(target) < self.attributeInfo[attributeID][1][0]:
self.attributeInfo[attributeID][1][0] = float(target)
else:
pass
if self.attributeInfo[attributeID][0]: #If attribute is continuous
self.averageStateCount += 2 #Simplify continuous attributes to be counted as two-state variables (high/low) for specLimit calculation.
attributeID += 1
self.averageStateCount = self.averageStateCount / float(self.numAttributes)
def calcSD(self, phenList):
""" Calculate the standard deviation of the continuous phenotype scores. """
for i in range(len(phenList)):
phenList[i] = float(phenList[i])
avg = float(sum(phenList)/len(phenList))
dev = []
for x in phenList:
dev.append(x-avg)
sqr = []
for x in dev:
sqr.append(x*x)
return math.sqrt(sum(sqr)/(len(sqr)-1))
def formatData(self,rawData,training):
""" Get the data into a format convenient for the algorithm to interact with. Our format is consistent with our rule representation, namely, Attribute-list knowledge representation (ALKR),"""
formatted = []
#Initialize data format---------------------------------------------------------
for i in range(len(rawData)):
formatted.append([None,None,None]) #[Attribute States, Phenotype, InstanceID]
for inst in range(len(rawData)):
stateList = []
attributeID = 0
for att in range(len(rawData[0])):
if att != self.instanceIDRef and att != self.phenotypeRef: #Get just the attribute columns (ignores phenotype and instanceID columns)
target = rawData[inst][att]
if self.attributeInfo[attributeID][0]: #If the attribute is continuous
if target == cons.labelMissingData:
stateList.append(target) #Missing data saved as text label
else:
stateList.append(float(target)) #Save continuous data as floats.
else: #If the attribute is discrete - Format the data to correspond to the GABIL (DeJong 1991)
stateList.append(target) #missing data, and discrete variables, all stored as string objects
attributeID += 1
#Final Format-----------------------------------------------
formatted[inst][0] = stateList #Attribute states stored here
if self.discretePhenotype:
formatted[inst][1] = rawData[inst][self.phenotypeRef] #phenotype stored here
else:
print "DataManagement - Error: ExSTraCS 2.0 can not handle continuous endpoints."
if self.areInstanceIDs:
formatted[inst][2] = rawData[inst][self.instanceIDRef] #Instance ID stored here
else: #An instance ID is required to tie instances to attribute tracking scores
formatted[inst][2] = inst #NOTE ID's are assigned before shuffle - id's capture order of instances in original dataset file.
#-----------------------------------------------------------
if training:
random.shuffle(formatted) #One time randomization of the order the of the instances in the data, so that if the data was ordered by phenotype, this potential learning bias (based on instance ordering) is eliminated.
return formatted
def saveTempTurfData(self):
""" Store and preserve original dataset formatting for TuRF EK generation. """
self.turfformatted = copy.deepcopy(self.trainFormatted)
self.turfHeaderList = copy.deepcopy(self.trainHeaderList)
self.turfNumAttributes = copy.deepcopy(self.numAttributes)
self.tierList = [] #will store attribute names from headerList
def returntoFullData(self):
""" Following TuRF completion, return to orignal complete dataset. """
self.trainFormatted = self.turfformatted
self.trainHeaderList = self.turfHeaderList
self.numAttributes = self.turfNumAttributes
def turfDataManagement(self, filterScores, turfPercent):
""" Add 'Turf' wrapper to any Relief Based algorithm, so that the respective algorithm is run iteratively, each iteration removing
a percentage of attributes from consideration, for recalculation of remaining attribute scores. For example, the ReliefF algorithm
        with this wrapper is called Turf. The SURF algorithm with this wrapper is called SURFnTurf.  The SURF* algorithm with this wrapper
is called SURF*nTurf."""
#Determine number of attributes to remove.
numRemove = int(self.numAttributes*turfPercent)
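        #e.g. (hypothetical): with 1000 attributes and turfPercent = 0.05, each TuRF
        #iteration removes the 50 lowest scoring attributes before scores are recalculated.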
print "Removing "+str(numRemove)+" attribute(s)."
currentFilteredList = []
#Iterate through data removing lowest each time.
for i in range(0, numRemove):
lowVal = min(filterScores)
lowRef = filterScores.index(lowVal)
currentFilteredList.append(self.trainHeaderList.pop(lowRef))
self.numAttributes -= 1
for k in range(self.numTrainInstances):
self.trainFormatted[k][0].pop(lowRef)
filterScores.pop(lowRef)
self.tierList.append(currentFilteredList) #store filtered attributes as list of removed levels.
random.shuffle(self.trainFormatted) #Only makes a difference if a subset of instances is being used for calculations, this way a different subset will be used each time.
print str(self.numAttributes) + " remaining after turf iteration."
if self.numAttributes*float(turfPercent) < 1: #Prevent iterations that do not remove attributes (useful for smaller datasets)
keepGoing = False
else:
keepGoing = True
return keepGoing
def makeFilteredDataset(self, attsInData, fileName, filterScores):
""" Makes a new dataset, which has filtered out the lowest scoring attributes ( """
if attsInData > self.numAttributes:
print "NOTICE: Requested number of attributes ("+str(attsInData)+" in dataset not available. Returning total number of available attributes instead. ("+str(self.numAttributes)+")"
attsInData = self.numAttributes
try:
dataOut = open(fileName+'_filtered.txt','w')
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
raise
if attsInData < self.numAttributes:
numRemove = self.numAttributes - attsInData
else:
numRemove = 0
#Iterate through data removing lowest each time.
for i in range(0, numRemove):
lowRef = 0
lowVal = filterScores[0]
for j in range(1,self.numAttributes):
if filterScores[j] < lowVal:
lowVal = filterScores[j]
lowRef = j
#Lowest Value found
self.trainHeaderList.pop(lowRef)
self.testHeaderList.pop(lowRef)
self.attributeInfo.pop(lowRef)
self.numAttributes -= 1
for k in range(self.numTrainInstances):
self.trainFormatted[k][0].pop(lowRef)
for k in range(self.numTestInstances):
self.testFormatted[k][0].pop(lowRef)
#numAttributes is now equal to the filtered attribute number specified.
for i in range(self.numAttributes):
dataOut.write(self.trainHeaderList[i]+'\t')
dataOut.write('Class'+'\n')
for i in range(self.numTrainInstances):
for j in range(self.numAttributes):
dataOut.write(str(self.trainFormatted[i][0][j])+'\t')
dataOut.write(str(self.trainFormatted[i][1])+'\n')
try:
dataOut.close()
except IOError, (errno, strerror):
print "I/O error(%s): %s" % (errno, strerror)
raise | gpl-3.0 |
herilalaina/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 34 | 4126 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1]_ and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1]_ algorithms is compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <[email protected]>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_train_predict in zip(
bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
real_test_errors.append(
1. - accuracy_score(real_test_predict, y_test))
discrete_test_errors.append(
1. - accuracy_score(discrete_train_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| bsd-3-clause |
schets/scikit-learn | examples/cluster/plot_kmeans_stability_low_dim_dense.py | 335 | 4324 | """
============================================================
Empirical evaluation of the impact of k-means initialization
============================================================
Evaluate the ability of k-means initialization strategies to make
the algorithm's convergence robust, as measured by the relative standard
deviation of the inertia of the clustering (i.e. the sum of squared distances
to the nearest cluster center).
The first plot shows the best inertia reached for each combination
of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method
(``init="random"`` or ``init="kmeans++"``) for increasing values of the
``n_init`` parameter that controls the number of initializations.
The second plot demonstrates a single run of the ``MiniBatchKMeans``
estimator using ``init="random"`` and ``n_init=1``. This run leads to
a bad convergence (local optimum) with estimated centers stuck
between ground truth clusters.
The dataset used for evaluation is a 2D grid of isotropic Gaussian
clusters widely spaced.
"""
print(__doc__)
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from sklearn.utils import shuffle
from sklearn.utils import check_random_state
from sklearn.cluster import MiniBatchKMeans
from sklearn.cluster import KMeans
random_state = np.random.RandomState(0)
# Number of runs (with randomly generated datasets) for each strategy, so as
# to be able to compute an estimate of the standard deviation
n_runs = 5
# k-means models can do several random inits so as to be able to trade
# CPU time for convergence robustness
n_init_range = np.array([1, 5, 10, 15, 20])
# Datasets generation parameters
n_samples_per_center = 100
grid_size = 3
scale = 0.1
n_clusters = grid_size ** 2
def make_data(random_state, n_samples_per_center, grid_size, scale):
random_state = check_random_state(random_state)
centers = np.array([[i, j]
for i in range(grid_size)
for j in range(grid_size)])
n_clusters_true, n_features = centers.shape
noise = random_state.normal(
scale=scale, size=(n_samples_per_center, centers.shape[1]))
X = np.concatenate([c + noise for c in centers])
y = np.concatenate([[i] * n_samples_per_center
for i in range(n_clusters_true)])
return shuffle(X, y, random_state=random_state)
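# make_data yields grid_size ** 2 = 9 isotropic Gaussian blobs with standard
# deviation `scale`, centred on a 3 x 3 integer grid, so the ground-truth
# clusters are widely separated relative to their spread.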
# Part 1: Quantitative evaluation of various init methods
fig = plt.figure()
plots = []
legends = []
cases = [
(KMeans, 'k-means++', {}),
(KMeans, 'random', {}),
(MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}),
(MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}),
]
for factory, init, params in cases:
print("Evaluation of %s with %s init" % (factory.__name__, init))
inertia = np.empty((len(n_init_range), n_runs))
for run_id in range(n_runs):
X, y = make_data(run_id, n_samples_per_center, grid_size, scale)
for i, n_init in enumerate(n_init_range):
km = factory(n_clusters=n_clusters, init=init, random_state=run_id,
n_init=n_init, **params).fit(X)
inertia[i, run_id] = km.inertia_
p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1))
plots.append(p[0])
legends.append("%s with %s init" % (factory.__name__, init))
plt.xlabel('n_init')
plt.ylabel('inertia')
plt.legend(plots, legends)
plt.title("Mean inertia for various k-means init across %d runs" % n_runs)
# Part 2: Qualitative visual inspection of the convergence
X, y = make_data(random_state, n_samples_per_center, grid_size, scale)
km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1,
random_state=random_state).fit(X)
fig = plt.figure()
for k in range(n_clusters):
my_members = km.labels_ == k
color = cm.spectral(float(k) / n_clusters, 1)
plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color)
cluster_center = km.cluster_centers_[k]
plt.plot(cluster_center[0], cluster_center[1], 'o',
markerfacecolor=color, markeredgecolor='k', markersize=6)
plt.title("Example cluster allocation with a single random init\n"
"with MiniBatchKMeans")
plt.show()
| bsd-3-clause |
PSFCPlasmaTools/eqtools | eqtools/pfilereader.py | 1 | 7911 | # This program is distributed under the terms of the GNU General Purpose License (GPL).
# Refer to http://www.gnu.org/licenses/gpl.txt
#
# This file is part of EqTools.
#
# EqTools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EqTools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EqTools. If not, see <http://www.gnu.org/licenses/>.
"""
This module contains the :py:class:`PFileReader` class, a lightweight data
handler for p-file (radial profile) datasets.
Classes:
PFileReader:
Data-storage class for p-file data. Reads
data from ASCII p-file, storing as copy-safe object
attributes.
"""
import numpy as np
import csv
import re
from collections import namedtuple
class PFileReader(object):
"""Class to read ASCII p-file (profile data storage) into lightweight,
user-friendly data structure.
P-files store data blocks containing the following: a header with parameter
name, parameter units, x-axis units, and number of data points, followed by
values of axis x, parameter y, and derivative dy/dx. Each parameter block
is read into a namedtuple storing
======== ==============
'name' parameter name
'npts' array size
'x' abscissa array
'y' data array
'dydx' data gradient
'xunits' abscissa units
'units' data units
======== ==============
with each namedtuple stored as an attribute of the PFileReader instance.
This gracefully handles variable formats of p-files (differing versions of
p-files will have different parameters stored). Data blocks are accessed
as attributes in a copy-safe manner.
Creates instance of PFileReader.
Args:
pfile (String): Path to ASCII p-file to be loaded.
Keyword Args:
verbose (Boolean): Option to print message on object creation
listing available data parameters. Defaults to True.
Examples:
Load p-file data located at `file_path`, while suppressing terminal
output of stored parameters::
pfr = eqtools.PFileReader(file_path,verbose=False)
Recover electron density data (for example)::
ne_data = pfr.ne
Recover abscissa and electron density data (for example)::
ne = pfr.ne.y
abscis = pfr.ne.x
Available parameters in pfr may be listed via the overridden __str__
command.
"""
def __init__(self, pfile, verbose=True):
self._pfile = pfile
self._params = []
with open(pfile, 'r') as readfile:
dia = csv.excel()
dia.skipinitialspace = True
reader = csv.reader(readfile, dia, delimiter=' ')
# define data structure as named tuple for storing parameter values
data = namedtuple(
'DataStruct',
['name', 'npts', 'units', 'xunits', 'x', 'y', 'dydx']
)
# iterate through lines of file, checking for a header line;
# at each header, read the next npts lines of data into
# appropriate arrays.
# continue until no headerline is found (throws StopIteration).
# Populate list of params with available variables.
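            # A header line is assumed to look like (illustrative values only):
            #   201 psinorm ne(10^20/m^3) dnedpsiN
            # i.e. npts, the abscissa name and "param(units)", followed by npts
            # rows of "x  y  dy/dx" data.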
while True:
try:
headerline = next(reader)
except StopIteration:
break
npts = int(headerline[0]) # size of abscissa, data arrays
abscis = headerline[1] # string name of abscissa variable (e.g. 'psinorm')
var = re.split(r'[\(\)]', headerline[2])
param = var[0] # string name of parameter (e.g. 'ne')
units = var[1] # string name of units (e.g. '10^20/m^3')
# read npts next lines, populate arrays
x = []
val = []
gradval = []
for j in range(npts):
dataline = next(reader)
x.append(float(dataline[0]))
val.append(float(dataline[1]))
gradval.append(float(dataline[2]))
x = np.array(x)
val = np.array(val)
gradval = np.array(gradval)
# collate into storage structure
vars(self)['_'+param] = data(name=param,
npts=npts,
units=units,
xunits=abscis,
x=x,
y=val,
dydx=gradval)
self._params.append(param)
if verbose:
print('P-file data loaded from '+self._pfile)
print('Available parameters:')
for par in self._params:
un = vars(self)['_'+par].units
xun = vars(self)['_'+par].xunits
print(str(par).ljust(8)+str(xun).ljust(12)+str(un))
def __str__(self):
"""overrides default string method for useful output.
"""
mes = 'P-file data from '+self._pfile+' containing parameters:\n'
for par in self._params:
un = vars(self)['_'+par].units
xun = vars(self)['_'+par].xunits
mes += str(par).ljust(8)+str(xun).ljust(12)+str(un)+'\n'
return mes
def __getattribute__(self, name):
"""Copy-safe attribute retrieval method overriding default
object.__getattribute__.
Tries to retrieve attribute as-written (first check for default object
attributes). If that fails, looks for pseudo-private attributes, marked
by preceding underscore, to retrieve data blocks. If this fails,
raise AttributeError.
Args:
name (String): Name (without leading underscore for data variables)
of attribute.
Raises:
AttributeError: if no attribute can be found.
"""
try:
return super(PFileReader, self).__getattribute__(name)
except AttributeError:
try:
attr = super(PFileReader, self).__getattribute__('_'+name)
if type(attr) is list:
return attr[:]
else:
return attr
except AttributeError:
raise AttributeError('No attribute "%s" found' % name)
def __setattr__(self, name, value):
"""Copy-safe attribute setting method overriding default
`object.__setattr__`.
Raises error if object already has attribute `_{name}` for input name,
as such an attribute would interfere with automatic property generation
in :py:meth:`__getattribute__`.
Args:
name (String): Attribute name.
Raises:
AttributeError: if attempting to create attribute with protected
pseudo-private name.
"""
if hasattr(self, '_'+name):
raise AttributeError(
"PFileReader object already has data attribute"
" '_%(n)s', creating attribute '%(n)s' will"
" conflict with automatic property generation."
% {'n': name}
)
else:
super(PFileReader, self).__setattr__(name, value)
| gpl-3.0 |
schets/scikit-learn | examples/datasets/plot_random_dataset.py | 345 | 2254 | """
==============================================
Plot randomly generated classification dataset
==============================================
Plot several randomly generated 2D classification datasets.
This example illustrates the :func:`datasets.make_classification`
:func:`datasets.make_blobs` and :func:`datasets.make_gaussian_quantiles`
functions.
For ``make_classification``, three binary and two multi-class classification
datasets are generated, with different numbers of informative features and
clusters per class. """
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.datasets import make_blobs
from sklearn.datasets import make_gaussian_quantiles
plt.figure(figsize=(8, 8))
plt.subplots_adjust(bottom=.05, top=.9, left=.05, right=.95)
plt.subplot(321)
plt.title("One informative feature, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=1,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(322)
plt.title("Two informative features, one cluster per class", fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(323)
plt.title("Two informative features, two clusters per class", fontsize='small')
X2, Y2 = make_classification(n_features=2, n_redundant=0, n_informative=2)
plt.scatter(X2[:, 0], X2[:, 1], marker='o', c=Y2)
plt.subplot(324)
plt.title("Multi-class, two informative features, one cluster",
fontsize='small')
X1, Y1 = make_classification(n_features=2, n_redundant=0, n_informative=2,
n_clusters_per_class=1, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(325)
plt.title("Three blobs", fontsize='small')
X1, Y1 = make_blobs(n_features=2, centers=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.subplot(326)
plt.title("Gaussian divided into three quantiles", fontsize='small')
X1, Y1 = make_gaussian_quantiles(n_features=2, n_classes=3)
plt.scatter(X1[:, 0], X1[:, 1], marker='o', c=Y1)
plt.show()
| bsd-3-clause |
herilalaina/scikit-learn | examples/neighbors/plot_lof.py | 23 | 2013 | """
=================================================
Anomaly detection with Local Outlier Factor (LOF)
=================================================
This example presents the Local Outlier Factor (LOF) estimator. The LOF
algorithm is an unsupervised outlier detection method which computes the local
density deviation of a given data point with respect to its neighbors.
It considers as outliers the samples that have a substantially lower density than
their neighbors.
The number of neighbors considered (parameter n_neighbors) is typically
chosen 1) greater than the minimum number of objects a cluster has to contain,
so that other objects can be local outliers relative to this cluster, and 2)
smaller than the maximum number of close-by objects that can potentially be
local outliers.
In practice, such information is generally not available, and taking
n_neighbors=20 appears to work well in general.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import LocalOutlierFactor
np.random.seed(42)
# Generate train data
X = 0.3 * np.random.randn(100, 2)
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
X = np.r_[X + 2, X - 2, X_outliers]
# fit the model
clf = LocalOutlierFactor(n_neighbors=20)
y_pred = clf.fit_predict(X)
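# fit_predict labels inliers with 1 and outliers with -1; the last 20 rows of X
# are the uniformly drawn abnormal observations appended above.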
y_pred_outliers = y_pred[200:]
# plot the level sets of the decision function
xx, yy = np.meshgrid(np.linspace(-5, 5, 50), np.linspace(-5, 5, 50))
Z = clf._decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Local Outlier Factor (LOF)")
plt.contourf(xx, yy, Z, cmap=plt.cm.Blues_r)
a = plt.scatter(X[:200, 0], X[:200, 1], c='white',
edgecolor='k', s=20)
b = plt.scatter(X[200:, 0], X[200:, 1], c='red',
edgecolor='k', s=20)
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a, b],
["normal observations",
"abnormal observations"],
loc="upper left")
plt.show()
| bsd-3-clause |
BRAINSia/ITK | Modules/Filtering/ImageIntensity/wrapping/test/itkImageFilterNumPyInputsTest.py | 6 | 4544 | # ==========================================================================
#
# Copyright NumFOCUS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==========================================================================*/
import itkConfig
itkConfig.LazyLoading = False
import itk
import numpy as np
itk.auto_progress(2)
array1 = np.ones((4, 4), dtype=np.uint8)
array2 = 2 * np.ones((4, 4), dtype=np.uint8)
added = itk.add_image_filter(array1, array2)
assert isinstance(added, np.ndarray)
assert np.all(added == 3)
added = itk.add_image_filter(Input1=array1, Input2=array2)
assert isinstance(added, np.ndarray)
assert np.all(added == 3)
# support kwargs with "image" in the name
masked = itk.mask_image_filter(array1, mask_image=array2)
try:
import xarray as xr
image1 = itk.image_from_array(array1)
data_array1 = itk.xarray_from_image(image1)
image2 = itk.image_from_array(array2)
data_array2 = itk.xarray_from_image(image2)
added = itk.add_image_filter(data_array1, data_array2)
assert isinstance(added, xr.DataArray)
assert np.all(added == 3)
added = itk.add_image_filter(Input1=data_array1, Input2=data_array2)
assert isinstance(added, xr.DataArray)
assert np.all(added == 3)
# support kwargs with "image" in the name
masked = itk.mask_image_filter(data_array1, mask_image=data_array2)
except ImportError:
# Could not import xarray
pass
try:
import torch
# construct normal, interleaved (RGBRGB) ITK image
arrayMultiChannelInterleaved = np.arange(0, 4 * 2 * 3, dtype=np.uint8).reshape(
(4, 2, 3)
)
print("arrayMultiChannelInterleaved:\n", arrayMultiChannelInterleaved)
image0 = itk.image_from_array(
np.zeros(arrayMultiChannelInterleaved.shape, dtype=np.uint8), is_vector=True
)
imageMCI = itk.image_from_array(arrayMultiChannelInterleaved, ttype=type(image0))
# construct contiguous (RRBBGG) torch tensor
arrayMultiChannelContiguous = np.copy(arrayMultiChannelInterleaved)
dest = list(range(arrayMultiChannelInterleaved.ndim))
source = dest.copy()
end = source.pop()
source.insert(0, end)
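    # Move the channel axis to the front: the (4, 2, 3) channels-last array
    # becomes (3, 4, 2), i.e. each channel's values stored contiguously, before
    # it is wrapped in a torch tensor below.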
arrayMultiChannelContiguous = np.moveaxis(
arrayMultiChannelContiguous, source, dest
).copy()
print("arrayMultiChannelContiguous:\n", arrayMultiChannelContiguous)
tensorMCC = torch.from_numpy(arrayMultiChannelContiguous)
tensor0 = torch.from_numpy(
np.zeros(arrayMultiChannelContiguous.shape, dtype=np.uint8)
)
# sanity check: ITK image works with unary filter
luminanceITK = itk.rgb_to_luminance_image_filter(imageMCI)
assert isinstance(luminanceITK, itk.Image)
array = itk.array_view_from_image(luminanceITK)
# check that torch tensor works with unary filter
luminanceTensor = itk.rgb_to_luminance_image_filter(tensorMCC)
assert isinstance(luminanceTensor, torch.Tensor)
print("luminanceTensor:\n", luminanceTensor)
assert np.array_equal(luminanceTensor, array)
# sanity check: ITK images work with binary filter
image1 = itk.add_image_filter(image0, imageMCI)
assert isinstance(image1, itk.Image)
array = itk.array_view_from_image(image1)
assert np.array_equal(array, arrayMultiChannelInterleaved)
# check that ITK image and torch tensor work with binary filter
itkTensor = itk.add_image_filter(image0, tensorMCC)
assert isinstance(itkTensor, torch.Tensor)
print("itkTensor:\n", itkTensor)
assert np.array_equal(itkTensor, arrayMultiChannelContiguous)
# check that two torch tensors work with binary filter
tensor1 = itk.add_image_filter(Input1=tensorMCC, Input2=tensor0)
assert isinstance(tensor1, torch.Tensor)
assert np.array_equal(tensor1, itkTensor)
# check that torch tensor and ITK image work with binary filter
tensorITK = itk.add_image_filter(tensorMCC, image0)
assert isinstance(tensorITK, torch.Tensor)
assert np.array_equal(tensorITK, tensor1)
except ImportError:
# Could not import torch
pass
| apache-2.0 |
Marcello-Sega/pytim | pytim/utilities_dbscan.py | 1 | 3988 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
from __future__ import print_function
import numpy as np
from scipy.cluster import vq
from scipy.spatial import cKDTree
from pytim_dbscan import dbscan_inner
def determine_samples(threshold_density, cluster_cut, n_neighbors):
if isinstance(threshold_density, type(None)):
return 2
if isinstance(threshold_density, (float, int)):
min_samples = threshold_density * 4. / 3. * np.pi * cluster_cut**3
elif (threshold_density == 'auto'):
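        # 'auto': split the neighbor counts into two modes with k-means and use the larger centroid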
modes = 2
centroid, _ = vq.kmeans2(
n_neighbors * 1.0, modes, iter=10, check_finite=False)
min_samples = np.max(centroid)
else:
raise ValueError("Wrong value of 'threshold_density' passed\
to do_cluster_analysis_DBSCAN() ")
return np.max([min_samples, 2])
def do_cluster_analysis_dbscan(group,
cluster_cut,
threshold_density=None,
molecular=True):
""" Performs a cluster analysis using DBSCAN
        :returns [labels,counts,neighbors]: lists of the id of the cluster to
                                            which every atom belongs, of the
                                            number of elements in each cluster,
                                            and of the number of neighbors for
                                            each atom according to the
                                            specified criterion.
Uses a slightly modified version of DBSCAN from sklearn.cluster
that takes periodic boundary conditions into account (through
cKDTree's boxsize option) and collects also the sizes of all
clusters. This is on average O(N log N) thanks to the O(log N)
scaling of the kdtree.
"""
box = group.universe.dimensions[:3]
# NOTE: extra_cluster_groups are not yet implemented
points = group.atoms.positions[:]
tree = cKDTree(points, boxsize=box[:3])
neighborhoods = np.array([
np.array(neighbors)
for neighbors in tree.query_ball_point(points, cluster_cut, workers=-1)
],dtype=object)
if len(neighborhoods.shape) != 1:
raise ValueError("Error in do_cluster_analysis_DBSCAN(), the cutoff\
is probably too small")
if molecular is False:
n_neighbors = np.array([len(neighbors) for neighbors in neighborhoods])
else:
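        # in molecular mode, count neighboring molecules (unique residues) instead of atoms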
n_neighbors = np.array([
len(np.unique(group[neighbors].resids))
for neighbors in neighborhoods
])
min_samples = determine_samples(threshold_density, cluster_cut,
n_neighbors)
labels = -np.ones(points.shape[0], dtype=np.intp)
counts = np.zeros(points.shape[0], dtype=np.intp)
core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels, counts)
return labels, counts, n_neighbors
def _():
"""
This is a collection of tests to check
that the DBSCAN behavior is kept consistent
>>> import MDAnalysis as mda
>>> import pytim
>>> pytim.utilities_dbscan._() ; # coverage
>>> import numpy as np
>>> from pytim.datafiles import ILBENZENE_GRO
>>> from pytim.utilities import do_cluster_analysis_dbscan as DBScan
>>> u = mda.Universe(ILBENZENE_GRO)
>>> benzene = u.select_atoms('name C and resname LIG')
>>> u.atoms.positions = u.atoms.pack_into_box()
>>> l,c,n = DBScan(benzene, cluster_cut = 4.5, threshold_density = None)
>>> l1,c1,n1 = DBScan(benzene, cluster_cut = 8.5, threshold_density = 'auto')
>>> td = 0.009
>>> l2,c2,n2 = DBScan(benzene, cluster_cut = 8.5, threshold_density = td)
>>> print (np.sort(c)[-2:])
[ 12 14904]
>>> print (np.sort(c2)[-2:])
[ 0 9335]
>>> print ((np.all(c1==c2), np.all(l1==l2)))
(True, True)
"""
pass
| gpl-3.0 |
Lab41/pelops | testci/test_dgcars.py | 3 | 6598 | import pytest
import os.path
import json
import pelops.utils as utils
from pelops.datasets.dgcars import DGCarsDataset
from pelops.datasets.chip import Chip
from pelops.utils import SetType
@pytest.fixture
def dgcars(tmpdir):
    # Define some training and test data; the ALL set type is the union of the two
TRAIN = [
{"url": "http://example.com/img.jpg", "hash": "2a8cedfa145b4345aed3fd9e82796c3e", "resnet50": "minivan", "model": "ZX2", "filename": "black/Ford/2a8cedfa145b4345aed3fd9e82796c3e.jpg", "make": "Ford", "color": "black"},
{"url": "http://example.com/img.jpg", "hash": "8241daf452ace679162c69386f26ddc7", "resnet50": "sports_car", "model": "Mazda6 Sport", "filename": "red/Mazda/8241daf452ace679162c69386f26ddc7.jpg", "make": "Mazda", "color": "red"},
{"url": "http://example.com/img.jpg", "hash": "e8dc3fb78206b14fe3568c1b28e5e5a1", "resnet50": "cab", "model": "XJ Series", "filename": "yellow/Jaguar/e8dc3fb78206b14fe3568c1b28e5e5a1.jpg", "make": "Jaguar", "color": "yellow"},
]
TEST = [
{"url": "http://example.com/img.jpg", "hash": "8881e7b561393f1d778a70dd449433e9", "resnet50": "racer", "model": "IS F", "filename": "yellow/Lexus/8881e7b561393f1d778a70dd449433e9.jpg", "make": "Lexus", "color": "yellow"},
{"url": "http://example.com/img.jpg", "hash": "38e857d5235afda4315676c0b7756832", "resnet50": "pickup", "model": "Mark VII", "filename": "silver/Lincoln/38e857d5235afda4315676c0b7756832.jpg", "make": "Lincoln", "color": "silver"},
{"url": "http://example.com/img.jpg", "hash": "6eb2b407cc398e70604bfd336bb2efad", "resnet50": "pickup", "model": "Lightning", "filename": "orange/Ford/6eb2b407cc398e70604bfd336bb2efad.jpg", "make": "Ford", "color": "orange"},
{"url": "http://example.com/img.jpg", "hash": "eb3811772ec012545c8952d88906d355", "resnet50": "racer", "model": "Rockette", "filename": "green/Fairthorpe/eb3811772ec012545c8952d88906d355.jpg", "make": "Fairthorpe", "color": "green"},
{"url": "http://example.com/img.jpg", "hash": "8dbbc1d930c7f2e4558efcc596728945", "resnet50": "minivan", "model": "S70", "filename": "white/Volvo/8dbbc1d930c7f2e4558efcc596728945.jpg", "make": "Volvo", "color": "white"},
{"url": "http://example.com/img.jpg", "hash": "ed45784812d1281bcb61f217f4422ab5", "resnet50": "convertible", "model": "A8", "filename": "green/Audi/ed45784812d1281bcb61f217f4422ab5.jpg", "make": "Audi", "color": "green"},
{"url": "http://example.com/img.jpg", "hash": "763ca4abbbb9b042b21f19fd80986179", "resnet50": "pickup", "model": "W126", "filename": "green/Mercedes-Benz/763ca4abbbb9b042b21f19fd80986179.jpg", "make": "Mercedes-Benz", "color": "green"},
]
WRITE_LIST = (
# filename, data list, settype
("allFiles", TRAIN + TEST, SetType.ALL),
("training", TRAIN, SetType.TRAIN),
("testing", TEST, SetType.TEST),
)
output_chips = {
SetType.ALL: [],
SetType.TRAIN: [],
SetType.TEST: [],
}
for filename, data_list, settype in WRITE_LIST:
fn = tmpdir.join(filename)
with open(fn.strpath, "w") as f:
for d in data_list:
# Write the data list files
line = json.dumps(d)
f.write(line + "\n")
# Make a chip
fp = os.path.join(tmpdir.strpath, d["filename"])
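            # car_id, cam_id and time are unknown for DG Cars, so they are left as None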
chip = Chip(fp, None, None, None, d)
output_chips[settype].append(chip)
# Instantiate a DGCarsDataset() class
output_classes = {
SetType.ALL: DGCarsDataset(tmpdir.strpath, SetType.ALL),
SetType.TRAIN: DGCarsDataset(tmpdir.strpath, SetType.TRAIN),
SetType.TEST: DGCarsDataset(tmpdir.strpath, SetType.TEST),
}
return (output_classes, output_chips)
def test_dgcars_chips_len(dgcars):
classes = dgcars[0]
answer_chips = dgcars[1]
# check that self.chips has been created, is not empty, and has the right
# number of entries
for key, cls in classes.items():
ans = answer_chips[key]
assert len(cls.chips) == len(ans)
def test_dgcars_chips_vals(dgcars):
classes = dgcars[0]
answer_chips = dgcars[1]
for key, cls in classes.items():
ans = answer_chips[key]
for chip in cls:
# The chip must match one of our hand built chips
assert chip in ans
# Various values are None
assert chip.car_id is None
assert chip.cam_id is None
assert chip.time is None
# Misc and filepath should exist
assert chip.filepath
assert chip.misc
# Misc is a dictionary like object
assert hasattr(chip.misc, "get")
def test_get_all_chips_by_car_id(dgcars):
classes = dgcars[0]
answer_chips = dgcars[1]
for key, cls in classes.items():
ans = answer_chips[key]
# All car_id values are None in DG Cars
all_chips = sorted(cls.get_all_chips_by_car_id(None))
assert all_chips == sorted(ans)
def test_get_all_chips_by_cam_id(dgcars):
classes = dgcars[0]
answer_chips = dgcars[1]
for key, cls in classes.items():
ans = answer_chips[key]
# All cam_id values are None in DG Cars
all_chips = sorted(cls.get_all_chips_by_cam_id(None))
assert all_chips == sorted(ans)
def test_get_distinct_cams_by_car_id(dgcars):
classes = dgcars[0]
answer_chips = dgcars[1]
for key, cls in classes.items():
ans = answer_chips[key]
# All car_id values are None in DG Cars
assert cls.get_distinct_cams_by_car_id(None) == {None}
def test_get_all_cam_ids(dgcars):
classes = dgcars[0]
answer_chips = dgcars[1]
for key, cls in classes.items():
ans = answer_chips[key]
# All cam_id values are None in DG Cars
assert cls.get_all_cam_ids() == [None]
def test_get_all_car_ids(dgcars):
classes = dgcars[0]
answer_chips = dgcars[1]
for key, cls in classes.items():
ans = answer_chips[key]
# All car_id values are None in DG Cars
assert cls.get_all_car_ids() == [None]
def test_dgcars_iter(dgcars):
classes = dgcars[0]
answer_chips = dgcars[1]
for key, cls in classes.items():
ans = answer_chips[key]
# Ensure that we can iterate and get all of the items
for chip in cls:
assert chip in ans
# Ensure list can access the iterator, and that there are no extra
# chips
cls_chips = list(cls)
for chip in ans:
assert chip in cls_chips
| apache-2.0 |
sangwook236/general-development-and-testing | sw_dev/python/rnd/test/machine_learning/keras/keras_transfer_learning.py | 2 | 3740 | #!/usr/bin/env python
# coding: UTF-8
from __future__ import print_function
import datetime
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras import backend as K
def train_model(model, train, test, num_classes, input_shape, batch_size, epochs):
x_train = train[0].reshape((train[0].shape[0],) + input_shape)
x_test = test[0].reshape((test[0].shape[0],) + input_shape)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = tf.keras.utils.to_categorical(train[1], num_classes)
y_test = tf.keras.utils.to_categorical(test[1], num_classes)
model.compile(loss='categorical_crossentropy',
optimizer='adadelta',
metrics=['accuracy'])
t = datetime.datetime.now()
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
print('Training time: %s' % (datetime.datetime.now() - t))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test score:', score[0])
print('Test accuracy:', score[1])
# REF [site] >> https://github.com/keras-team/keras/blob/master/examples/mnist_transfer_cnn.py
def simple_transfer_learning_cnn_mnist_example():
batch_size = 128
num_classes = 5
epochs = 5
# Input image dimensions.
img_rows, img_cols = 28, 28
# Number of convolutional filters to use.
filters = 32
# Size of pooling area for max pooling.
pool_size = 2
# Convolution kernel size.
kernel_size = 3
if K.image_data_format() == 'channels_first':
input_shape = (1, img_rows, img_cols)
else:
input_shape = (img_rows, img_cols, 1)
#--------------------
# The data, split between train and test sets.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Create two datasets one with digits below 5 and one with 5 and above.
x_train_lt5 = x_train[y_train < 5]
y_train_lt5 = y_train[y_train < 5]
x_test_lt5 = x_test[y_test < 5]
y_test_lt5 = y_test[y_test < 5]
x_train_gte5 = x_train[y_train >= 5]
y_train_gte5 = y_train[y_train >= 5] - 5
x_test_gte5 = x_test[y_test >= 5]
y_test_gte5 = y_test[y_test >= 5] - 5
#--------------------
# Define two groups of layers: feature (convolutions) and classification (dense).
feature_layers = [
Conv2D(filters, kernel_size, padding='valid', input_shape=input_shape),
Activation('relu'),
Conv2D(filters, kernel_size),
Activation('relu'),
MaxPooling2D(pool_size=pool_size),
Dropout(0.25),
Flatten(),
]
classification_layers = [
Dense(128),
Activation('relu'),
Dropout(0.5),
Dense(num_classes),
Activation('softmax')
]
# Create complete model.
model = Sequential(feature_layers + classification_layers)
#--------------------
# Train model for 5-digit classification [0..4].
train_model(model,
(x_train_lt5, y_train_lt5),
(x_test_lt5, y_test_lt5),
num_classes, input_shape, batch_size, epochs)
    # Freeze the feature layers so that only the classification layers are trained on the new task.
for layer in feature_layers:
layer.trainable = False
#--------------------
# Transfer: train dense layers for new classification task [5..9].
train_model(model,
(x_train_gte5, y_train_gte5),
(x_test_gte5, y_test_gte5),
num_classes, input_shape, batch_size, epochs)
def main():
simple_transfer_learning_cnn_mnist_example()
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
| gpl-2.0 |
herilalaina/scikit-learn | sklearn/svm/tests/test_bounds.py | 21 | 2390 | import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.utils.testing import assert_true, assert_raises
from sklearn.utils.testing import assert_raise_message
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
# loss='l2' should raise ValueError
assert_raise_message(ValueError, "loss type not in",
l1_min_c, dense_X, Y1, "l2")
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
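    # at C == min_c all coefficients must still be exactly zero; slightly above it, at least one becomes non-zero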
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
assert_raises(ValueError, l1_min_c, X, y)
def test_unsupported_loss():
assert_raises(ValueError, l1_min_c, dense_X, Y1, 'l1')
| bsd-3-clause |
schets/scikit-learn | examples/text/mlcomp_sparse_document_classification.py | 290 | 4498 | """
========================================================
Classification of text documents: using a MLComp dataset
========================================================
This is an example showing how scikit-learn can be used to classify
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
The dataset used in this example is the 20 newsgroups dataset and should be
downloaded from the http://mlcomp.org (free registration required):
http://mlcomp.org/datasets/379
Once downloaded unzip the archive somewhere on your filesystem.
For instance in::
% mkdir -p ~/data/mlcomp
% cd ~/data/mlcomp
% unzip /path/to/dataset-379-20news-18828_XXXXX.zip
You should get a folder ``~/data/mlcomp/379`` with a file named ``metadata``
and subfolders ``raw``, ``train`` and ``test`` holding the text documents
organized by newsgroups.
Then set the ``MLCOMP_DATASETS_HOME`` environment variable pointing to
the root folder holding the uncompressed archive::
% export MLCOMP_DATASETS_HOME="~/data/mlcomp"
Then you are ready to run this example using your favorite python shell::
% ipython examples/mlcomp_sparse_document_classification.py
"""
# Author: Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import pylab as pl
from sklearn.datasets import load_mlcomp
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.naive_bayes import MultinomialNB
print(__doc__)
if 'MLCOMP_DATASETS_HOME' not in os.environ:
print("MLCOMP_DATASETS_HOME not set; please follow the above instructions")
sys.exit(0)
# Load the training set
print("Loading 20 newsgroups training set... ")
news_train = load_mlcomp('20news-18828', 'train')
print(news_train.DESCR)
print("%d documents" % len(news_train.filenames))
print("%d categories" % len(news_train.target_names))
print("Extracting features from the dataset using a sparse vectorizer")
t0 = time()
vectorizer = TfidfVectorizer(encoding='latin1')
X_train = vectorizer.fit_transform((open(f).read()
for f in news_train.filenames))
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_train.shape)
assert sp.issparse(X_train)
y_train = news_train.target
print("Loading 20 newsgroups test set... ")
t0 = time()
news_test = load_mlcomp('20news-18828', 'test')
print("done in %fs" % (time() - t0))
print("Predicting the labels of the test set...")
print("%d documents" % len(news_test.filenames))
print("%d categories" % len(news_test.target_names))
print("Extracting features from the dataset using the same vectorizer")
t0 = time()
X_test = vectorizer.transform((open(f).read() for f in news_test.filenames))
y_test = news_test.target
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X_test.shape)
###############################################################################
# Benchmark classifiers
def benchmark(clf_class, params, name):
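    # fit the classifier, time training and prediction, and report a classification report plus confusion matrix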
print("parameters:", params)
t0 = time()
clf = clf_class(**params).fit(X_train, y_train)
print("done in %fs" % (time() - t0))
if hasattr(clf, 'coef_'):
print("Percentage of non zeros coef: %f"
% (np.mean(clf.coef_ != 0) * 100))
print("Predicting the outcomes of the testing set")
t0 = time()
pred = clf.predict(X_test)
print("done in %fs" % (time() - t0))
print("Classification report on test set for classifier:")
print(clf)
print()
print(classification_report(y_test, pred,
target_names=news_test.target_names))
cm = confusion_matrix(y_test, pred)
print("Confusion matrix:")
print(cm)
# Show confusion matrix
pl.matshow(cm)
pl.title('Confusion matrix of the %s classifier' % name)
pl.colorbar()
print("Testbenching a linear classifier...")
parameters = {
'loss': 'hinge',
'penalty': 'l2',
'n_iter': 50,
'alpha': 0.00001,
'fit_intercept': True,
}
benchmark(SGDClassifier, parameters, 'SGD')
print("Testbenching a MultinomialNB classifier...")
parameters = {'alpha': 0.01}
benchmark(MultinomialNB, parameters, 'MultinomialNB')
pl.show()
| bsd-3-clause |
Yingmin-Li/keras | tests/auto/test_regularizers.py | 75 | 2372 | import unittest
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.models import Sequential
from keras.layers.core import Merge, Dense, Activation, Flatten, ActivityRegularization
from keras.layers.embeddings import Embedding
from keras.datasets import mnist
from keras.utils import np_utils
from keras import regularizers
nb_classes = 10
batch_size = 128
nb_epoch = 5
weighted_class = 9
standard_weight = 1
high_weight = 5
max_train_samples = 5000
max_test_samples = 1000
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)[:max_train_samples]
X_test = X_test.reshape(10000, 784)[:max_test_samples]
X_train = X_train.astype("float32") / 255
X_test = X_test.astype("float32") / 255
# convert class vectors to binary class matrices
y_train = y_train[:max_train_samples]
y_test = y_test[:max_test_samples]
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
test_ids = np.where(y_test == np.array(weighted_class))[0]
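# build a small dense network; the output layer optionally gets a weight regularizer and an activity regularizer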
def create_model(weight_reg=None, activity_reg=None):
model = Sequential()
model.add(Dense(784, 50))
model.add(Activation('relu'))
model.add(Dense(50, 10, W_regularizer=weight_reg, activity_regularizer=activity_reg))
model.add(Activation('softmax'))
return model
class TestRegularizers(unittest.TestCase):
def test_W_reg(self):
for reg in [regularizers.identity(), regularizers.l1(), regularizers.l2(), regularizers.l1l2()]:
model = create_model(weight_reg=reg)
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0)
def test_A_reg(self):
for reg in [regularizers.activity_l1(), regularizers.activity_l2()]:
model = create_model(activity_reg=reg)
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0)
if __name__ == '__main__':
print('Test weight and activity regularizers')
unittest.main()
| mit |
JoaquimPatriarca/senpy-for-gis | setup.py | 1 | 2796 | #from distutils.core import setup
from setuptools import setup
setup(
name='gasp',
version='0.0.1',
description='GASP',
url='https://github.com/JoaquimPatriarca/GIS-SENPY',
author='Joaquim Patriarca',
author_email='[email protected]',
license='GPL',
packages=[
# Main module
'gasp',
'gasp.arcgis', 'gasp.arcgis._3D', 'gasp.arcgis._3D.manage',
'gasp.arcgis.analysis', 'gasp.arcgis.glg', 'gasp.arcgis.manage',
'gasp.arcgis.manage.rst', 'gasp.arcgis.maps', 'gasp.arcgis.netanlst',
'gasp.arcgis.spatial_anlst', 'gasp.arcgis.statistics',
'gasp.djg', 'gasp.djg.files', 'gasp.djg.gis', 'gasp.djg.models',
'gasp.fromapi', 'gasp.fromapi.facebook', 'gasp.fromapi.glg',
'gasp.fromapi.weather', 'gasp.fromdbf', 'gasp.fromrst', 'gasp.fromshp',
'gasp.fromtxt', 'gasp.fromxls',
'gasp.gdal', 'gasp.gdal.analysis', 'gasp.gdal.analysis.prox',
'gasp.gdal.glg', 'gasp.gdal.img', 'gasp.gdal.manage', 'gasp.gdal.properties',
'gasp.gdal.spatial_anlst', 'gasp.gdal.spatial_lite', 'gasp.gdal.sqdb',
'gasp.gdal.statistics', 'gasp.geopnd', 'gasp.geopnd.manage',
'gasp.geosrv', 'gasp.geosrv.stores', 'gasp.geosrv.styles',
'gasp.grs', 'gasp.grs.i', 'gasp.grs.r', 'gasp.grs.v',
'gasp.ine', 'gasp.landslides', 'gasp.mapseries', 'gasp.networks',
'gasp.odbc', 'gasp.ogc', 'gasp.ogc.sld',
'gasp.osm2lulc', 'gasp.osm2lulc.rules', 'gasp.oss',
'gasp.pgsql', 'gasp.pgsql.q', 'gasp.pgsql.tables',
'gasp.pnd', 'gasp.postgis', 'gasp.qgis', 'gasp.saga',
'gasp.saga.analysis', 'gasp.saga.spatial_anlst',
'gasp.shply', 'gasp.sklearn', 'gasp.sqlite', 'gasp.terrain',
'gasp.toascii', 'gasp.todbf', 'gasp.toemme', 'gasp.tojson', 'gasp.toosm',
'gasp.torst', 'gasp.toshp', 'gasp.tosqlite',
'gasp.totxt', 'gasp.toxls', 'gasp.transportation',
'gasp.transportation.gtfs', 'gasp.xls', 'gasp.xls.adv'
],
install_requires=[
'psycopg2==2.7',
'click==6.7', 'click-plugins==1.0.3', 'cligj==0.4.0',
'django==1.11.8', 'django-widget-tweaks==1.4.1',
'numpy==1.14.5',
'sqlalchemy==1.2.8', 'geoalchemy2==0.3.0',
'shapely==1.6.4',
'fiona==1.7.11', 'pyproj==1.9.5.1',
'pandas==0.18.1', 'geopandas==0.3.0',
'xlrd==1.0',
'xlwt==1.1.2',
#'pygdal==1.11.3.3',
'netCDF4==1.2.7',
'polyline==1.3.2',
'google-api-python-client',
'unidecode',
'flickrapi==2.1.2',
'six==1.10.0',
'requests==2.11.1',
'requests_oauthlib==0.7.0',
'requests_toolbelt==0.7.0',
'tweepy==3.6.0',
'pysocks==1.6.7'
],
include_package_data=True
)
| gpl-3.0 |
rflamary/POT | examples/plot_WDA.py | 4 | 3084 | # -*- coding: utf-8 -*-
"""
=================================
Wasserstein Discriminant Analysis
=================================
This example illustrates the use of WDA as proposed in [11].
[11] Flamary, R., Cuturi, M., Courty, N., & Rakotomamonjy, A. (2016).
Wasserstein Discriminant Analysis.
"""
# Author: Remi Flamary <[email protected]>
#
# License: MIT License
import numpy as np
import matplotlib.pylab as pl
from ot.dr import wda, fda
##############################################################################
# Generate data
# -------------
#%% parameters
n = 1000 # nb samples in source and target datasets
nz = 0.2
# generate circle dataset
t = np.random.rand(n) * 2 * np.pi
ys = np.floor((np.arange(n) * 1.0 / n * 3)) + 1
xs = np.concatenate(
(np.cos(t).reshape((-1, 1)), np.sin(t).reshape((-1, 1))), 1)
xs = xs * ys.reshape(-1, 1) + nz * np.random.randn(n, 2)
t = np.random.rand(n) * 2 * np.pi
yt = np.floor((np.arange(n) * 1.0 / n * 3)) + 1
xt = np.concatenate(
(np.cos(t).reshape((-1, 1)), np.sin(t).reshape((-1, 1))), 1)
xt = xt * yt.reshape(-1, 1) + nz * np.random.randn(n, 2)
nbnoise = 8
xs = np.hstack((xs, np.random.randn(n, nbnoise)))
xt = np.hstack((xt, np.random.randn(n, nbnoise)))
##############################################################################
# Plot data
# ---------
#%% plot samples
pl.figure(1, figsize=(6.4, 3.5))
pl.subplot(1, 2, 1)
pl.scatter(xt[:, 0], xt[:, 1], c=ys, marker='+', label='Source samples')
pl.legend(loc=0)
pl.title('Discriminant dimensions')
pl.subplot(1, 2, 2)
pl.scatter(xt[:, 2], xt[:, 3], c=ys, marker='+', label='Source samples')
pl.legend(loc=0)
pl.title('Other dimensions')
pl.tight_layout()
##############################################################################
# Compute Fisher Discriminant Analysis
# ------------------------------------
#%% Compute FDA
p = 2
Pfda, projfda = fda(xs, ys, p)
##############################################################################
# Compute Wasserstein Discriminant Analysis
# -----------------------------------------
#%% Compute WDA
p = 2
reg = 1e0
k = 10
maxiter = 100
Pwda, projwda = wda(xs, ys, p, reg, k, maxiter=maxiter)
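# fda/wda return the learned projection matrix and a callable that projects samples onto it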
##############################################################################
# Plot 2D projections
# -------------------
#%% plot samples
xsp = projfda(xs)
xtp = projfda(xt)
xspw = projwda(xs)
xtpw = projwda(xt)
pl.figure(2)
pl.subplot(2, 2, 1)
pl.scatter(xsp[:, 0], xsp[:, 1], c=ys, marker='+', label='Projected samples')
pl.legend(loc=0)
pl.title('Projected training samples FDA')
pl.subplot(2, 2, 2)
pl.scatter(xtp[:, 0], xtp[:, 1], c=ys, marker='+', label='Projected samples')
pl.legend(loc=0)
pl.title('Projected test samples FDA')
pl.subplot(2, 2, 3)
pl.scatter(xspw[:, 0], xspw[:, 1], c=ys, marker='+', label='Projected samples')
pl.legend(loc=0)
pl.title('Projected training samples WDA')
pl.subplot(2, 2, 4)
pl.scatter(xtpw[:, 0], xtpw[:, 1], c=ys, marker='+', label='Projected samples')
pl.legend(loc=0)
pl.title('Projected test samples WDA')
pl.tight_layout()
pl.show()
| mit |
schets/scikit-learn | examples/manifold/plot_lle_digits.py | 180 | 8510 | """
=============================================================================
Manifold learning on handwritten digits: Locally Linear Embedding, Isomap...
=============================================================================
An illustration of various embeddings on the digits dataset.
The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not
technically a manifold embedding method, as it learns a high-dimensional
representation on which we apply a dimensionality reduction method.
However, it is often useful to cast a dataset into a representation in
which the classes are linearly-separable.
t-SNE will be initialized with the embedding that is generated by PCA in
this example, which is not the default setting. It ensures global stability
of the embedding, i.e., the embedding does not depend on random
initialization.
"""
# Authors: Fabian Pedregosa <[email protected]>
# Olivier Grisel <[email protected]>
# Mathieu Blondel <[email protected]>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble, lda,
random_projection)
digits = datasets.load_digits(n_class=6)
X = digits.data
y = digits.target
n_samples, n_features = X.shape
n_neighbors = 30
#----------------------------------------------------------------------
# Scale and visualize the embedding vectors
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
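    # rescale the embedding to the unit square so the different methods are directly comparable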
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(digits.data.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 4e-3:
# don't show points that are too close
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
#----------------------------------------------------------------------
# Plot images of the digits
n_img_per_row = 20
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
#----------------------------------------------------------------------
# Random 2D projection using a sparse random matrix
print("Computing random projection")
rp = random_projection.SparseRandomProjection(n_components=2, random_state=42)
X_projected = rp.fit_transform(X)
plot_embedding(X_projected, "Random Projection of the digits")
#----------------------------------------------------------------------
# Projection on to the first 2 principal components
print("Computing PCA projection")
t0 = time()
X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X)
plot_embedding(X_pca,
"Principal Components projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Projection on to the first 2 linear discriminant components
print("Computing LDA projection")
X2 = X.copy()
X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible
t0 = time()
X_lda = lda.LDA(n_components=2).fit_transform(X2, y)
plot_embedding(X_lda,
"Linear Discriminant projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Isomap projection of the digits dataset
print("Computing Isomap embedding")
t0 = time()
X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X)
print("Done.")
plot_embedding(X_iso,
"Isomap projection of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Locally linear embedding of the digits dataset
print("Computing LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='standard')
t0 = time()
X_lle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_lle,
"Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Modified Locally linear embedding of the digits dataset
print("Computing modified LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='modified')
t0 = time()
X_mlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_mlle,
"Modified Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# HLLE embedding of the digits dataset
print("Computing Hessian LLE embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='hessian')
t0 = time()
X_hlle = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_hlle,
"Hessian Locally Linear Embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# LTSA embedding of the digits dataset
print("Computing LTSA embedding")
clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2,
method='ltsa')
t0 = time()
X_ltsa = clf.fit_transform(X)
print("Done. Reconstruction error: %g" % clf.reconstruction_error_)
plot_embedding(X_ltsa,
"Local Tangent Space Alignment of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# MDS embedding of the digits dataset
print("Computing MDS embedding")
clf = manifold.MDS(n_components=2, n_init=1, max_iter=100)
t0 = time()
X_mds = clf.fit_transform(X)
print("Done. Stress: %f" % clf.stress_)
plot_embedding(X_mds,
"MDS embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Random Trees embedding of the digits dataset
print("Computing Totally Random Trees embedding")
hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0,
max_depth=5)
t0 = time()
X_transformed = hasher.fit_transform(X)
pca = decomposition.TruncatedSVD(n_components=2)
X_reduced = pca.fit_transform(X_transformed)
plot_embedding(X_reduced,
"Random forest embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# Spectral embedding of the digits dataset
print("Computing Spectral embedding")
embedder = manifold.SpectralEmbedding(n_components=2, random_state=0,
eigen_solver="arpack")
t0 = time()
X_se = embedder.fit_transform(X)
plot_embedding(X_se,
"Spectral embedding of the digits (time %.2fs)" %
(time() - t0))
#----------------------------------------------------------------------
# t-SNE embedding of the digits dataset
print("Computing t-SNE embedding")
tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
t0 = time()
X_tsne = tsne.fit_transform(X)
plot_embedding(X_tsne,
"t-SNE embedding of the digits (time %.2fs)" %
(time() - t0))
plt.show()
| bsd-3-clause |
franblas/facialrecoChallenge | evaluate.py | 1 | 1155 | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 16 17:45:25 2015
@author: Paco
"""
import matplotlib.pyplot as plt
import bisect
import numpy as np
from sklearn import metrics
class Evaluate(object):
_fpr = None
_tpr = None
def __init__(self): pass
def evaluation(self,pairs_label,dist):
fpr, tpr, thresholds = metrics.roc_curve(pairs_label, -dist)
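        # roc_curve expects higher scores for the positive class, so distances are negated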
self._fpr = fpr
self._tpr = tpr
def display_roc(self):
plt.clf()
plt.plot(self._fpr, self._tpr, label='ROC curve')
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.0])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.show()
def easy_score(self):
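        # miss rate (1 - TPR) at a false positive rate of roughly 0.1%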
easy_score = 1.0 - self._tpr[bisect.bisect(self._fpr, 0.001) - 1]
        print('Easy score : ' + str(easy_score))
return easy_score
def hard_score(self):
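        # approximate equal error rate: operating point where FPR is closest to 1 - TPR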
idx = (np.abs(self._fpr + self._tpr - 1.)).argmin()
hard_score = (self._fpr[idx]+(1-self._tpr[idx]))/2
        print('Hard score : ' + str(hard_score))
return hard_score
| mit |
herilalaina/scikit-learn | sklearn/_build_utils/__init__.py | 76 | 2644 | """
Utilities useful during the build.
"""
# author: Andy Mueller, Gael Varoquaux
# license: BSD
from __future__ import division, print_function, absolute_import
import os
from distutils.version import LooseVersion
from numpy.distutils.system_info import get_info
DEFAULT_ROOT = 'sklearn'
CYTHON_MIN_VERSION = '0.23'
def get_blas_info():
def atlas_not_found(blas_info_):
def_macros = blas_info.get('define_macros', [])
for x in def_macros:
if x[0] == "NO_ATLAS_INFO":
# if x[1] != 1 we should have lapack
# how do we do that now?
return True
if x[0] == "ATLAS_INFO":
if "None" in x[1]:
# this one turned up on FreeBSD
return True
return False
blas_info = get_info('blas_opt', 0)
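    # if no optimized BLAS is found, or the ATLAS info is unusable, fall back to linking against 'cblas'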
if (not blas_info) or atlas_not_found(blas_info):
cblas_libs = ['cblas']
blas_info.pop('libraries', None)
else:
cblas_libs = blas_info.pop('libraries', [])
return cblas_libs, blas_info
def build_from_c_and_cpp_files(extensions):
"""Modify the extensions to build from the .c and .cpp files.
    This is useful for releases: this way Cython is not required to
    run python setup.py install.
"""
for extension in extensions:
sources = []
for sfile in extension.sources:
path, ext = os.path.splitext(sfile)
if ext in ('.pyx', '.py'):
if extension.language == 'c++':
ext = '.cpp'
else:
ext = '.c'
sfile = path + ext
sources.append(sfile)
extension.sources = sources
def maybe_cythonize_extensions(top_path, config):
"""Tweaks for building extensions between release and development mode."""
is_release = os.path.exists(os.path.join(top_path, 'PKG-INFO'))
if is_release:
build_from_c_and_cpp_files(config.ext_modules)
else:
message = ('Please install cython with a version >= {0} in order '
'to build a scikit-learn development version.').format(
CYTHON_MIN_VERSION)
try:
import Cython
if LooseVersion(Cython.__version__) < CYTHON_MIN_VERSION:
message += ' Your version of Cython was {0}.'.format(
Cython.__version__)
raise ValueError(message)
from Cython.Build import cythonize
except ImportError as exc:
exc.args += (message,)
raise
config.ext_modules = cythonize(config.ext_modules)
| bsd-3-clause |
xzturn/tensorflow | tensorflow/python/data/experimental/kernel_tests/parallel_interleave_test.py | 9 | 30715 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.experimental.parallel_interleave()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import threading
import time
from absl.testing import parameterized
import numpy as np
from six.moves import zip_longest
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.experimental.ops import testing
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class ParallelInterleaveTest(test_base.DatasetTestBase, parameterized.TestCase):
def setUp(self):
self.error = None
self.repeat_count = 2
# Set up threading events used to sequence when items are produced that
# are subsequently interleaved. These events allow us to deterministically
# simulate slowdowns and force sloppiness.
self.read_coordination_events = {}
self.write_coordination_events = {}
# input values [4, 5, 6] are the common case for the tests; set defaults
for i in range(4, 7):
self.read_coordination_events[i] = threading.Semaphore(0)
self.write_coordination_events[i] = threading.Event()
def dataset_fn(self, input_values, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements):
def map_py_fn(x):
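      # block until the test releases this element, then signal that it has been produced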
self.write_coordination_events[x].wait()
self.write_coordination_events[x].clear()
self.read_coordination_events[x].release()
if self.error:
err = self.error
self.error = None
raise err # pylint: disable=raising-bad-type
return x * x
def map_fn(x):
return script_ops.py_func(map_py_fn, [x], x.dtype)
def interleave_fn(x):
dataset = dataset_ops.Dataset.from_tensors(x)
dataset = dataset.repeat(x)
return dataset.map(map_fn)
return dataset_ops.Dataset.from_tensor_slices(input_values).repeat(
self.repeat_count).apply(
interleave_ops.parallel_interleave(
interleave_fn, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements))
def _interleave(self, lists, cycle_length, block_length):
"""Python implementation of interleave used for testing."""
num_open = 0
# `all_iterators` acts as a queue of iterators over each element of `lists`.
all_iterators = [iter(l) for l in lists]
# `open_iterators` are the iterators whose elements are currently being
# interleaved.
open_iterators = []
for i in range(cycle_length):
if all_iterators:
open_iterators.append(all_iterators.pop(0))
num_open += 1
else:
open_iterators.append(None)
while num_open or all_iterators:
for i in range(cycle_length):
if open_iterators[i] is None:
if all_iterators:
open_iterators[i] = all_iterators.pop(0)
num_open += 1
else:
continue
for _ in range(block_length):
try:
yield next(open_iterators[i])
except StopIteration:
open_iterators[i] = None
num_open -= 1
break
@combinations.generate(
combinations.times(
combinations.combine(
input_lists=[[[4, 4, 4, 4], [5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6],
[4, 4, 4, 4], [5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]]],
expected_elements=[[
4, 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 4, 4, 4, 4, 5, 5,
5, 5, 5, 6, 6, 6, 6, 6, 6
]],
cycle_length=1,
block_length=1) +
combinations.combine(
input_lists=[[[4, 4, 4, 4], [5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6],
[4, 4, 4, 4], [5, 5, 5, 5, 5], [6, 6, 6, 6, 6, 6]]],
expected_elements=[[
4, 5, 4, 5, 4, 5, 4, 5, 5, 6, 6, 4, 6, 4, 6, 4, 6, 4, 6, 5, 6,
5, 6, 5, 6, 5, 6, 5, 6, 6
]],
cycle_length=2,
block_length=1) + combinations.combine(
input_lists=[[[4] * 4, [5] * 5, [6] * 6] * 2],
expected_elements=[[
4, 4, 5, 5, 4, 4, 5, 5, 5, 6, 6, 4, 4, 6, 6, 4, 4, 6, 6,
5, 5, 6, 6, 5, 5, 6, 6, 5, 6, 6
]],
cycle_length=2,
block_length=2) +
combinations.combine(
input_lists=[[[4, 4, 4, 4], [], [6, 6, 6, 6, 6, 6], [4, 4, 4, 4],
[], [6, 6, 6, 6, 6, 6]]],
expected_elements=[[
4, 4, 6, 4, 6, 4, 6, 6, 4, 6, 4, 6, 4, 4, 6, 6, 6, 6, 6, 6
]],
cycle_length=2,
block_length=1)))
def testPythonImplementation(self, input_lists, expected_elements,
cycle_length, block_length):
for index, (expected, produced) in enumerate(
zip_longest(expected_elements,
self._interleave(input_lists, cycle_length, block_length))):
self.assertEqual(expected, produced, "Values differ at %s. %s != %s" %
(index, expected, produced))
def _clear_coordination_events(self):
for i in range(4, 7):
self.read_coordination_events[i] = threading.Semaphore(0)
self.write_coordination_events[i].clear()
def _allow_all_map_threads(self):
for i in range(4, 7):
self.write_coordination_events[i].set()
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
sloppy=[False, True], prefetch_input_elements=[0, 1])))
def testSingleThreaded(self, sloppy, prefetch_input_elements):
# cycle_length=1,block_length=1 acts like `Dataset.interleave()` and
# `Dataset.flat_map()` and is single-threaded. No synchronization required.
self.skipTest("b/131722904")
self._clear_coordination_events()
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=1,
block_length=1,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=prefetch_input_elements))
for expected_element in self._interleave(
[[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 1, 1):
self.write_coordination_events[expected_element].set()
self.assertEqual(expected_element * expected_element,
self.evaluate(next_element()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
@combinations.generate(test_base.default_test_combinations())
def testSingleThreadedRagged(self):
# Tests a sequence with wildly different elements per iterator.
self.skipTest("b/131722904")
self._clear_coordination_events()
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([3, 7, 4]),
cycle_length=2,
block_length=1,
sloppy=False,
buffer_output_elements=1,
prefetch_input_elements=1))
# Add coordination values for 3 and 7
self.read_coordination_events[3] = threading.Semaphore(0)
self.write_coordination_events[3] = threading.Event()
self.read_coordination_events[7] = threading.Semaphore(0)
self.write_coordination_events[7] = threading.Event()
for expected_element in self._interleave(
[[3] * 3, [7] * 7, [4] * 4] * self.repeat_count, 2, 1):
self.write_coordination_events[expected_element].set()
output = self.evaluate(next_element())
self.assertEqual(expected_element * expected_element, output)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(sloppy=[False, True])))
def testTwoThreadsNoContention(self, sloppy):
# num_threads > 1.
# Explicit coordination should result in `Dataset.interleave()` behavior
self.skipTest("b/131722904")
self._clear_coordination_events()
done_first_event = False
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=1,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=1))
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
1)):
self.write_coordination_events[expected_element].set()
if done_first_event: # First event starts the worker threads.
self.read_coordination_events[expected_element].acquire()
actual_element = self.evaluate(next_element())
if not done_first_event:
self.read_coordination_events[expected_element].acquire()
done_first_event = True
self.assertEqual(
expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(sloppy=[False, True])))
def testTwoThreadsNoContentionWithRaces(self, sloppy):
"""Tests where all the workers race in producing elements.
Note: this is in contrast with the previous test which carefully sequences
the execution of the map functions.
Args:
sloppy: Whether to be sloppy or not.
"""
self.skipTest("b/131722904")
self._clear_coordination_events()
done_first_event = False
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=1,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=1))
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
1)):
if done_first_event: # First event starts the worker threads.
self._allow_all_map_threads()
self.read_coordination_events[expected_element].acquire()
else:
self.write_coordination_events[expected_element].set()
time.sleep(0.5) # Sleep to consistently "avoid" the race condition.
actual_element = self.evaluate(next_element())
if not done_first_event:
done_first_event = True
self.assertTrue(
self.read_coordination_events[expected_element].acquire(False))
self.assertEqual(
expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(sloppy=[False, True])))
def testTwoThreadsNoContentionBlockLength(self, sloppy):
# num_threads > 1.
# Explicit coordination should result in `Dataset.interleave()` behavior
self.skipTest("b/131722904")
self._clear_coordination_events()
done_first_event = False
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=2,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=1))
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
2)):
self.write_coordination_events[expected_element].set()
if done_first_event: # First event starts the worker threads.
self.read_coordination_events[expected_element].acquire()
actual_element = self.evaluate(next_element())
if not done_first_event:
done_first_event = True
self.read_coordination_events[expected_element].acquire()
self.assertEqual(
expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(sloppy=[False, True])))
def testTwoThreadsNoContentionWithRacesAndBlocking(self, sloppy):
"""Tests where all the workers race in producing elements.
Note: this is in contrast with the previous test which carefully sequences
the execution of the map functions.
Args:
sloppy: Whether to be sloppy or not.
"""
self.skipTest("b/131722904")
self._clear_coordination_events()
done_first_event = False
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=2,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=1))
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
2)):
if done_first_event: # First event starts the worker threads.
self._allow_all_map_threads()
self.read_coordination_events[expected_element].acquire()
else:
self.write_coordination_events[expected_element].set()
time.sleep(0.5) # Sleep to consistently "avoid" the race condition.
actual_element = self.evaluate(next_element())
if not done_first_event:
done_first_event = True
self.assertTrue(
self.read_coordination_events[expected_element].acquire(False))
self.assertEqual(
expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(sloppy=[False, True])))
def testEmptyInput(self, sloppy):
# Empty input.
self._clear_coordination_events()
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([]),
cycle_length=2,
block_length=3,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=0))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(sloppy=[False, True])))
def _testNonEmptyInputIntoEmptyOutputs(self, sloppy):
# Non-empty input leading to empty output.
self._clear_coordination_events()
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([0, 0, 0]),
cycle_length=2,
block_length=3,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=0))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
sloppy=[False, True], prefetch_input_elements=[1, 0])))
def testPartiallyEmptyOutputs(self, sloppy, prefetch_input_elements):
    race_indices = {2, 8, 14}  # Sequence points where sloppy mode has race conditions
# Mixture of non-empty and empty interleaved datasets.
self.skipTest("b/131722904")
self._clear_coordination_events()
done_first_event = False
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 0, 6]),
cycle_length=2,
block_length=1,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=prefetch_input_elements))
for i, expected_element in enumerate(
self._interleave([[4] * 4, [], [6] * 6] * self.repeat_count, 2, 1)):
self.write_coordination_events[expected_element].set()
# First event starts the worker threads. Additionally, when running the
# sloppy case with prefetch_input_elements=0, we get stuck if we wait
# for the read coordination event for certain event orderings in the
# presence of finishing iterators.
if done_first_event and not (sloppy and (i in race_indices)):
self.read_coordination_events[expected_element].acquire()
actual_element = self.evaluate(next_element())
if not done_first_event or (sloppy and (i in race_indices)):
done_first_event = True
self.read_coordination_events[expected_element].acquire()
self.assertEqual(
expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
@combinations.generate(test_base.default_test_combinations())
def testDelayedOutputSloppy(self):
# Explicitly control the sequence of events to ensure we correctly avoid
# head-of-line blocking.
self.skipTest("b/131722904")
self._clear_coordination_events()
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=1,
sloppy=True,
buffer_output_elements=1,
prefetch_input_elements=0))
mis_ordering = [
4, 4, 5, 4, 5, 5, 4, 5, 6, 6, 6, 5, 4, 4, 6, 6, 4, 4, 6, 5, 6, 6, 6, 6,
5, 5, 5, 5, 6, 6
]
for element in mis_ordering:
self.write_coordination_events[element].set()
self.assertEqual(element * element, self.evaluate(next_element()))
self.assertTrue(self.read_coordination_events[element].acquire(False))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
@combinations.generate(test_base.default_test_combinations())
def testBlockLengthWithContentionSloppy(self):
self.skipTest("b/131722904")
self._clear_coordination_events()
done_first_event = False
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=1,
sloppy=True,
buffer_output_elements=1,
prefetch_input_elements=1))
# Test against a generating sequence that differs from the uncontended
# case, in order to prove sloppy correctness.
for i, expected_element in enumerate(
self._interleave(
[[4] * 4, [5] * 5, [6] * 6] * self.repeat_count,
cycle_length=2,
block_length=3)):
self.write_coordination_events[expected_element].set()
if done_first_event: # First event starts the worker threads.
self.read_coordination_events[expected_element].acquire()
actual_element = self.evaluate(next_element())
if not done_first_event:
self.read_coordination_events[expected_element].acquire()
done_first_event = True
self.assertEqual(
expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(sloppy=[False, True])))
def testEarlyExit(self, sloppy):
# Exiting without consuming all input should not block
self.skipTest("b/131722904")
self._clear_coordination_events()
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=3,
block_length=2,
sloppy=sloppy,
buffer_output_elements=1,
prefetch_input_elements=0))
for i in range(4, 7):
self.write_coordination_events[i].set()
elem = self.evaluate(next_element()) # Start all workers
# Allow the one successful worker to progress beyond the py_func again.
elem = int(math.sqrt(elem))
self.write_coordination_events[elem].set()
self.read_coordination_events[elem].acquire()
# Allow the prefetch to succeed
for i in range(4, 7):
self.read_coordination_events[i].acquire()
self.write_coordination_events[i].set()
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(sloppy=[False, True])))
def testTooManyReaders(self, sloppy=False):
def interleave_fn(x):
dataset = dataset_ops.Dataset.from_tensors(x)
dataset = dataset.repeat(math_ops.cast(x, dtype=dtypes.int64))
return dataset
dataset = dataset_ops.Dataset.from_tensor_slices([4, 5, 6])
dataset = dataset.repeat(self.repeat_count)
dataset = dataset.apply(
interleave_ops.parallel_interleave(
interleave_fn, cycle_length=16, block_length=2, sloppy=sloppy))
get_next = self.getNext(dataset)
output_values = []
for _ in range(30):
output_values.append(self.evaluate(get_next()))
expected_values = self._interleave(
[[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 1, 2)
self.assertItemsEqual(output_values, expected_values)
@combinations.generate(test_base.default_test_combinations())
def testSparse(self):
def _map_fn(i):
return sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])
def _interleave_fn(x):
return dataset_ops.Dataset.from_tensor_slices(
sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))
dataset = dataset_ops.Dataset.range(10).map(_map_fn).apply(
interleave_ops.parallel_interleave(_interleave_fn, cycle_length=1))
get_next = self.getNext(dataset)
for i in range(10):
for j in range(2):
expected = [i, 0] if j % 2 == 0 else [0, -i]
self.assertAllEqual(expected, self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(test_base.default_test_combinations())
def testErrorsInOutputFn(self):
self.skipTest("b/131722904")
self._clear_coordination_events()
next_element = self.getNext(
self.dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=1,
sloppy=False,
buffer_output_elements=1,
prefetch_input_elements=0))
except_on_element_indices = set([3])
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5] * 5, [6] * 6] * self.repeat_count, 2,
1)):
if i in except_on_element_indices:
self.error = ValueError()
self.write_coordination_events[expected_element].set()
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(next_element())
else:
self.write_coordination_events[expected_element].set()
actual_element = self.evaluate(next_element())
self.assertEqual(
expected_element * expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
@combinations.generate(test_base.default_test_combinations())
def testErrorsInInputFn(self):
def map_py_fn(x):
if x == 5:
raise ValueError()
return x
def map_fn(x):
return script_ops.py_func(map_py_fn, [x], x.dtype)
def interleave_fn(x):
dataset = dataset_ops.Dataset.from_tensors(x)
dataset = dataset.repeat(x)
return dataset
def dataset_fn(input_values, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements):
return dataset_ops.Dataset.from_tensor_slices(input_values).map(
map_fn).repeat(self.repeat_count).apply(
interleave_ops.parallel_interleave(
interleave_fn, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements))
next_element = self.getNext(
dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=1,
sloppy=False,
buffer_output_elements=1,
prefetch_input_elements=0))
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5], [6] * 6] * self.repeat_count, 2, 1)):
if expected_element == 5:
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(next_element())
else:
actual_element = self.evaluate(next_element())
self.assertEqual(
expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
@combinations.generate(test_base.default_test_combinations())
def testErrorsInInterleaveFn(self):
def map_py_fn(x):
if x == 5:
raise ValueError()
return x
def interleave_fn(x):
dataset = dataset_ops.Dataset.from_tensors(x)
y = script_ops.py_func(map_py_fn, [x], x.dtype)
dataset = dataset.repeat(y)
return dataset
def dataset_fn(input_values, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements):
return dataset_ops.Dataset.from_tensor_slices(input_values).repeat(
self.repeat_count).apply(
interleave_ops.parallel_interleave(
interleave_fn, cycle_length, block_length, sloppy,
buffer_output_elements, prefetch_input_elements))
next_element = self.getNext(
dataset_fn(
input_values=np.int64([4, 5, 6]),
cycle_length=2,
block_length=1,
sloppy=False,
buffer_output_elements=1,
prefetch_input_elements=0))
for i, expected_element in enumerate(
self._interleave([[4] * 4, [5], [6] * 6] * self.repeat_count, 2, 1)):
if expected_element == 5:
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(next_element())
else:
actual_element = self.evaluate(next_element())
self.assertEqual(
expected_element, actual_element,
"At index %s: %s expected, got: %s" % (i, expected_element,
actual_element))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
@combinations.generate(test_base.default_test_combinations())
def testShutdownRace(self):
dataset = dataset_ops.Dataset.range(20)
map_fn = lambda x: dataset_ops.Dataset.range(20 * x, 20 * (x + 1))
dataset = dataset.apply(
interleave_ops.parallel_interleave(
map_fn,
cycle_length=3,
sloppy=False,
buffer_output_elements=1,
prefetch_input_elements=0))
dataset = dataset.batch(32)
results = []
for _ in range(2):
elements = []
next_element = self.getNext(dataset)
try:
while True:
elements.extend(self.evaluate(next_element()))
except errors.OutOfRangeError:
pass
results.append(elements)
self.assertAllEqual(results[0], results[1])
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
sloppy=[None, True, False], global_determinism=[True, False])))
def testDeterminismConfiguration(self, sloppy, global_determinism):
if sloppy is None:
expect_determinism = global_determinism
else:
expect_determinism = not sloppy
elements = list(range(1000))
def dataset_fn(delay_ms):
def interleave_fn(x):
ds = dataset_ops.Dataset.from_tensors(x)
if math_ops.equal(x, 0):
ds = ds.apply(testing.sleep(delay_ms * 1000))
else:
ds = ds.apply(testing.sleep(0))
return ds
dataset = dataset_ops.Dataset.from_tensor_slices(elements)
dataset = dataset.apply(
interleave_ops.parallel_interleave(
interleave_fn, cycle_length=10, sloppy=sloppy))
opts = dataset_ops.Options()
opts.experimental_deterministic = global_determinism
dataset = dataset.with_options(opts)
return dataset
self.checkDeterminism(dataset_fn, expect_determinism, elements)
if __name__ == "__main__":
test.main()
| apache-2.0 |
palicand/mi-pdd | base/grouped_estimator.py | 2 | 1453 | import sklearn.base as base
class GroupedEstimator(base.BaseEstimator):
"""GroupedClassifier is meant to group together classifiers
that should run be fitted to the same data. It is meant
to make scoring of many classifiers easier"""
def __init__(self, estimators=None, labels=None, group_name=None):
super(GroupedEstimator, self).__init__()
if labels is None:
self.labels = self.__generate_labels(estimators)
elif len(labels) == len(estimators):
self.labels = labels
else:
raise ValueError('The length of estimators and labels must be the same')
self.estimators = {}
for idx, label in enumerate(self.labels):
self.estimators[label] = estimators[idx]
if group_name is None:
self.group_name = 'Group'
@staticmethod
def __generate_labels(estimators):
return ['estimator ' + str(i) for i in range(len(estimators))]
def add_estimator(self, estimator, label=None):
        '''Adds a classifier to the group.
        The classifier must be fitted to the same data
        as the others, or the fit method must be run afterwards
        to fit all the classifiers to the same data.'''
if label is None:
label = 'estimator ' + str(len(self.estimators))
self.estimators[label] = estimator
def clear(self):
'''Clears classifiers'''
self.estimators.clear()
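# Illustrative usage sketch (not part of the original module). Assumes
# scikit-learn estimators such as LogisticRegression are available; the
# labels below are arbitrary examples.
#
# >>> from sklearn.linear_model import LogisticRegression
# >>> from sklearn.tree import DecisionTreeClassifier
# >>> group = GroupedEstimator(estimators=[LogisticRegression(), DecisionTreeClassifier()],
# ...                          labels=['logreg', 'tree'])
# >>> sorted(group.estimators.keys())
# ['logreg', 'tree']
# >>> group.add_estimator(LogisticRegression(C=0.1), label='logreg_l2')
# >>> len(group.estimators)
# 3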
| mit |
herilalaina/scikit-learn | sklearn/utils/testing.py | 1 | 30623 | """Testing utilities."""
# Copyright (c) 2011, 2012
# Authors: Pietro Berkes,
# Andreas Muller
# Mathieu Blondel
# Olivier Grisel
# Arnaud Joly
# Denis Engemann
# Giorgio Patrini
# Thierry Guillemot
# License: BSD 3 clause
import os
import inspect
import pkgutil
import warnings
import sys
import struct
import scipy as sp
import scipy.io
from functools import wraps
from operator import itemgetter
try:
# Python 2
from urllib2 import urlopen
from urllib2 import HTTPError
except ImportError:
# Python 3+
from urllib.request import urlopen
from urllib.error import HTTPError
import tempfile
import shutil
import os.path as op
import atexit
import unittest
# WindowsError only exist on Windows
try:
WindowsError
except NameError:
WindowsError = None
import sklearn
from sklearn.base import BaseEstimator
from sklearn.externals import joblib
from sklearn.utils import deprecated
additional_names_in_all = []
try:
from nose.tools import raises as _nose_raises
deprecation_message = (
'sklearn.utils.testing.raises has been deprecated in version 0.20 '
'and will be removed in 0.22. Please use '
'sklearn.utils.testing.assert_raises instead.')
raises = deprecated(deprecation_message)(_nose_raises)
additional_names_in_all.append('raises')
except ImportError:
pass
try:
from nose.tools import with_setup as _with_setup
deprecation_message = (
'sklearn.utils.testing.with_setup has been deprecated in version 0.20 '
'and will be removed in 0.22.'
'If your code relies on with_setup, please use'
' nose.tools.with_setup instead.')
with_setup = deprecated(deprecation_message)(_with_setup)
additional_names_in_all.append('with_setup')
except ImportError:
pass
from numpy.testing import assert_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_less
from numpy.testing import assert_approx_equal
import numpy as np
from sklearn.base import (ClassifierMixin, RegressorMixin, TransformerMixin,
ClusterMixin)
from sklearn.utils._unittest_backport import TestCase
__all__ = ["assert_equal", "assert_not_equal", "assert_raises",
"assert_raises_regexp", "assert_true",
"assert_false", "assert_almost_equal", "assert_array_equal",
"assert_array_almost_equal", "assert_array_less",
"assert_less", "assert_less_equal",
"assert_greater", "assert_greater_equal",
"assert_approx_equal", "SkipTest"]
__all__.extend(additional_names_in_all)
_dummy = TestCase('__init__')
assert_equal = _dummy.assertEqual
assert_not_equal = _dummy.assertNotEqual
assert_true = _dummy.assertTrue
assert_false = _dummy.assertFalse
assert_raises = _dummy.assertRaises
SkipTest = unittest.case.SkipTest
assert_dict_equal = _dummy.assertDictEqual
assert_in = _dummy.assertIn
assert_not_in = _dummy.assertNotIn
assert_less = _dummy.assertLess
assert_greater = _dummy.assertGreater
assert_less_equal = _dummy.assertLessEqual
assert_greater_equal = _dummy.assertGreaterEqual
assert_raises_regex = _dummy.assertRaisesRegex
# assert_raises_regexp is deprecated in Python 3.4 in favor of
# assert_raises_regex, but let's keep the backward compat in scikit-learn with
# the old name for now
assert_raises_regexp = assert_raises_regex
def assert_warns(warning_class, func, *args, **kw):
"""Test that a certain warning occurs.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`
Returns
-------
result : the return value of `func`
"""
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = any(warning.category is warning_class for warning in w)
if not found:
raise AssertionError("%s did not give warning: %s( is %s)"
% (func.__name__, warning_class, w))
return result
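# Illustrative usage sketch (added for clarity, not part of the original
# module); `_noisy` is a hypothetical function defined just for the example.
#
# >>> import warnings
# >>> def _noisy():
# ...     warnings.warn("watch out", UserWarning)
# ...     return 42
# >>> assert_warns(UserWarning, _noisy)
# 42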
def assert_warns_message(warning_class, message, func, *args, **kw):
# very important to avoid uncontrolled state propagation
"""Test that a certain warning occurs and with a certain message.
Parameters
----------
warning_class : the warning class
The class to test for, e.g. UserWarning.
message : str | callable
The entire message or a substring to test for. If callable,
it takes a string as argument and will trigger an assertion error
if it returns `False`.
func : callable
        Callable object to trigger warnings.
*args : the positional arguments to `func`.
**kw : the keyword arguments to `func`.
Returns
-------
result : the return value of `func`
"""
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
if hasattr(np, 'VisibleDeprecationWarning'):
# Let's not catch the numpy internal DeprecationWarnings
warnings.simplefilter('ignore', np.VisibleDeprecationWarning)
# Trigger a warning.
result = func(*args, **kw)
# Verify some things
if not len(w) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
found = [issubclass(warning.category, warning_class) for warning in w]
if not any(found):
raise AssertionError("No warning raised for %s with class "
"%s"
% (func.__name__, warning_class))
message_found = False
        # Check the messages of all warnings that belong to warning_class
for index in [i for i, x in enumerate(found) if x]:
# substring will match, the entire message with typo won't
msg = w[index].message # For Python 3 compatibility
msg = str(msg.args[0] if hasattr(msg, 'args') else msg)
if callable(message): # add support for certain tests
check_in_message = message
else:
check_in_message = lambda msg: message in msg
if check_in_message(msg):
message_found = True
break
if not message_found:
raise AssertionError("Did not receive the message you expected "
"('%s') for <%s>, got: '%s'"
% (message, func.__name__, msg))
return result
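# Illustrative usage sketch (added for clarity, not part of the original
# module); `_deprecated` is a hypothetical function defined for the example.
#
# >>> def _deprecated():
# ...     warnings.warn("spam will be removed in 0.22", DeprecationWarning)
# >>> assert_warns_message(DeprecationWarning, "will be removed", _deprecated)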
# To remove when we support numpy 1.7
def assert_no_warnings(func, *args, **kw):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
result = func(*args, **kw)
if hasattr(np, 'VisibleDeprecationWarning'):
# Filter out numpy-specific warnings in numpy >= 1.9
w = [e for e in w
if e.category is not np.VisibleDeprecationWarning]
if len(w) > 0:
raise AssertionError("Got warnings when calling %s: [%s]"
% (func.__name__,
', '.join(str(warning) for warning in w)))
return result
def ignore_warnings(obj=None, category=Warning):
"""Context manager and decorator to ignore warnings.
Note. Using this (in both variants) will clear all warnings
from all python modules loaded. In case you need to test
cross-module-warning-logging this is not your tool of choice.
Parameters
----------
category : warning class, defaults to Warning.
The category to filter. If Warning, all categories will be muted.
Examples
--------
>>> with ignore_warnings():
... warnings.warn('buhuhuhu')
>>> def nasty_warn():
... warnings.warn('buhuhuhu')
... print(42)
>>> ignore_warnings(nasty_warn)()
42
"""
if callable(obj):
return _IgnoreWarnings(category=category)(obj)
else:
return _IgnoreWarnings(category=category)
class _IgnoreWarnings(object):
"""Improved and simplified Python warnings context manager and decorator.
This class allows to ignore the warnings raise by a function.
Copied from Python 2.7.5 and modified as required.
Parameters
----------
category : tuple of warning class, default to Warning
The category to filter. By default, all the categories will be muted.
"""
def __init__(self, category):
self._record = True
self._module = sys.modules['warnings']
self._entered = False
self.log = []
self.category = category
def __call__(self, fn):
"""Decorator to catch and hide warnings without visual nesting."""
@wraps(fn)
def wrapper(*args, **kwargs):
# very important to avoid uncontrolled state propagation
clean_warning_registry()
with warnings.catch_warnings():
warnings.simplefilter("ignore", self.category)
return fn(*args, **kwargs)
return wrapper
def __repr__(self):
args = []
if self._record:
args.append("record=True")
if self._module is not sys.modules['warnings']:
args.append("module=%r" % self._module)
name = type(self).__name__
return "%s(%s)" % (name, ", ".join(args))
def __enter__(self):
clean_warning_registry() # be safe and not propagate state + chaos
warnings.simplefilter("ignore", self.category)
if self._entered:
raise RuntimeError("Cannot enter %r twice" % self)
self._entered = True
self._filters = self._module.filters
self._module.filters = self._filters[:]
self._showwarning = self._module.showwarning
def __exit__(self, *exc_info):
if not self._entered:
raise RuntimeError("Cannot exit %r without entering first" % self)
self._module.filters = self._filters
self._module.showwarning = self._showwarning
self.log[:] = []
clean_warning_registry() # be safe and not propagate state + chaos
assert_less = _dummy.assertLess
assert_greater = _dummy.assertGreater
assert_allclose = np.testing.assert_allclose
def assert_raise_message(exceptions, message, function, *args, **kwargs):
"""Helper function to test error messages in exceptions.
Parameters
----------
    exceptions : exception or tuple of exception
        The exception type (or types) expected to be raised.
    message : str
        A substring that the raised exception's message must contain.
    function : callable
        Callable object expected to raise the error.
*args : the positional arguments to `function`.
**kw : the keyword arguments to `function`
"""
try:
function(*args, **kwargs)
except exceptions as e:
error_message = str(e)
if message not in error_message:
raise AssertionError("Error message does not include the expected"
" string: %r. Observed error message: %r" %
(message, error_message))
else:
# concatenate exception names
if isinstance(exceptions, tuple):
names = " or ".join(e.__name__ for e in exceptions)
else:
names = exceptions.__name__
raise AssertionError("%s not raised by %s" %
(names, function.__name__))
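# Illustrative usage sketch (added for clarity, not part of the original
# module); `_check_positive` is a hypothetical function defined for the example.
#
# >>> def _check_positive(x):
# ...     if x <= 0:
# ...         raise ValueError("x must be positive, got %r" % x)
# >>> assert_raise_message(ValueError, "must be positive", _check_positive, -1)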
def assert_allclose_dense_sparse(x, y, rtol=1e-07, atol=1e-9, err_msg=''):
"""Assert allclose for sparse and dense data.
Both x and y need to be either sparse or dense, they
can't be mixed.
Parameters
----------
x : array-like or sparse matrix
First array to compare.
y : array-like or sparse matrix
Second array to compare.
rtol : float, optional
relative tolerance; see numpy.allclose
atol : float, optional
absolute tolerance; see numpy.allclose. Note that the default here is
more tolerant than the default for numpy.testing.assert_allclose, where
atol=0.
err_msg : string, default=''
Error message to raise.
"""
if sp.sparse.issparse(x) and sp.sparse.issparse(y):
x = x.tocsr()
y = y.tocsr()
x.sum_duplicates()
y.sum_duplicates()
assert_array_equal(x.indices, y.indices, err_msg=err_msg)
assert_array_equal(x.indptr, y.indptr, err_msg=err_msg)
assert_allclose(x.data, y.data, rtol=rtol, atol=atol, err_msg=err_msg)
elif not sp.sparse.issparse(x) and not sp.sparse.issparse(y):
# both dense
assert_allclose(x, y, rtol=rtol, atol=atol, err_msg=err_msg)
else:
raise ValueError("Can only compare two sparse matrices,"
" not a sparse matrix and an array.")
def fake_mldata(columns_dict, dataname, matfile, ordering=None):
"""Create a fake mldata data set.
Parameters
----------
columns_dict : dict, keys=str, values=ndarray
Contains data as columns_dict[column_name] = array of data.
dataname : string
Name of data set.
matfile : string or file object
The file name string or the file-like object of the output file.
ordering : list, default None
List of column_names, determines the ordering in the data set.
Notes
-----
This function transposes all arrays, while fetch_mldata only transposes
    'data'; take that into account in the tests.
"""
datasets = dict(columns_dict)
# transpose all variables
for name in datasets:
datasets[name] = datasets[name].T
if ordering is None:
ordering = sorted(list(datasets.keys()))
# NOTE: setting up this array is tricky, because of the way Matlab
# re-packages 1D arrays
datasets['mldata_descr_ordering'] = sp.empty((1, len(ordering)),
dtype='object')
for i, name in enumerate(ordering):
datasets['mldata_descr_ordering'][0, i] = name
scipy.io.savemat(matfile, datasets, oned_as='column')
class mock_mldata_urlopen(object):
def __init__(self, mock_datasets):
"""Object that mocks the urlopen function to fake requests to mldata.
`mock_datasets` is a dictionary of {dataset_name: data_dict}, or
        {dataset_name: (data_dict, ordering)}.
`data_dict` itself is a dictionary of {column_name: data_array},
and `ordering` is a list of column_names to determine the ordering
in the data set (see `fake_mldata` for details).
When requesting a dataset with a name that is in mock_datasets,
this object creates a fake dataset in a StringIO object and
returns it. Otherwise, it raises an HTTPError.
"""
self.mock_datasets = mock_datasets
def __call__(self, urlname):
dataset_name = urlname.split('/')[-1]
if dataset_name in self.mock_datasets:
resource_name = '_' + dataset_name
from io import BytesIO
matfile = BytesIO()
dataset = self.mock_datasets[dataset_name]
ordering = None
if isinstance(dataset, tuple):
dataset, ordering = dataset
fake_mldata(dataset, resource_name, matfile, ordering)
matfile.seek(0)
return matfile
else:
raise HTTPError(urlname, 404, dataset_name + " is not available",
[], None)
def install_mldata_mock(mock_datasets):
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = mock_mldata_urlopen(mock_datasets)
def uninstall_mldata_mock():
# Lazy import to avoid mutually recursive imports
from sklearn import datasets
datasets.mldata.urlopen = urlopen
# Meta estimators need another estimator to be instantiated.
META_ESTIMATORS = ["OneVsOneClassifier", "MultiOutputEstimator",
"MultiOutputRegressor", "MultiOutputClassifier",
"OutputCodeClassifier", "OneVsRestClassifier",
"RFE", "RFECV", "BaseEnsemble", "ClassifierChain"]
# estimators that there is no way to default-construct sensibly
OTHER = ["Pipeline", "FeatureUnion", "GridSearchCV", "RandomizedSearchCV",
"SelectFromModel"]
# some strange ones
DONT_TEST = ['SparseCoder', 'EllipticEnvelope', 'DictVectorizer',
'LabelBinarizer', 'LabelEncoder',
'MultiLabelBinarizer', 'TfidfTransformer',
'TfidfVectorizer', 'IsotonicRegression',
'OneHotEncoder', 'RandomTreesEmbedding', 'CategoricalEncoder',
'FeatureHasher', 'DummyClassifier', 'DummyRegressor',
'TruncatedSVD', 'PolynomialFeatures',
'GaussianRandomProjectionHash', 'HashingVectorizer',
'CheckingClassifier', 'PatchExtractor', 'CountVectorizer',
# GradientBoosting base estimators, maybe should
# exclude them in another way
'ZeroEstimator', 'ScaledLogOddsEstimator',
'QuantileEstimator', 'MeanEstimator',
'LogOddsEstimator', 'PriorProbabilityEstimator',
'_SigmoidCalibration', 'VotingClassifier']
def all_estimators(include_meta_estimators=False,
include_other=False, type_filter=None,
include_dont_test=False):
"""Get a list of all estimators from sklearn.
This function crawls the module and gets all classes that inherit
from BaseEstimator. Classes that are defined in test-modules are not
included.
By default meta_estimators such as GridSearchCV are also not included.
Parameters
----------
include_meta_estimators : boolean, default=False
Whether to include meta-estimators that can be constructed using
an estimator as their first argument. These are currently
BaseEnsemble, OneVsOneClassifier, OutputCodeClassifier,
OneVsRestClassifier, RFE, RFECV.
include_other : boolean, default=False
        Whether to include meta-estimators that are somehow special and
        cannot be default-constructed sensibly. These are currently
        Pipeline, FeatureUnion and GridSearchCV.
    include_dont_test : boolean, default=False
        Whether to include "special" label estimators or test processors.
type_filter : string, list of string, or None, default=None
Which kind of estimators should be returned. If None, no filter is
applied and all estimators are returned. Possible values are
'classifier', 'regressor', 'cluster' and 'transformer' to get
estimators only of these specific types, or a list of these to
get the estimators that fit at least one of the types.
Returns
-------
estimators : list of tuples
List of (name, class), where ``name`` is the class name as string
        and ``class`` is the actual type of the class.
"""
def is_abstract(c):
if not(hasattr(c, '__abstractmethods__')):
return False
if not len(c.__abstractmethods__):
return False
return True
all_classes = []
# get parent folder
path = sklearn.__path__
for importer, modname, ispkg in pkgutil.walk_packages(
path=path, prefix='sklearn.', onerror=lambda x: None):
if (".tests." in modname):
continue
module = __import__(modname, fromlist="dummy")
classes = inspect.getmembers(module, inspect.isclass)
all_classes.extend(classes)
all_classes = set(all_classes)
estimators = [c for c in all_classes
if (issubclass(c[1], BaseEstimator) and
c[0] != 'BaseEstimator')]
# get rid of abstract base classes
estimators = [c for c in estimators if not is_abstract(c[1])]
if not include_dont_test:
estimators = [c for c in estimators if not c[0] in DONT_TEST]
if not include_other:
estimators = [c for c in estimators if not c[0] in OTHER]
# possibly get rid of meta estimators
if not include_meta_estimators:
estimators = [c for c in estimators if not c[0] in META_ESTIMATORS]
if type_filter is not None:
if not isinstance(type_filter, list):
type_filter = [type_filter]
else:
type_filter = list(type_filter) # copy
filtered_estimators = []
filters = {'classifier': ClassifierMixin,
'regressor': RegressorMixin,
'transformer': TransformerMixin,
'cluster': ClusterMixin}
for name, mixin in filters.items():
if name in type_filter:
type_filter.remove(name)
filtered_estimators.extend([est for est in estimators
if issubclass(est[1], mixin)])
estimators = filtered_estimators
if type_filter:
raise ValueError("Parameter type_filter must be 'classifier', "
"'regressor', 'transformer', 'cluster' or "
"None, got"
" %s." % repr(type_filter))
# drop duplicates, sort for reproducibility
# itemgetter is used to ensure the sort does not extend to the 2nd item of
# the tuple
return sorted(set(estimators), key=itemgetter(0))
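# Illustrative usage sketch (added for clarity, not part of the original
# module): list only classifiers and check one well-known name.
#
# >>> classifiers = all_estimators(type_filter='classifier')
# >>> names = [name for name, cls in classifiers]
# >>> 'LogisticRegression' in names
# True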
def set_random_state(estimator, random_state=0):
"""Set random state of an estimator if it has the `random_state` param.
"""
if "random_state" in estimator.get_params():
estimator.set_params(random_state=random_state)
def if_matplotlib(func):
"""Test decorator that skips test if matplotlib not installed."""
@wraps(func)
def run_test(*args, **kwargs):
try:
import matplotlib
matplotlib.use('Agg', warn=False)
# this fails if no $DISPLAY specified
import matplotlib.pyplot as plt
plt.figure()
except ImportError:
raise SkipTest('Matplotlib not available.')
else:
return func(*args, **kwargs)
return run_test
def skip_if_32bit(func):
"""Test decorator that skips tests on 32bit platforms."""
@wraps(func)
def run_test(*args, **kwargs):
bits = 8 * struct.calcsize("P")
if bits == 32:
raise SkipTest('Test skipped on 32bit platforms.')
else:
return func(*args, **kwargs)
return run_test
def if_safe_multiprocessing_with_blas(func):
"""Decorator for tests involving both BLAS calls and multiprocessing.
Under POSIX (e.g. Linux or OSX), using multiprocessing in conjunction with
some implementation of BLAS (or other libraries that manage an internal
posix thread pool) can cause a crash or a freeze of the Python process.
    In practice all known packaged distributions of BLAS under Linux (from
    Linux distros or Anaconda) seem to be safe, so this problem seems to
    only impact OSX users.
    This wrapper makes it possible to skip tests that can possibly cause
    this crash under OS X.
    Under Python 3.4+ it is possible to use the `forkserver` start method
    for multiprocessing to avoid this issue. However it can cause pickling
    errors on interactively defined functions. It is therefore not enabled
    by default.
"""
@wraps(func)
def run_test(*args, **kwargs):
if sys.platform == 'darwin':
raise SkipTest(
"Possible multi-process bug with some BLAS")
return func(*args, **kwargs)
return run_test
def clean_warning_registry():
"""Safe way to reset warnings."""
warnings.resetwarnings()
reg = "__warningregistry__"
for mod_name, mod in list(sys.modules.items()):
if 'six.moves' in mod_name:
continue
if hasattr(mod, reg):
getattr(mod, reg).clear()
def check_skip_network():
if int(os.environ.get('SKLEARN_SKIP_NETWORK_TESTS', 0)):
raise SkipTest("Text tutorial requires large dataset download")
def check_skip_travis():
"""Skip test if being run on Travis."""
if os.environ.get('TRAVIS') == "true":
raise SkipTest("This test needs to be skipped on Travis")
def _delete_folder(folder_path, warn=False):
"""Utility function to cleanup a temporary folder if still existing.
Copy from joblib.pool (for independence).
"""
try:
if os.path.exists(folder_path):
# This can fail under windows,
# but will succeed when called by atexit
shutil.rmtree(folder_path)
except WindowsError:
if warn:
warnings.warn("Could not delete temporary folder %s" % folder_path)
class TempMemmap(object):
def __init__(self, data, mmap_mode='r'):
self.temp_folder = tempfile.mkdtemp(prefix='sklearn_testing_')
self.mmap_mode = mmap_mode
self.data = data
def __enter__(self):
fpath = op.join(self.temp_folder, 'data.pkl')
joblib.dump(self.data, fpath)
data_read_only = joblib.load(fpath, mmap_mode=self.mmap_mode)
atexit.register(lambda: _delete_folder(self.temp_folder, warn=True))
return data_read_only
def __exit__(self, exc_type, exc_val, exc_tb):
_delete_folder(self.temp_folder)
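# Illustrative usage sketch (added for clarity, not part of the original
# module): the data yielded inside the block is a read-only memmap.
#
# >>> import numpy as np
# >>> X = np.arange(12.).reshape(3, 4)
# >>> with TempMemmap(X) as X_readonly:
# ...     print(X_readonly.flags.writeable)
# False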
# Utils to test docstrings
def _get_args(function, varargs=False):
"""Helper to get function arguments"""
    # NOTE this works only in python >= 3.5
    if sys.version_info < (3, 5):
        raise NotImplementedError("_get_args is not available for python < 3.5")
params = inspect.signature(function).parameters
args = [key for key, param in params.items()
if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)]
if varargs:
varargs = [param.name for param in params.values()
if param.kind == param.VAR_POSITIONAL]
if len(varargs) == 0:
varargs = None
return args, varargs
else:
return args
def _get_func_name(func, class_name=None):
"""Get function full name
Parameters
----------
func : callable
The function object.
class_name : string, optional (default: None)
If ``func`` is a class method and the class name is known specify
class_name for the error message.
Returns
-------
name : str
The function name.
"""
parts = []
module = inspect.getmodule(func)
if module:
parts.append(module.__name__)
if class_name is not None:
parts.append(class_name)
elif hasattr(func, 'im_class'):
parts.append(func.im_class.__name__)
parts.append(func.__name__)
return '.'.join(parts)
def check_docstring_parameters(func, doc=None, ignore=None, class_name=None):
"""Helper to check docstring
Parameters
----------
func : callable
The function object to test.
doc : str, optional (default: None)
Docstring if it is passed manually to the test.
ignore : None | list
Parameters to ignore.
class_name : string, optional (default: None)
If ``func`` is a class method and the class name is known specify
class_name for the error message.
Returns
-------
incorrect : list
A list of string describing the incorrect results.
"""
from numpydoc import docscrape
incorrect = []
ignore = [] if ignore is None else ignore
func_name = _get_func_name(func, class_name=class_name)
if (not func_name.startswith('sklearn.') or
func_name.startswith('sklearn.externals')):
return incorrect
# Don't check docstring for property-functions
if inspect.isdatadescriptor(func):
return incorrect
args = list(filter(lambda x: x not in ignore, _get_args(func)))
# drop self
if len(args) > 0 and args[0] == 'self':
args.remove('self')
if doc is None:
with warnings.catch_warnings(record=True) as w:
try:
doc = docscrape.FunctionDoc(func)
except Exception as exp:
incorrect += [func_name + ' parsing error: ' + str(exp)]
return incorrect
if len(w):
raise RuntimeError('Error for %s:\n%s' % (func_name, w[0]))
param_names = []
for name, type_definition, param_doc in doc['Parameters']:
if (type_definition.strip() == "" or
type_definition.strip().startswith(':')):
param_name = name.lstrip()
# If there was no space between name and the colon
# "verbose:" -> len(["verbose", ""][0]) -> 7
# If "verbose:"[7] == ":", then there was no space
if (':' not in param_name or
param_name[len(param_name.split(':')[0].strip())] == ':'):
incorrect += [func_name +
' There was no space between the param name and '
'colon ("%s")' % name]
else:
incorrect += [func_name + ' Incorrect type definition for '
'param: "%s" (type definition was "%s")'
% (name.split(':')[0], type_definition)]
if '*' not in name:
param_names.append(name.split(':')[0].strip('` '))
param_names = list(filter(lambda x: x not in ignore, param_names))
if len(param_names) != len(args):
bad = str(sorted(list(set(param_names) ^ set(args))))
incorrect += [func_name + ' arg mismatch: ' + bad]
else:
for n1, n2 in zip(param_names, args):
if n1 != n2:
incorrect += [func_name + ' ' + n1 + ' != ' + n2]
return incorrect
| bsd-3-clause |
pravsripad/mne-python | mne/tests/test_morph_map.py | 9 | 2210 | # Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD-3-Clause
import os
import os.path as op
from shutil import copyfile
import pytest
import numpy as np
from numpy.testing import assert_allclose
from scipy import sparse
from mne.datasets import testing
from mne.utils import catch_logging, _record_warnings
from mne import read_morph_map
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
@pytest.mark.slowtest
@testing.requires_testing_data
def test_make_morph_maps(tmp_path):
"""Test reading and creating morph maps."""
# make a new fake subjects_dir
tempdir = str(tmp_path)
for subject in ('sample', 'sample_ds', 'fsaverage_ds'):
os.mkdir(op.join(tempdir, subject))
os.mkdir(op.join(tempdir, subject, 'surf'))
regs = ('reg', 'left_right') if subject == 'fsaverage_ds' else ('reg',)
for hemi in ['lh', 'rh']:
for reg in regs:
args = [subject, 'surf', hemi + '.sphere.' + reg]
copyfile(op.join(subjects_dir, *args),
op.join(tempdir, *args))
for subject_from, subject_to, xhemi in (
('fsaverage_ds', 'sample_ds', False),
('fsaverage_ds', 'fsaverage_ds', True)):
# trigger the creation of morph-maps dir and create the map
with catch_logging() as log:
mmap = read_morph_map(subject_from, subject_to, tempdir,
xhemi=xhemi, verbose=True)
log = log.getvalue()
assert 'does not exist' in log
assert 'Creating' in log
mmap2 = read_morph_map(subject_from, subject_to, subjects_dir,
xhemi=xhemi)
assert len(mmap) == len(mmap2)
for m1, m2 in zip(mmap, mmap2):
# deal with sparse matrix stuff
diff = (m1 - m2).data
assert_allclose(diff, np.zeros_like(diff), atol=1e-3, rtol=0)
# This will also trigger creation, but it's trivial
with _record_warnings():
mmap = read_morph_map('sample', 'sample', subjects_dir=tempdir)
for mm in mmap:
assert (mm - sparse.eye(mm.shape[0], mm.shape[0])).sum() == 0
| bsd-3-clause |
pravsripad/mne-python | mne/io/tests/test_what.py | 9 | 1747 | # Authors: Eric Larson <[email protected]>
# License: BSD
import glob
import os.path as op
import numpy as np
import pytest
from mne import what, create_info
from mne.datasets import testing
from mne.io import RawArray
from mne.preprocessing import ICA
from mne.utils import requires_sklearn, _record_warnings
data_path = testing.data_path(download=False)
@pytest.mark.slowtest
@requires_sklearn
@testing.requires_testing_data
def test_what(tmp_path, verbose_debug):
"""Test mne.what."""
# ICA
ica = ICA(max_iter=1)
raw = RawArray(np.random.RandomState(0).randn(3, 10),
create_info(3, 1000., 'eeg'))
with _record_warnings(): # convergence sometimes
ica.fit(raw)
fname = op.join(str(tmp_path), 'x-ica.fif')
ica.save(fname)
assert what(fname) == 'ica'
# test files
fnames = glob.glob(
op.join(data_path, 'MEG', 'sample', '*.fif'))
fnames += glob.glob(
op.join(data_path, 'subjects', 'sample', 'bem', '*.fif'))
fnames = sorted(fnames)
want_dict = dict(eve='events', ave='evoked', cov='cov', inv='inverse',
fwd='forward', trans='transform', proj='proj',
raw='raw', meg='raw', sol='bem solution',
bem='bem surfaces', src='src', dense='bem surfaces',
sparse='bem surfaces', head='bem surfaces',
fiducials='fiducials')
for fname in fnames:
kind = op.splitext(fname)[0].split('-')[-1]
if len(kind) > 5:
kind = kind.split('_')[-1]
this = what(fname)
assert this == want_dict[kind]
fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave_xfit.dip')
assert what(fname) == 'unknown'
| bsd-3-clause |
jcalbert/TextBlob | textblob/en/sentiments.py | 16 | 3129 | # -*- coding: utf-8 -*-
"""Sentiment analysis implementations.
.. versionadded:: 0.5.0
"""
from __future__ import absolute_import
from collections import namedtuple
import nltk
from textblob.en import sentiment as pattern_sentiment
from textblob.tokenizers import word_tokenize
from textblob.decorators import requires_nltk_corpus
from textblob.base import BaseSentimentAnalyzer, DISCRETE, CONTINUOUS
class PatternAnalyzer(BaseSentimentAnalyzer):
"""Sentiment analyzer that uses the same implementation as the
pattern library. Returns results as a named tuple of the form:
``Sentiment(polarity, subjectivity)``
"""
kind = CONTINUOUS
#: Return type declaration
RETURN_TYPE = namedtuple('Sentiment', ['polarity', 'subjectivity'])
def analyze(self, text):
"""Return the sentiment as a named tuple of the form:
``Sentiment(polarity, subjectivity)``.
"""
return self.RETURN_TYPE(*pattern_sentiment(text))
def _default_feature_extractor(words):
"""Default feature extractor for the NaiveBayesAnalyzer."""
return dict(((word, True) for word in words))
class NaiveBayesAnalyzer(BaseSentimentAnalyzer):
"""Naive Bayes analyzer that is trained on a dataset of movie reviews.
Returns results as a named tuple of the form:
``Sentiment(classification, p_pos, p_neg)``
:param callable feature_extractor: Function that returns a dictionary of
features, given a list of words.
"""
kind = DISCRETE
#: Return type declaration
RETURN_TYPE = namedtuple('Sentiment', ['classification', 'p_pos', 'p_neg'])
def __init__(self, feature_extractor=_default_feature_extractor):
super(NaiveBayesAnalyzer, self).__init__()
self._classifier = None
self.feature_extractor = feature_extractor
@requires_nltk_corpus
def train(self):
"""Train the Naive Bayes classifier on the movie review corpus."""
super(NaiveBayesAnalyzer, self).train()
neg_ids = nltk.corpus.movie_reviews.fileids('neg')
pos_ids = nltk.corpus.movie_reviews.fileids('pos')
neg_feats = [(self.feature_extractor(
nltk.corpus.movie_reviews.words(fileids=[f])), 'neg') for f in neg_ids]
pos_feats = [(self.feature_extractor(
nltk.corpus.movie_reviews.words(fileids=[f])), 'pos') for f in pos_ids]
train_data = neg_feats + pos_feats
self._classifier = nltk.classify.NaiveBayesClassifier.train(train_data)
def analyze(self, text):
"""Return the sentiment as a named tuple of the form:
``Sentiment(classification, p_pos, p_neg)``
"""
# Lazily train the classifier
super(NaiveBayesAnalyzer, self).analyze(text)
tokens = word_tokenize(text, include_punc=False)
filtered = (t.lower() for t in tokens if len(t) >= 3)
feats = self.feature_extractor(filtered)
prob_dist = self._classifier.prob_classify(feats)
return self.RETURN_TYPE(
classification=prob_dist.max(),
p_pos=prob_dist.prob('pos'),
p_neg=prob_dist.prob("neg")
)
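# Illustrative usage sketch (added for clarity, not part of the original
# module). PatternAnalyzer needs no training; NaiveBayesAnalyzer trains
# lazily on the NLTK movie_reviews corpus on first use, which is slow and
# requires that corpus to be downloaded.
#
# >>> PatternAnalyzer().analyze("I love this library").polarity > 0
# True
# >>> result = NaiveBayesAnalyzer().analyze("What a wonderful movie!")
# >>> result.classification in ('pos', 'neg')
# True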
| mit |
pravsripad/mne-python | examples/forward/left_cerebellum_volume_source.py | 11 | 2190 | # -*- coding: utf-8 -*-
"""
.. _ex-cerebellum-source-space:
==============================================
Generate a left cerebellum volume source space
==============================================
Generate a volume source space of the left cerebellum and plot its vertices
relative to the left cortical surface source space and the FreeSurfer
segmentation file.
"""
# Author: Alan Leggitt <[email protected]>
#
# License: BSD-3-Clause
# %%
import mne
from mne import setup_source_space, setup_volume_source_space
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
subjects_dir = data_path / 'subjects'
subject = 'sample'
aseg_fname = subjects_dir / 'sample' / 'mri' / 'aseg.mgz'
# %%
# Setup the source spaces
# setup a cortical surface source space and extract left hemisphere
surf = setup_source_space(subject, subjects_dir=subjects_dir, add_dist=False)
lh_surf = surf[0]
# setup a volume source space of the left cerebellum cortex
volume_label = 'Left-Cerebellum-Cortex'
sphere = (0, 0, 0, 0.12)
lh_cereb = setup_volume_source_space(
subject, mri=aseg_fname, sphere=sphere, volume_label=volume_label,
subjects_dir=subjects_dir, sphere_units='m')
# Combine the source spaces
src = surf + lh_cereb
# %%
# Plot the positions of each source space
fig = mne.viz.plot_alignment(subject=subject, subjects_dir=subjects_dir,
surfaces='white', coord_frame='mri',
src=src)
mne.viz.set_3d_view(fig, azimuth=180, elevation=90,
distance=0.30, focalpoint=(-0.03, -0.01, 0.03))
# %%
# You can export source positions to a NIfTI file::
#
# >>> nii_fname = 'mne_sample_lh-cerebellum-cortex.nii'
# >>> src.export_volume(nii_fname, mri_resolution=True)
#
# And display source positions in freeview::
#
# >>> from mne.utils import run_subprocess
# >>> mri_fname = subjects_dir + '/sample/mri/brain.mgz'
# >>> run_subprocess(['freeview', '-v', mri_fname, '-v',
# '%s:colormap=lut:opacity=0.5' % aseg_fname, '-v',
# '%s:colormap=jet:colorscale=0,2' % nii_fname,
# '-slice', '157 75 105'])
| bsd-3-clause |
h2oai/h2o | py/h2o_methods.py | 8 | 64964 |
import os, sys, time, requests, zipfile, StringIO
import h2o_args
# from h2o_cmd import runInspect, infoFromSummary
import h2o_cmd, h2o_util
import h2o_browse as h2b
import h2o_print as h2p
from h2o_objects import H2O
from h2o_test import verboseprint, dump_json, check_sandbox_for_errors, get_sandbox_name, log
# print "h2o_methods"
def check_params_update_kwargs(params_dict, kw, function, print_params):
# only update params_dict..don't add
# throw away anything else as it should come from the model (propagating what RF used)
for k in kw:
if k in params_dict:
params_dict[k] = kw[k]
else:
raise Exception("illegal parameter '%s' in %s" % (k, function))
if print_params:
print "%s parameters:" % function, params_dict
sys.stdout.flush()
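# Illustrative usage sketch (added for clarity, not part of the original
# module): only keys already present in params_dict may be overridden.
#
# >>> params = {'k': 1, 'source': 'train.hex'}
# >>> check_params_update_kwargs(params, {'k': 3}, 'kmeans', print_params=False)
# >>> params['k']
# 3
# An unknown key such as {'bogus': 1} would raise an Exception instead.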
def get_cloud(self, noExtraErrorCheck=False, timeoutSecs=10):
    # use the caller-supplied timeoutSecs (default 10) rather than a hardwired 60 second timeout
a = self.do_json_request('Cloud.json', noExtraErrorCheck=noExtraErrorCheck, timeout=timeoutSecs)
version = a['version']
if version and version!='(unknown)' and version!='null' and version!='none':
if not version.startswith('2'):
h2p.red_print("h2o version at node[0] doesn't look like h2o version. (start with 2) %s" % version)
consensus = a['consensus']
locked = a['locked']
cloud_size = a['cloud_size']
cloud_name = a['cloud_name']
node_name = a['node_name']
node_id = self.node_id
verboseprint('%s%s %s%s %s%s %s%s %s%s' % (
"\tnode_id: ", node_id,
"\tcloud_size: ", cloud_size,
"\tconsensus: ", consensus,
"\tlocked: ", locked,
"\tversion: ", version,
))
return a
def h2o_log_msg(self, message=None, timeoutSecs=15):
if 1 == 0:
return
if not message:
message = "\n"
message += "\n#***********************"
message += "\npython_test_name: " + h2o_args.python_test_name
message += "\n#***********************"
params = {'message': message}
self.do_json_request('2/LogAndEcho', params=params, timeout=timeoutSecs)
def get_timeline(self):
return self.do_json_request('Timeline.json')
# Shutdown url is like a reset button. Doesn't send a response before it kills stuff
# It's safer not to require a response in case random things are wedged,
# so the requests library might retry and get an exception; allow that.
def shutdown_all(self):
try:
self.do_json_request('Shutdown.json', noExtraErrorCheck=True)
except:
pass
    # don't want delays between sending these to each node
# if you care, wait after you send them to each node
# Seems like it's not so good to just send to one node
# time.sleep(1) # a little delay needed?
return (True)
def put_value(self, value, key=None, repl=None):
return self.do_json_request(
'PutValue.json',
params={"value": value, "key": key, "replication_factor": repl},
extraComment=str(value) + "," + str(key) + "," + str(repl))
# {"Request2":0,"response_info":i
# {"h2o":"pytest-kevin-4530","node":"/192.168.0.37:54321","time":0,"status":"done","redirect_url":null},
# "levels":[null,null,null,null]}
# FIX! what is this for? R uses it. Get one per col? maybe something about enums
def levels(self, source=None):
return self.do_json_request(
'2/Levels2.json',
params={"source": source},
)
def export_files(self, print_params=True, timeoutSecs=60, **kwargs):
params_dict = {
'src_key': None,
'path': None,
'force': None,
}
check_params_update_kwargs(params_dict, kwargs, 'export_files', print_params)
return self.do_json_request(
'2/ExportFiles.json',
timeout=timeoutSecs,
params=params_dict,
)
def put_file(self, f, key=None, timeoutSecs=60):
if key is None:
key = os.path.basename(f)
### print "putfile specifying this key:", key
fileObj = open(f, 'rb')
resp = self.do_json_request(
'2/PostFile.json',
cmd='post',
timeout=timeoutSecs,
params={"key": key},
files={"file": fileObj},
extraComment=str(f))
verboseprint("\nput_file response: ", dump_json(resp))
fileObj.close()
return key
# noise is a 2-tuple ("StoreView", None) for the url plus args used during polling to create noise
# so we can create noise with different urls, and different params to that url
# no noise if None
def poll_url(self, response,
timeoutSecs=10, retryDelaySecs=0.5, initialDelaySecs=0, pollTimeoutSecs=180,
noise=None, benchmarkLogging=None, noPoll=False, reuseFirstPollUrl=False, noPrint=False):
verboseprint('poll_url input: response:', dump_json(response))
### print "poll_url: pollTimeoutSecs", pollTimeoutSecs
### print "at top of poll_url, timeoutSecs: ", timeoutSecs
# for the rev 2 stuff..the job_key, destination_key and redirect_url are just in the response
# look for 'response'..if not there, assume the rev 2
def get_redirect_url(response):
url = None
params = None
# StoreView has old style, while beta_features
if 'response_info' in response:
response_info = response['response_info']
if 'redirect_url' not in response_info:
raise Exception("Response during polling must have 'redirect_url'\n%s" % dump_json(response))
if response_info['status'] != 'done':
redirect_url = response_info['redirect_url']
if redirect_url:
url = self.url(redirect_url)
params = None
else:
if response_info['status'] != 'done':
raise Exception(
"'redirect_url' during polling is null but status!='done': \n%s" % dump_json(response))
else:
if 'response' not in response:
raise Exception("'response' not in response.\n%s" % dump_json(response))
if response['response']['status'] != 'done':
if 'redirect_request' not in response['response']:
raise Exception("'redirect_request' not in response. \n%s" % dump_json(response))
url = self.url(response['response']['redirect_request'])
params = response['response']['redirect_request_args']
return (url, params)
# if we never poll
msgUsed = None
if 'response_info' in response: # trigger v2 for GBM always?
status = response['response_info']['status']
progress = response.get('progress', "")
else:
r = response['response']
status = r['status']
progress = r.get('progress', "")
doFirstPoll = status != 'done'
(url, params) = get_redirect_url(response)
# no need to recreate the string for messaging, in the loop..
if params:
paramsStr = '&'.join(['%s=%s' % (k, v) for (k, v) in params.items()])
else:
paramsStr = ''
# FIX! don't do JStack noise for tests that ask for it. JStack seems to have problems
noise_enable = noise and noise != ("JStack", None)
if noise_enable:
print "Using noise during poll_url:", noise
# noise_json should be like "Storeview"
(noise_json, noiseParams) = noise
noiseUrl = self.url(noise_json + ".json")
if noiseParams is None:
noiseParamsStr = ""
else:
noiseParamsStr = '&'.join(['%s=%s' % (k, v) for (k, v) in noiseParams.items()])
start = time.time()
count = 0
if initialDelaySecs:
time.sleep(initialDelaySecs)
# can end with status = 'redirect' or 'done'
# Update: on DRF2, the first RF redirects to progress. So we should follow that, and follow any redirect to view?
# so for v2, we'll always follow redirects?
# For v1, we're not forcing the first status to be 'poll' now..so it could be redirect or done?(NN score? if blocking)
# Don't follow the Parse redirect to Inspect, because we want parseResult['destination_key'] to be the end.
# note this doesn't affect polling with Inspect? (since it doesn't redirect ?
while status == 'poll' or doFirstPoll or (status == 'redirect' and 'Inspect' not in url):
count += 1
if ((time.time() - start) > timeoutSecs):
# show what we're polling with
emsg = "Exceeded timeoutSecs: %d secs while polling." % timeoutSecs + \
"status: %s, url: %s?%s" % (status, urlUsed, paramsUsedStr)
raise Exception(emsg)
if benchmarkLogging:
import h2o
h2o.cloudPerfH2O.get_log_save(benchmarkLogging)
# every other one?
create_noise = noise_enable and ((count % 2) == 0)
if create_noise:
urlUsed = noiseUrl
paramsUsed = noiseParams
paramsUsedStr = noiseParamsStr
msgUsed = "\nNoise during polling with"
else:
urlUsed = url
paramsUsed = params
paramsUsedStr = paramsStr
msgUsed = "\nPolling with"
print status, progress, urlUsed
time.sleep(retryDelaySecs)
response = self.do_json_request(fullUrl=urlUsed, timeout=pollTimeoutSecs, params=paramsUsed)
verboseprint(msgUsed, urlUsed, paramsUsedStr, "Response:", dump_json(response))
# hey, check the sandbox if we've been waiting a long time...rather than wait for timeout
if ((count % 6) == 0):
check_sandbox_for_errors(python_test_name=h2o_args.python_test_name)
if (create_noise):
# this guarantees the loop is done, so we don't need to worry about
# a 'return r' being interpreted from a noise response
status = 'poll'
progress = ''
else:
doFirstPoll = False
status = response['response_info']['status']
progress = response.get('progress', "")
# get the redirect url
if not reuseFirstPollUrl: # reuse url for all v1 stuff
(url, params) = get_redirect_url(response)
if noPoll:
return response
# won't print if we didn't poll
if msgUsed:
verboseprint(msgUsed, urlUsed, paramsUsedStr, "Response:", dump_json(response))
return response
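# Illustrative polling pattern (sketch, not part of the original module;
# `node` is a hypothetical H2O node object and the key/params are made up):
#
# >>> first = node.do_json_request('2/KMeans2.json',
# ...                              params={'source': 'train.hex', 'k': 3})
# >>> result = node.poll_url(first, timeoutSecs=300, retryDelaySecs=0.5)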
# this is only for 2 (fvec)
def kmeans_view(self, model=None, timeoutSecs=30, **kwargs):
# defaults
params_dict = {
'_modelKey': model,
}
browseAlso = kwargs.get('browseAlso', False)
# only lets these params thru
check_params_update_kwargs(params_dict, kwargs, 'kmeans_view', print_params=True)
print "\nKMeans2ModelView params list:", params_dict
a = self.do_json_request('2/KMeans2ModelView.json', timeout=timeoutSecs, params=params_dict)
# kmeans_score doesn't need polling?
verboseprint("\nKMeans2Model View result:", dump_json(a))
if (browseAlso | h2o_args.browse_json):
print "Redoing the KMeans2ModelView through the browser, no results saved though"
h2b.browseJsonHistoryAsUrlLastMatch('KMeans2ModelView')
time.sleep(5)
return a
# additional params include: cols=.
# don't need to include in params_dict it doesn't need a default
# FIX! cols should be renamed in test for fvec
def kmeans(self, key, key2=None,
timeoutSecs=300, retryDelaySecs=0.2, initialDelaySecs=None, pollTimeoutSecs=180,
noise=None, benchmarkLogging=None, noPoll=False, **kwargs):
# defaults
# KMeans has more params than shown here
# KMeans2 has these params?
# max_iter=100&max_iter2=1&iterations=0
params_dict = {
'initialization': 'Furthest',
'k': 1,
'source': key,
'destination_key': key2,
'seed': None,
'cols': None,
'ignored_cols': None,
'ignored_cols_by_name': None,
'max_iter': None,
'normalize': None,
'drop_na_cols': None,
}
if key2 is not None: params_dict['destination_key'] = key2
browseAlso = kwargs.get('browseAlso', False)
# only lets these params thru
check_params_update_kwargs(params_dict, kwargs, 'kmeans', print_params=True)
algo = '2/KMeans2'
print "\n%s params list:" % algo, params_dict
a1 = self.do_json_request(algo + '.json',
timeout=timeoutSecs, params=params_dict)
if noPoll:
return a1
a1 = self.poll_url(a1, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs,
initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs,
noise=noise, benchmarkLogging=benchmarkLogging)
print "For now, always dumping the last polled kmeans result ..are the centers good"
print "\n%s result:" % algo, dump_json(a1)
# if we want to return the model view like the browser
if 1==0:
# HACK! always do a model view. kmeans last result isn't good? (at least not always)
a = self.kmeans_view(model=a1['model']['_key'], timeoutSecs=30)
verboseprint("\n%s model view result:" % algo, dump_json(a))
else:
a = a1
if (browseAlso | h2o_args.browse_json):
print "Redoing the %s through the browser, no results saved though" % algo
h2b.browseJsonHistoryAsUrlLastMatch(algo)
time.sleep(5)
return a
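# Illustrative call sketch (not part of the original module; `node` is a
# hypothetical H2O node object and the keys are made up):
#
# >>> kmeansResult = node.kmeans('train.hex', key2='kmeans_model',
# ...                            k=3, max_iter=10, timeoutSecs=300)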
# params:
# header=1,
# header_from_file
# separator=1 (hex encoded?)
# exclude=
# noise is a 2-tuple: ("StoreView",params_dict)
def parse(self, key, key2=None,
timeoutSecs=300, retryDelaySecs=0.2, initialDelaySecs=None, pollTimeoutSecs=180,
noise=None, benchmarkLogging=None, noPoll=False, **kwargs):
browseAlso = kwargs.pop('browseAlso', False)
# this doesn't work. webforums indicate max_retries might be 0 already? (as of 3 months ago)
# requests.defaults({max_retries : 4})
# https://github.com/kennethreitz/requests/issues/719
# it was closed saying Requests doesn't do retries. (documentation implies otherwise)
algo = "2/Parse2"
verboseprint("\n %s key: %s to key2: %s (if None, means default)" % (algo, key, key2))
    # other h2o parse parameters, not in the defaults
# header
# exclude
params_dict = {
'blocking': None, # debug only
'source_key': key, # can be a regex
'destination_key': key2,
'parser_type': None,
'separator': None,
'header': None,
'single_quotes': None,
'header_from_file': None,
'exclude': None,
'delete_on_done': None,
'preview': None,
}
check_params_update_kwargs(params_dict, kwargs, 'parse', print_params=True)
# h2o requires header=1 if header_from_file is used. Force it here to avoid bad test issues
if kwargs.get('header_from_file'): # default None
kwargs['header'] = 1
if benchmarkLogging:
import h2o
h2o.cloudPerfH2O.get_log_save(initOnly=True)
a = self.do_json_request(algo + ".json", timeout=timeoutSecs, params=params_dict)
# Check that the response has the right Progress url it's going to steer us to.
verboseprint(algo + " result:", dump_json(a))
if noPoll:
return a
    # noise is a 2-tuple ("StoreView", None) for the url plus args used during polling to create noise
# no noise if None
verboseprint(algo + ' noise:', noise)
a = self.poll_url(a, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs,
initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs,
noise=noise, benchmarkLogging=benchmarkLogging)
verboseprint("\n" + algo + " result:", dump_json(a))
return a
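# Illustrative call sketch (not part of the original module; the path and
# keys are made up and assume the file was made visible with import_files):
#
# >>> node.import_files('/home/0xdiag/datasets/standard')
# >>> parseResult = node.parse('*iris.csv', key2='iris.hex', timeoutSecs=300)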
def netstat(self):
return self.do_json_request('Network.json')
def linux_info(self, timeoutSecs=30):
return self.do_json_request("CollectLinuxInfo.json", timeout=timeoutSecs)
def jstack(self, timeoutSecs=30):
return self.do_json_request("JStack.json", timeout=timeoutSecs)
def network_test(self, tdepth=5, timeoutSecs=30):
a = self.do_json_request("2/NetworkTest.json", params={}, timeout=timeoutSecs)
verboseprint("\n network test:", dump_json(a))
return(a)
def jprofile(self, depth=5, timeoutSecs=30):
return self.do_json_request("2/JProfile.json", params={'depth': depth}, timeout=timeoutSecs)
def iostatus(self):
return self.do_json_request("IOStatus.json")
# turns enums into expanded binary features
def one_hot(self, source, timeoutSecs=30, **kwargs):
params = {
"source": source,
}
a = self.do_json_request('2/OneHot.json',
params=params,
timeout=timeoutSecs
)
check_sandbox_for_errors(python_test_name=h2o_args.python_test_name)
return a
# &offset=
# &view=
# FIX! need to have max > 1000?
def inspect(self, key, offset=None, view=None, max_column_display=1000, ignoreH2oError=False,
timeoutSecs=30):
params = {
"src_key": key,
"offset": offset,
# view doesn't exist for 2. let it be passed here from old tests but not used
}
a = self.do_json_request('2/Inspect2.json',
params=params,
ignoreH2oError=ignoreH2oError,
timeout=timeoutSecs
)
return a
# can take a useful 'filter'
# FIX! current hack to h2o to make sure we get "all" rather than just
# default 20 the browser gets. set to max # by default (1024)
# There is a offset= param that's useful also, and filter=
def store_view(self, timeoutSecs=60, print_params=False, **kwargs):
params_dict = {
# now we should default to a big number, so we see everything
'filter': None,
'view': 10000,
'offset': 0,
}
# no checking on legal kwargs?
params_dict.update(kwargs)
if print_params:
print "\nStoreView params list:", params_dict
a = self.do_json_request('StoreView.json',
params=params_dict,
timeout=timeoutSecs)
return a
def rebalance(self, timeoutSecs=180, **kwargs):
params_dict = {
# now we should default to a big number, so we see everything
'source': None,
'after': None,
'chunks': None,
}
params_dict.update(kwargs)
a = self.do_json_request('2/ReBalance.json',
params=params_dict,
timeout=timeoutSecs
)
verboseprint("\n rebalance result:", dump_json(a))
return a
def to_int(self, timeoutSecs=60, **kwargs):
params_dict = {
'src_key': None,
'column_index': None, # ugh. takes 1 based indexing
}
params_dict.update(kwargs)
a = self.do_json_request('2/ToInt2.json', params=params_dict, timeout=timeoutSecs)
verboseprint("\n to_int result:", dump_json(a))
return a
def to_enum(self, timeoutSecs=60, **kwargs):
params_dict = {
'src_key': None,
'column_index': None, # ugh. takes 1 based indexing
}
params_dict.update(kwargs)
a = self.do_json_request('2/ToEnum2.json', params=params_dict, timeout=timeoutSecs)
    verboseprint("\n to_enum result:", dump_json(a))
return a
def unlock(self, timeoutSecs=30):
a = self.do_json_request('2/UnlockKeys.json', params=None, timeout=timeoutSecs)
return a
# There is also a RemoveAck in the browser, that asks for confirmation from
# the user. This is after that confirmation.
# UPDATE: ignore errors on remove..key might already be gone due to h2o removing it now
# after parse
def remove_key(self, key, timeoutSecs=120):
a = self.do_json_request('Remove.json',
params={"key": key}, ignoreH2oError=True, timeout=timeoutSecs)
self.unlock()
return a
# this removes all keys!
def remove_all_keys(self, timeoutSecs=120):
a = self.do_json_request('2/RemoveAll.json', timeout=timeoutSecs)
return a
# only model keys can be exported?
def export_hdfs(self, source_key, path):
a = self.do_json_request('ExportHdfs.json',
params={"source_key": source_key, "path": path})
verboseprint("\nexport_hdfs result:", dump_json(a))
return a
def export_s3(self, source_key, bucket, obj):
a = self.do_json_request('ExportS3.json',
params={"source_key": source_key, "bucket": bucket, "object": obj})
verboseprint("\nexport_s3 result:", dump_json(a))
return a
# the param name for ImportFiles is 'file', but it can take a directory or a file.
# 192.168.0.37:54323/ImportFiles.html?file=%2Fhome%2F0xdiag%2Fdatasets
def import_files(self, path, timeoutSecs=180):
a = self.do_json_request('2/ImportFiles2.json',
timeout=timeoutSecs,
params={"path": path}
)
verboseprint("\nimport_files result:", dump_json(a))
return a
# 'destination_key', 'escape_nan' 'expression'
def exec_query(self, timeoutSecs=20, ignoreH2oError=False, print_params=False, **kwargs):
# only v2 now
params_dict = {
'str': None,
}
browseAlso = kwargs.pop('browseAlso', False)
check_params_update_kwargs(params_dict, kwargs, 'exec_query', print_params=print_params)
a = self.do_json_request('2/Exec2.json',
timeout=timeoutSecs, ignoreH2oError=ignoreH2oError, params=params_dict)
verboseprint("\nexec_query result:", dump_json(a))
return a
def jobs_admin(self, timeoutSecs=120, **kwargs):
params_dict = {
# 'expression': None,
}
browseAlso = kwargs.pop('browseAlso', False)
params_dict.update(kwargs)
verboseprint("\njobs_admin:", params_dict)
a = self.do_json_request('Jobs.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\njobs_admin result:", dump_json(a))
return a
def jobs_cancel(self, timeoutSecs=120, **kwargs):
params_dict = {
'key': None,
}
browseAlso = kwargs.pop('browseAlso', False)
check_params_update_kwargs(params_dict, kwargs, 'jobs_cancel', print_params=True)
a = self.do_json_request('Cancel.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\njobs_cancel result:", dump_json(a))
print "Cancelled job:", params_dict['key']
return a
def create_frame(self, timeoutSecs=120, **kwargs):
params_dict = {
'key': None,
'rows': None,
'cols': None,
'seed': None,
'randomize': None,
'value': None,
'real_range': None,
'binary_fraction': None,
'categorical_fraction': None,
'factors': None,
'integer_fraction': None,
'integer_range': None,
'binary_ones_fraction': None,
'missing_fraction': None,
'response_factors': None,
'has_response': None,
}
browseAlso = kwargs.pop('browseAlso', False)
check_params_update_kwargs(params_dict, kwargs, 'create_frame', print_params=True)
a = self.do_json_request('2/CreateFrame.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\ncreate_frame result:", dump_json(a))
return a
def insert_missing_values(self, timeoutSecs=120, **kwargs):
params_dict = {
'key': None,
'seed': None,
'missing_fraction': None,
}
browseAlso = kwargs.pop('browseAlso', False)
check_params_update_kwargs(params_dict, kwargs, 'insert_missing_values', print_params=True)
a = self.do_json_request('2/InsertMissingValues.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\ninsert_missing_values result:", dump_json(a))
return a
def impute(self, timeoutSecs=120, **kwargs):
params_dict = {
'source': None,
'column': None,
'method': None, # mean, mode, median
'group_by': None, # comma separated column names
}
browseAlso = kwargs.pop('browseAlso', False)
check_params_update_kwargs(params_dict, kwargs, 'impute', print_params=True)
a = self.do_json_request('2/Impute.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\nimpute result:", dump_json(a))
return a
def frame_split(self, timeoutSecs=120, **kwargs):
params_dict = {
'source': None,
'ratios': None,
}
browseAlso = kwargs.pop('browseAlso', False)
check_params_update_kwargs(params_dict, kwargs, 'frame_split', print_params=True)
a = self.do_json_request('2/FrameSplitPage.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\nframe_split result:", dump_json(a))
return a
def nfold_frame_extract(self, timeoutSecs=120, **kwargs):
params_dict = {
'source': None,
'nfolds': None,
'afold': None, # Split to extract
}
browseAlso = kwargs.pop('browseAlso', False)
check_params_update_kwargs(params_dict, kwargs, 'nfold_frame_extract', print_params=True)
a = self.do_json_request('2/NFoldFrameExtractPage.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\nnfold_frame_extract result:", dump_json(a))
return a
def gap_statistic(self, timeoutSecs=120, retryDelaySecs=1.0, initialDelaySecs=None, pollTimeoutSecs=180,
noise=None, benchmarkLogging=None, noPoll=False,
print_params=True, noPrint=False, **kwargs):
params_dict = {
'source': None,
'destination_key': None,
'k_max': None,
'b_max': None,
'bootstrap_fraction': None,
'seed': None,
'cols': None,
'ignored_cols': None,
'ignored_cols_by_name': None,
}
browseAlso = kwargs.pop('browseAlso', False)
check_params_update_kwargs(params_dict, kwargs, 'gap_statistic', print_params=True)
start = time.time()
a = self.do_json_request('2/GapStatistic.json', timeout=timeoutSecs, params=params_dict)
if noPoll:
return a
a = self.poll_url(a, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs, benchmarkLogging=benchmarkLogging,
initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs)
verboseprint("\ngap_statistic result:", dump_json(a))
a['python_elapsed'] = time.time() - start
a['python_%timeout'] = a['python_elapsed'] * 100 / timeoutSecs
return a
def speedrf(self, data_key, ntrees=50, max_depth=20, timeoutSecs=300,
retryDelaySecs=1.0, initialDelaySecs=None, pollTimeoutSecs=180,
noise=None, benchmarkLogging=None, noPoll=False,
print_params=True, noPrint=False, **kwargs):
params_dict = {
'balance_classes': None,
'classification': 1,
'cols': None,
'destination_key': None,
'ignored_cols': None,
'ignored_cols_by_name': None,
'importance': 0,
'keep_cross_validation_splits': None,
'max_after_balance_size': None,
'max_depth': max_depth,
'mtries': -1.0,
'nbins': 1024.0,
'n_folds': None,
'ntrees': ntrees,
'oobee': 0,
'response': None,
'sample_rate': 0.67,
'sampling_strategy': 'RANDOM',
'score_pojo': None, # create the score pojo
'seed': -1.0,
'select_stat_type': 'ENTROPY', # GINI
'source': data_key,
'validation': None,
'verbose': None,
}
check_params_update_kwargs(params_dict, kwargs, 'SpeeDRF', print_params)
if print_params:
print "\n%s parameters:" % "SpeeDRF", params_dict
sys.stdout.flush()
rf = self.do_json_request('2/SpeeDRF.json', timeout=timeoutSecs, params=params_dict)
print "\n%s result:" % "SpeeDRF", dump_json(rf)
if noPoll:
print "Not polling SpeeDRF"
return rf
time.sleep(2)
rfView = self.poll_url(rf, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs,
initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs,
noise=noise, benchmarkLogging=benchmarkLogging, noPrint=noPrint)
return rfView
# note ntree in kwargs can overwrite trees! (trees is legacy param)
def random_forest(self, data_key, trees=None,
timeoutSecs=300, retryDelaySecs=1.0, initialDelaySecs=None, pollTimeoutSecs=180,
noise=None, benchmarkLogging=None, noPoll=False, rfView=True,
print_params=True, noPrint=False, **kwargs):
print "at top of random_forest, timeoutSec: ", timeoutSecs
algo = '2/DRF'
algoView = '2/DRFView'
params_dict = {
# 'model': None,
'balance_classes': None,
'build_tree_one_node': None,
'classification': 1,
'cols': None,
'destination_key': None,
'ignored_cols': None,
'ignored_cols_by_name': None,
'importance': 1, # enable variable importance by default
'max_after_balance_size': None,
'max_depth': None,
'min_rows': None, # how many rows in leaves for stopping condition
'mtries': None,
'nbins': None,
'ntrees': trees,
'n_folds': None,
'response': None,
'sample_rate': None,
'score_each_iteration': None,
'seed': None,
'source': data_key,
'validation': None,
}
if 'model_key' in kwargs:
kwargs['destination_key'] = kwargs['model_key'] # hmm..should we switch test to new param?
browseAlso = kwargs.pop('browseAlso', False)
check_params_update_kwargs(params_dict, kwargs, 'random_forest', print_params)
# on v2, there is no default response. So if it's none, we should use the last column, for compatibility
inspect = h2o_cmd.runInspect(key=data_key)
# response only takes names. can't use col index..have to look it up
# or add last col
# mnist can be col 0 for response!
if ('response' not in params_dict) or (params_dict['response'] is None):
params_dict['response'] = str(inspect['cols'][-1]['name'])
elif isinstance(params_dict['response'], int):
params_dict['response'] = str(inspect['cols'][params_dict['response']]['name'])
if print_params:
print "\n%s parameters:" % algo, params_dict
sys.stdout.flush()
# always follow thru to rfview?
rf = self.do_json_request(algo + '.json', timeout=timeoutSecs, params=params_dict)
print "\n%s result:" % algo, dump_json(rf)
# noPoll and rfView=False are similar?
if (noPoll or not rfView):
# just return for now
print "no rfView:", rfView, "noPoll", noPoll
return rf
# since we don't know the model key from the rf response, we just let rf redirect us to completion
# if we want to do noPoll, we have to name the model, so we know what to ask for when we do the completion view
# HACK: wait more for first poll?
time.sleep(5)
rfView = self.poll_url(rf, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs,
initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs,
noise=noise, benchmarkLogging=benchmarkLogging, noPrint=noPrint)
return rfView
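# Illustrative random_forest call (key/column names are hypothetical). With noPoll=True the model
# must be named up front so a later view/poll knows what to ask for:
#   rf = h2o.nodes[0].random_forest('train.hex', ntrees=10, destination_key='rf_model',
#                                   response='C55', noPoll=True, rfView=False)
#   rfView = h2o.nodes[0].random_forest_view(model_key='rf_model', timeoutSecs=300)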
def random_forest_view(self, data_key=None, model_key=None, timeoutSecs=300,
retryDelaySecs=0.2, initialDelaySecs=None, pollTimeoutSecs=180,
noise=None, benchmarkLogging=None, print_params=False, noPoll=False,
noPrint=False, **kwargs):
print "random_forest_view not supported in H2O fvec yet. hacking done response"
r = {'response': {'status': 'done'}, 'trees': {'number_built': 0}}
# return r
algo = '2/DRFModelView'
# No such thing as 2/DRFScore2
algoScore = '2/DRFScore2'
# is response_variable needed here? it shouldn't be
# do_json_request will ignore any that remain = None
params_dict = {
'_modelKey': model_key,
}
browseAlso = kwargs.pop('browseAlso', False)
# only update params_dict..don't add
# throw away anything else as it should come from the model (propagating what RF used)
for k in kwargs:
if k in params_dict:
params_dict[k] = kwargs[k]
if print_params:
print "\n%s parameters:" % algo, params_dict
sys.stdout.flush()
whichUsed = algo
# for drf2, you can't pass a new dataset here, compared to what you trained with.
# should complain or something if tried with a data_key
if data_key:
print "Can't pass a new data_key to random_forest_view for v2's DRFModelView. Not using"
a = self.do_json_request(whichUsed + ".json", timeout=timeoutSecs, params=params_dict)
verboseprint("\n%s result:" % whichUsed, dump_json(a))
if noPoll:
return a
# add a fake redirect_request and redirect_request_args
# to the RF response, to make it look like everyone else
rfView = self.poll_url(a, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs,
initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs,
noPrint=noPrint, noise=noise, benchmarkLogging=benchmarkLogging)
drf_model = rfView['drf_model']
numberBuilt = drf_model['N']
# want to double check all this because it's new
# and we had problems with races/doneness before
errorInResponse = False
# numberBuilt<0 or ntree<0 or numberBuilt>ntree or \
# ntree!=rfView['ntree']
if errorInResponse:
raise Exception("\nBad values in %s.json\n" % whichUsed +
"progress: %s, progressTotal: %s, ntree: %s, numberBuilt: %s, status: %s" % \
(progress, progressTotal, ntree, numberBuilt, status))
if (browseAlso | h2o_args.browse_json):
h2b.browseJsonHistoryAsUrlLastMatch(whichUsed)
return rfView
def set_column_names(self, timeoutSecs=300, print_params=False, **kwargs):
params_dict = {
'copy_from': None,
'source': None,
'cols': None,
'comma_separated_list': None,
}
check_params_update_kwargs(params_dict, kwargs, 'set_column_names', print_params)
a = self.do_json_request('2/SetColumnNames2.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\nset_column_names result:", dump_json(a))
return a
def quantiles(self, timeoutSecs=300, print_params=True, **kwargs):
params_dict = {
'source_key': None,
'column': None,
'quantile': None,
'max_qbins': None,
'interpolation_type': None,
'multiple_pass': None,
}
check_params_update_kwargs(params_dict, kwargs, 'quantiles', print_params)
a = self.do_json_request('2/QuantilesPage.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\nquantiles result:", dump_json(a))
return a
def anomaly(self, timeoutSecs=300, retryDelaySecs=1, initialDelaySecs=5, pollTimeoutSecs=30,
noPoll=False, print_params=True, benchmarkLogging=None, **kwargs):
params_dict = {
'destination_key': None,
'source': None,
'dl_autoencoder_model': None,
'thresh': -1,
}
check_params_update_kwargs(params_dict, kwargs, 'anomaly', print_params)
a = self.do_json_request('2/Anomaly.json', timeout=timeoutSecs, params=params_dict)
if noPoll:
return a
a = self.poll_url(a, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs, benchmarkLogging=benchmarkLogging,
initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs)
verboseprint("\nanomaly result:", dump_json(a))
return a
def deep_features(self, timeoutSecs=300, retryDelaySecs=1, initialDelaySecs=5, pollTimeoutSecs=30,
noPoll=False, print_params=True, benchmarkLogging=None, **kwargs):
params_dict = {
'destination_key': None,
'source': None,
'dl_model': None,
'layer': -1,
}
check_params_update_kwargs(params_dict, kwargs, 'deep_features', print_params)
a = self.do_json_request('2/DeepFeatures.json', timeout=timeoutSecs, params=params_dict)
if noPoll:
return a
a = self.poll_url(a, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs, benchmarkLogging=benchmarkLogging,
initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs)
verboseprint("\ndeep_features result:", dump_json(a))
return a
def naive_bayes(self, timeoutSecs=300, retryDelaySecs=1, initialDelaySecs=5, pollTimeoutSecs=30,
noPoll=False, print_params=True, benchmarkLogging=None, **kwargs):
params_dict = {
'destination_key': None,
'source': None,
'response': None,
'cols': None,
'ignored_cols': None,
'ignored_cols_by_name': None,
'laplace': None,
'drop_na_cols': None,
'min_std_dev': None,
}
check_params_update_kwargs(params_dict, kwargs, 'naive_bayes', print_params)
a = self.do_json_request('2/NaiveBayes.json', timeout=timeoutSecs, params=params_dict)
if noPoll:
return a
a = self.poll_url(a, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs, benchmarkLogging=benchmarkLogging,
initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs)
verboseprint("\nnaive_bayes result:", dump_json(a))
return a
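# note: anomaly() is defined twice in this module; the second definition below wins at import time
# and is the one attached as H2O.anomaly at the bottom of the file.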
def anomaly(self, timeoutSecs=300, retryDelaySecs=1, initialDelaySecs=5, pollTimeoutSecs=30,
noPoll=False, print_params=True, benchmarkLogging=None, **kwargs):
params_dict = {
'destination_key': None,
'source': None,
'dl_autoencoder_model': None,
'thresh': None,
}
check_params_update_kwargs(params_dict, kwargs, 'anomaly', print_params)
start = time.time()
a = self.do_json_request('2/Anomaly.json', timeout=timeoutSecs, params=params_dict)
if noPoll:
return a
a = self.poll_url(a, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs, benchmarkLogging=benchmarkLogging,
initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs)
    verboseprint("\nanomaly result:", dump_json(a))
a['python_elapsed'] = time.time() - start
a['python_%timeout'] = a['python_elapsed'] * 100 / timeoutSecs
return a
def gbm_view(self, model_key, timeoutSecs=300, print_params=False, **kwargs):
params_dict = {
'_modelKey': model_key,
}
# only lets these params thru
check_params_update_kwargs(params_dict, kwargs, 'gbm_view', print_params)
a = self.do_json_request('2/GBMModelView.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\ngbm_view result:", dump_json(a))
return a
def gbm_grid_view(self, timeoutSecs=300, print_params=False, **kwargs):
params_dict = {
'job_key': None,
'destination_key': None,
}
# only lets these params thru
check_params_update_kwargs(params_dict, kwargs, 'gbm_grid_view', print_params)
a = self.do_json_request('2/GridSearchProgress.json', timeout=timeoutSecs, params=params_dict)
print "\ngbm_grid_view result:", dump_json(a)
return a
def speedrf_view(self, modelKey, timeoutSecs=300, print_params=False, **kwargs):
params_dict = { '_modelKey': modelKey, }
check_params_update_kwargs(params_dict, kwargs, 'speedrf_view', print_params)
a = self.do_json_request('2/SpeeDRFModelView.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\nspeedrf_view_result:", dump_json(a))
return a
def speedrf_grid_view(self, timeoutSecs=300, print_params=False, **kwargs):
params_dict = {
'job_key': None,
'destination_key': None,
}
# only lets these params thru
check_params_update_kwargs(params_dict, kwargs, 'speedrf_grid_view', print_params)
a = self.do_json_request('2/GridSearchProgress.json', timeout=timeoutSecs, params=params_dict)
print "\nspeedrf_grid_view result:", dump_json(a)
return a
def pca_view(self, modelKey, timeoutSecs=300, print_params=False, **kwargs):
#this function is only for pca on fvec! may replace in future.
params_dict = {
'_modelKey': modelKey,
}
check_params_update_kwargs(params_dict, kwargs, 'pca_view', print_params)
a = self.do_json_request('2/PCAModelView.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\npca_view_result:", dump_json(a))
return a
def glm_grid_view(self, timeoutSecs=300, print_params=False, **kwargs):
#this function is only for glm2, may remove it in future.
params_dict = {
'grid_key': None,
}
check_params_update_kwargs(params_dict, kwargs, 'glm_grid_view', print_params)
a = self.do_json_request('2/GLMGridView.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\nglm_grid_view result:", dump_json(a))
return a
def glm_view(self, modelKey=None, timeoutSecs=300, print_params=False, **kwargs):
#this function is only for glm2, may remove it in future.
params_dict = {
'_modelKey': modelKey,
}
check_params_update_kwargs(params_dict, kwargs, 'glm_view', print_params)
a = self.do_json_request('2/GLMModelView.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\nglm_view result:", dump_json(a))
return a
def save_model(self, timeoutSecs=300, print_params=False, **kwargs):
#this function is only for glm2, may remove it in future.
params_dict = {
'model': None,
'path': None,
'force': None,
}
check_params_update_kwargs(params_dict, kwargs, 'save_model', print_params)
a = self.do_json_request('2/SaveModel.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\nsave_model result:", dump_json(a))
return a
def load_model(self, timeoutSecs=300, print_params=False, **kwargs):
params_dict = {
'path': None,
}
check_params_update_kwargs(params_dict, kwargs, 'load_model', print_params)
a = self.do_json_request('2/LoadModel.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\nload_model result:", dump_json(a))
return a
def generate_predictions(self, data_key, model_key, destination_key=None, timeoutSecs=300, print_params=True,
**kwargs):
algo = '2/Predict'
algoView = '2/Inspect2'
params_dict = {
'data': data_key,
'model': model_key,
# 'prediction_key': destination_key,
'prediction': destination_key,
}
browseAlso = kwargs.pop('browseAlso', False)
# only lets these params thru
check_params_update_kwargs(params_dict, kwargs, 'generate_predictions', print_params)
if print_params:
print "\n%s parameters:" % algo, params_dict
sys.stdout.flush()
a = self.do_json_request(
algo + '.json',
timeout=timeoutSecs,
params=params_dict)
verboseprint("\n%s result:" % algo, dump_json(a))
if (browseAlso | h2o_args.browse_json):
h2b.browseJsonHistoryAsUrlLastMatch(algo)
return a
def predict_confusion_matrix(self, timeoutSecs=300, print_params=True, **kwargs):
params_dict = {
'actual': None,
'vactual': 'predict',
'predict': None,
'vpredict': 'predict',
}
# everyone should move to using this, and a full list in params_dict
# only lets these params thru
check_params_update_kwargs(params_dict, kwargs, 'predict_confusion_matrix', print_params)
a = self.do_json_request('2/ConfusionMatrix.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\nprediction_confusion_matrix result:", dump_json(a))
return a
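# Typical scoring flow (keys/column names are hypothetical): generate predictions with a model,
# then score them against the actuals:
#   p = h2o.nodes[0].generate_predictions(data_key='test.hex', model_key='rf_model',
#                                         destination_key='predict.hex')
#   cm = h2o.nodes[0].predict_confusion_matrix(actual='test.hex', vactual='C55',
#                                              predict='predict.hex', vpredict='predict')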
def hit_ratio(self, timeoutSecs=300, print_params=True, **kwargs):
params_dict = {
'actual': None,
'vactual': 'predict',
'predict': None,
        'max_k': None,
        'make_k': None,
}
    check_params_update_kwargs(params_dict, kwargs, 'hit_ratio', print_params)
a = self.do_json_request('2/HitRatio.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\nhit_ratio result:", dump_json(a))
return a
def generate_auc(self, timeoutSecs=300, print_params=True, **kwargs):
params_dict = {
'thresholds': None,
'actual': None,
'vactual': 'predict',
'predict': None,
'vpredict': 'predict',
}
check_params_update_kwargs(params_dict, kwargs, 'auc', print_params)
a = self.do_json_request('2/AUC.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\nauc result:", dump_json(a))
return a
def gbm(self, data_key, timeoutSecs=600, retryDelaySecs=1, initialDelaySecs=5, pollTimeoutSecs=30,
noPoll=False, print_params=True, **kwargs):
params_dict = {
'balance_classes': None,
'checkpoint': None,
'classification': None,
'class_sampling_factors': None,
'cols': None,
'destination_key': None,
'distribution': None, # multinomial is a choice
'family': None, # can be 'bernoulli' or 'AUTO'
        'grid_parallelism': None,
        'group_split': None,  # categoricals
'holdout_fraction': None,
'ignored_cols': None,
'ignored_cols_by_name': None, # either this or cols..not both
'importance': None,
'keep_cross_validation_splits': None,
'learn_rate': None,
'max_depth': None,
'max_after_balance_size': None,
'min_rows': None,
'nbins': None,
'ntrees': None,
'overwrite_checkpoint': None,
'response': None,
'score_each_iteration': None,
'seed': None,
'source': data_key,
'validation': None,
}
# only lets these params thru
check_params_update_kwargs(params_dict, kwargs, 'gbm', print_params)
if 'validation' not in kwargs:
kwargs['validation'] = data_key
start = time.time()
a = self.do_json_request('2/GBM.json', timeout=timeoutSecs, params=params_dict)
if noPoll:
a['python_elapsed'] = time.time() - start
a['python_%timeout'] = a['python_elapsed'] * 100 / timeoutSecs
return a
verboseprint("\nGBM first result:", dump_json(a))
a = self.poll_url(a, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs,
initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs)
verboseprint("\nGBM result:", dump_json(a))
a['python_elapsed'] = time.time() - start
a['python_%timeout'] = a['python_elapsed'] * 100 / timeoutSecs
return a
def pca(self, data_key, timeoutSecs=600, retryDelaySecs=1, initialDelaySecs=5, pollTimeoutSecs=30,
noPoll=False, print_params=True, benchmarkLogging=None, returnFast=False, **kwargs):
params_dict = {
'destination_key': None,
'source': data_key,
'cols': None,
'ignored_cols': None,
'ignored_col_names': None,
'tolerance': None,
'max_pc': None,
'standardize': None,
}
# only lets these params thru
check_params_update_kwargs(params_dict, kwargs, 'pca', print_params)
start = time.time()
a = self.do_json_request('2/PCA.json', timeout=timeoutSecs, params=params_dict, returnFast=returnFast)
if noPoll:
#a['python_elapsed'] = time.time() - start
#a['python_%timeout'] = a['python_elapsed']*100 / timeoutSecs
return a
a = self.poll_url(a, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs, benchmarkLogging=benchmarkLogging,
initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs)
verboseprint("\nPCA result:", dump_json(a))
a['python_elapsed'] = time.time() - start
a['python_%timeout'] = a['python_elapsed'] * 100 / timeoutSecs
return a
def pca_score(self, timeoutSecs=600, retryDelaySecs=1, initialDelaySecs=5, pollTimeoutSecs=30,
noPoll=False, print_params=True, **kwargs):
params_dict = {
'model': None,
'destination_key': None,
'source': None,
'num_pc': None,
}
# only lets these params thru
check_params_update_kwargs(params_dict, kwargs, 'pca_score', print_params)
start = time.time()
a = self.do_json_request('2/PCAScore.json', timeout=timeoutSecs, params=params_dict)
if noPoll:
a['python_elapsed'] = time.time() - start
a['python_%timeout'] = a['python_elapsed'] * 100 / timeoutSecs
return a
if 'response' not in a:
raise Exception("Can't tell where to go..No 'response' key in this polled json response: %s" % a)
a = self.poll_url(a, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs,
initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs)
verboseprint("\nPCAScore result:", dump_json(a))
a['python_elapsed'] = time.time() - start
a['python_%timeout'] = a['python_elapsed'] * 100 / timeoutSecs
return a
def neural_net_score(self, key, model, timeoutSecs=60, retryDelaySecs=1, initialDelaySecs=5, pollTimeoutSecs=30,
noPoll=False, print_params=True, **kwargs):
params_dict = {
'source': key,
'destination_key': None,
'model': model,
'cols': None,
'ignored_cols': None,
'ignored_col_name': None,
'classification': None,
'response': None,
'max_rows': 0,
}
# only lets these params thru
check_params_update_kwargs(params_dict, kwargs, 'neural_net_score', print_params)
start = time.time()
a = self.do_json_request('2/NeuralNetScore.json', timeout=timeoutSecs, params=params_dict)
if noPoll:
a['python_elapsed'] = time.time() - start
a['python_%timeout'] = a['python_elapsed'] * 100 / timeoutSecs
return a
# no polling
# a = self.poll_url(a, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs,
# initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs)
verboseprint("\nneural net score result:", dump_json(a))
a['python_elapsed'] = time.time() - start
a['python_%timeout'] = a['python_elapsed'] * 100 / timeoutSecs
return a
def neural_net(self, data_key, timeoutSecs=60, retryDelaySecs=1, initialDelaySecs=5, pollTimeoutSecs=30,
noPoll=False, print_params=True, **kwargs):
params_dict = {
'destination_key': None,
'source': data_key,
'cols': None,
'ignored_cols': None,
'ignored_cols_by_name': None,
'validation': None,
'classification': None,
'response': None,
'mode': None,
'activation': None,
'input_dropout_ratio': None,
'hidden': None,
'rate': None,
'rate_annealing': None,
'momentum_start': None,
'momentum_ramp': None,
'momentum_stable': None,
'l1': None,
'l2': None,
'seed': None,
'loss': None,
'max_w2': None,
'warmup_samples': None,
'initial_weight_distribution': None,
'initial_weight_scale': None,
'epochs': None,
}
# only lets these params thru
check_params_update_kwargs(params_dict, kwargs, 'neural_net', print_params)
if 'validation' not in kwargs:
kwargs['validation'] = data_key
start = time.time()
a = self.do_json_request('2/NeuralNet.json', timeout=timeoutSecs, params=params_dict)
if noPoll:
a['python_elapsed'] = time.time() - start
a['python_%timeout'] = a['python_elapsed'] * 100 / timeoutSecs
return a
a = self.poll_url(a, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs,
initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs)
verboseprint("\nneural_net result:", dump_json(a))
a['python_elapsed'] = time.time() - start
a['python_%timeout'] = a['python_elapsed'] * 100 / timeoutSecs
return a
def deep_learning(self, data_key, timeoutSecs=60, retryDelaySecs=1, initialDelaySecs=5, pollTimeoutSecs=30,
noPoll=False, print_params=True, **kwargs):
params_dict = {
'autoencoder': None,
'destination_key': None,
'source': data_key,
'cols': None,
'ignored_cols': None,
'ignored_cols_by_name': None,
'validation': None,
'classification': None,
'response': None,
'expert_mode': None,
'activation': None,
'hidden': None,
'epochs': None,
'train_samples_per_iteration': None,
'seed': None,
'adaptive_rate': None,
'rho': None,
'epsilon': None,
'rate': None,
'rate_annealing': None,
'rate_decay': None,
'momentum_start': None,
'momentum_ramp': None,
'momentum_stable': None,
'nesterov_accelerated_gradient': None,
'input_dropout_ratio': None,
'hidden_dropout_ratios': None,
'l1': None,
'l2': None,
'max_w2': None,
'initial_weight_distribution': None,
'initial_weight_scale': None,
'loss': None,
'score_interval': None,
'score_training_samples': None,
'score_validation_samples': None,
'score_duty_cycle': None,
'classification_stop': None,
'regression_stop': None,
'quiet_mode': None,
'max_confusion_matrix_size': None,
'max_hit_ratio_k': None,
'balance_classes': None,
'max_after_balance_size': None,
'score_validation_sampling': None,
'diagnostics': None,
'variable_importances': None,
'fast_mode': None,
'ignore_const_cols': None,
'force_load_balance': None,
'replicate_training_data': None,
'single_node_mode': None,
'shuffle_training_data': None,
'n_folds': None,
}
# only lets these params thru
check_params_update_kwargs(params_dict, kwargs, 'deep_learning', print_params)
if 'validation' not in kwargs:
kwargs['validation'] = data_key
start = time.time()
a = self.do_json_request('2/DeepLearning.json', timeout=timeoutSecs, params=params_dict)
if noPoll:
a['python_elapsed'] = time.time() - start
a['python_%timeout'] = a['python_elapsed'] * 100 / timeoutSecs
return a
a = self.poll_url(a, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs,
initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs)
    verboseprint("\ndeep_learning result:", dump_json(a))
a['python_elapsed'] = time.time() - start
a['python_%timeout'] = a['python_elapsed'] * 100 / timeoutSecs
return a
def neural_view(self, model_key, timeoutSecs=300, print_params=False, **kwargs):
params_dict = {
'destination_key': model_key,
}
# only lets these params thru
check_params_update_kwargs(params_dict, kwargs, 'nn_view', print_params)
a = self.do_json_request('2/NeuralNetProgress.json', timeout=timeoutSecs, params=params_dict)
verboseprint("\nneural_view result:", dump_json(a))
return a
def summary_page(self, key, timeoutSecs=60, noPrint=True, useVA=False, numRows=None, numCols=None, **kwargs):
params_dict = {
'source': key,
'cols': None, # is this zero based like everything else?
'max_ncols': 1000 if not numCols else numCols,
'max_qbins': None,
}
browseAlso = kwargs.pop('browseAlso', False)
check_params_update_kwargs(params_dict, kwargs, 'summary_page', print_params=True)
a = self.do_json_request('2/SummaryPage2.json', timeout=timeoutSecs, params=params_dict)
h2o_cmd.infoFromSummary(a, noPrint=noPrint, numRows=numRows, numCols=numCols)
return a
def log_view(self, timeoutSecs=10, **kwargs):
browseAlso = kwargs.pop('browseAlso', False)
a = self.do_json_request('LogView.json', timeout=timeoutSecs)
verboseprint("\nlog_view result:", dump_json(a))
if (browseAlso | h2o_args.browse_json):
h2b.browseJsonHistoryAsUrlLastMatch("LogView")
time.sleep(3) # to be able to see it
return a
def csv_download(self, src_key, csvPathname, timeoutSecs=60, **kwargs):
# log it
params = {'src_key': src_key}
paramsStr = '?' + '&'.join(['%s=%s' % (k, v) for (k, v) in params.items()])
url = self.url('2/DownloadDataset.json')
log('Start ' + url + paramsStr, comment=csvPathname)
# do it (absorb in 1024 byte chunks)
r = requests.get(url, params=params, timeout=timeoutSecs)
print "csv_download r.headers:", r.headers
if r.status_code == 200:
f = open(csvPathname, 'wb')
for chunk in r.iter_content(1024):
f.write(chunk)
print csvPathname, "size:", h2o_util.file_size_formatted(csvPathname)
# shouldn't need params
def log_download(self, logDir=None, timeoutSecs=30, **kwargs):
if logDir == None:
logDir = get_sandbox_name()
url = self.url('LogDownload.json')
log('Start ' + url);
print "\nDownloading h2o log(s) using:", url
r = requests.get(url, timeout=timeoutSecs, **kwargs)
if not r or not r.ok:
        raise Exception("Maybe bad url? no r in log_download %s:" % inspect.stack()[1][3])
z = zipfile.ZipFile(StringIO.StringIO(r.content))
print "z.namelist:", z.namelist()
print "z.printdir:", z.printdir()
nameList = z.namelist()
# the first is the h2ologs dir name.
h2oLogDir = logDir + "/" + nameList.pop(0)
print "h2oLogDir:", h2oLogDir
print "logDir:", logDir
# it's a zip of zipped files
# first unzip it
z = zipfile.ZipFile(StringIO.StringIO(r.content))
z.extractall(logDir)
# unzipped file should be in LOG_DIR now
# now unzip the files in that directory
for zname in nameList:
resultList = h2o_util.flat_unzip(logDir + "/" + zname, logDir)
print "\nlogDir:", logDir
for logfile in resultList:
numLines = sum(1 for line in open(logfile))
print logfile, "Lines:", numLines
print
return resultList
# kwargs used to pass many params
def GLM_shared(self, key,
timeoutSecs=300, retryDelaySecs=0.5, initialDelaySecs=None, pollTimeoutSecs=180,
parentName=None, **kwargs):
browseAlso = kwargs.pop('browseAlso', False)
params_dict = {
'alpha': None,
'beta_epsilon': None, # GLMGrid doesn't use this name
'beta_constraints': None,
'cols': None,
'destination_key': None,
'disable_line_search': None,
'family': None,
'intercept': None, # use intercept in the model
'higher_accuracy': None, # use line search (use if no convergence otherwise)
'ignored_cols': None,
'ignored_cols_by_name': None,
'lambda': None,
'lambda_min_ratio': None, # min lambda used in lambda search, ratio of lambda_max
'lambda_search': None, # use lambda search, start at lambda max. lambda is used as lambda min
'link': None,
'max_iter': None,
'max_predictors': None, # lambda_search stop condition. Stop when more than this # of predictors.
'n_folds': None,
'nlambdas': None, # number of lambdas to be used in a search
'non_negative': None, # require coefficients to be non-negative
'prior': None, # prior probability for y=1. For logistic, if the data is sampled and mean is skewed
'response': None,
'source': key,
'standardize': None,
'strong_rules': None, # use strong rules to filter out inactive columns
'tweedie_variance_power': None,
'use_all_factor_levels': None, # normally first factor is skipped. Set to use all levels.
'variable_importances': None, # if use_all_factor_levels is off, base level is not shown
}
check_params_update_kwargs(params_dict, kwargs, parentName, print_params=True)
a = self.do_json_request(parentName + '.json', timeout=timeoutSecs, params=params_dict)
verboseprint(parentName, dump_json(a))
return a
def GLM(self, key,
timeoutSecs=300, retryDelaySecs=0.5, initialDelaySecs=None, pollTimeoutSecs=180,
noise=None, benchmarkLogging=None, noPoll=False, destination_key=None, **kwargs):
parentName = "2/GLM2"
a = self.GLM_shared(key, timeoutSecs, retryDelaySecs, initialDelaySecs, parentName=parentName,
destination_key=destination_key, **kwargs)
# Check that the response has the right Progress url it's going to steer us to.
if noPoll:
return a
a = self.poll_url(a, timeoutSecs=timeoutSecs, retryDelaySecs=retryDelaySecs,
initialDelaySecs=initialDelaySecs, pollTimeoutSecs=pollTimeoutSecs,
noise=noise, benchmarkLogging=benchmarkLogging)
verboseprint("GLM done:", dump_json(a))
browseAlso = kwargs.get('browseAlso', False)
if (browseAlso | h2o_args.browse_json):
print "Viewing the GLM result through the browser"
h2b.browseJsonHistoryAsUrlLastMatch('GLMProgressPage')
time.sleep(5)
return a
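# Illustrative GLM2 call (key/column names are hypothetical); the lambda-search knobs are the
# ones documented in GLM_shared above:
#   glmResult = h2o.nodes[0].GLM('prostate.hex', response='CAPSULE', family='binomial',
#                                lambda_search=1, nlambdas=30, alpha=0.5, n_folds=0,
#                                timeoutSecs=300)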
def GLMGrid_view(self, timeoutSecs=300, print_params=False, **kwargs):
params_dict = {
'job': None,
'destination_key': None,
}
# only lets these params thru
check_params_update_kwargs(params_dict, kwargs, 'GLMGridProgress', print_params)
a = self.do_json_request('GLMGridProgress.json', timeout=timeoutSecs, params=params_dict)
print "\nGLMGridProgress result:", dump_json(a)
return a
# GLMScore params
# model_key=__GLMModel_7a3a73c1-f272-4a2e-b37f-d2f371d304ba&
# key=cuse.hex&
# thresholds=0%3A1%3A0.01
def GLMScore(self, key, model_key, timeoutSecs=100, **kwargs):
# this isn't in fvec?
browseAlso = kwargs.pop('browseAlso', False)
# i guess key and model_key could be in kwargs, but
# maybe separate is more consistent with the core key behavior
# elsewhere
params_dict = {
'key': key,
'model_key': model_key,
}
params_dict.update(kwargs)
print "\nGLMScore params list:", params_dict
a = self.do_json_request('GLMScore.json', timeout=timeoutSecs, params=params_dict)
verboseprint("GLMScore:", dump_json(a))
browseAlso = kwargs.get('browseAlso', False)
if (browseAlso | h2o_args.browse_json):
print "Redoing the GLMScore through the browser, no results saved though"
h2b.browseJsonHistoryAsUrlLastMatch('GLMScore')
time.sleep(5)
return a
def models(self, timeoutSecs=10, **kwargs):
params_dict = {
'key': None,
'find_compatible_frames': 0,
'score_frame': None
}
check_params_update_kwargs(params_dict, kwargs, 'models', True)
result = self.do_json_request('2/Models', timeout=timeoutSecs, params=params_dict)
return result
def frames(self, timeoutSecs=10, **kwargs):
params_dict = {
'key': None,
'find_compatible_models': 0,
'score_model': None
}
check_params_update_kwargs(params_dict, kwargs, 'frames', True)
result = self.do_json_request('2/Frames', timeout=timeoutSecs, params=params_dict)
return result
#FIX! just here temporarily to get the response at the end of an algo, from job/destination_key
def completion_redirect(self, jsonRequest, params):
return self.do_json_request(jsonRequest=jsonRequest, params=params)
#******************************************************************************************8
# attach methods to H2O object
# this happens before any H2O instances are created
# this file is imported into h2o
H2O.anomaly = anomaly
H2O.completion_redirect = completion_redirect
H2O.create_frame = create_frame
H2O.csv_download = csv_download
H2O.deep_features = deep_features
H2O.deep_learning = deep_learning
H2O.exec_query = exec_query
H2O.export_files = export_files
H2O.export_hdfs = export_hdfs
H2O.export_s3 = export_s3
H2O.frames = frames
H2O.frame_split = frame_split
H2O.gap_statistic = gap_statistic
H2O.gbm = gbm
H2O.gbm_grid_view = gbm_grid_view
H2O.gbm_view = gbm_view
H2O.generate_auc = generate_auc
H2O.generate_predictions = generate_predictions
H2O.get_cloud = get_cloud
H2O.get_timeline = get_timeline
H2O.GLM = GLM
H2O.glm_grid_view = glm_grid_view
H2O.GLMGrid_view = GLMGrid_view
H2O.GLMScore = GLMScore
H2O.GLM_shared = GLM_shared
H2O.glm_view = glm_view
H2O.h2o_log_msg = h2o_log_msg
H2O.hit_ratio = hit_ratio
H2O.import_files = import_files
H2O.impute = impute
H2O.insert_missing_values = insert_missing_values
H2O.inspect = inspect
H2O.iostatus = iostatus
H2O.jobs_admin = jobs_admin
H2O.jobs_cancel = jobs_cancel
H2O.jprofile = jprofile
H2O.jstack = jstack
H2O.kmeans = kmeans
H2O.kmeans_view = kmeans_view
H2O.levels = levels
H2O.linux_info = linux_info
H2O.load_model = load_model
H2O.log_download = log_download
H2O.log_view = log_view
H2O.models = models
H2O.naive_bayes = naive_bayes
H2O.netstat = netstat
H2O.network_test = network_test
H2O.neural_net = neural_net
H2O.neural_net_score = neural_net_score
H2O.neural_view = neural_view
H2O.nfold_frame_extract = nfold_frame_extract
H2O.one_hot = one_hot
H2O.parse = parse
H2O.pca = pca
H2O.pca_score = pca_score
H2O.pca_view = pca_view
H2O.poll_url = poll_url
H2O.predict_confusion_matrix = predict_confusion_matrix
H2O.put_file = put_file
H2O.put_value = put_value
H2O.quantiles = quantiles
H2O.random_forest = random_forest
H2O.random_forest_view = random_forest_view
H2O.rebalance = rebalance
H2O.remove_all_keys = remove_all_keys
H2O.remove_key = remove_key
H2O.save_model = save_model
H2O.set_column_names = set_column_names
H2O.shutdown_all = shutdown_all
H2O.speedrf = speedrf
H2O.speedrf_grid_view = speedrf_grid_view
H2O.speedrf_view = speedrf_view
H2O.store_view = store_view
H2O.summary_page = summary_page
H2O.to_enum = to_enum
H2O.to_int = to_int
H2O.unlock = unlock
| apache-2.0 |
eyaler/tensorpack | examples/ResNet/cifar10-preact18-mixup.py | 1 | 5919 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: cifar10-preact18-mixup.py
# Author: Tao Hu <[email protected]>, Yauheni Selivonchyk <[email protected]>
import numpy as np
import argparse
import os
import tensorflow as tf
from tensorpack import *
from tensorpack.tfutils.summary import *
from tensorpack.dataflow import dataset
BATCH_SIZE = 128
CLASS_NUM = 10
LR_SCHEDULE = [(0, 0.1), (100, 0.01), (150, 0.001)]
WEIGHT_DECAY = 1e-4
FILTER_SIZES = [64, 128, 256, 512]
MODULE_SIZES = [2, 2, 2, 2]
def preactivation_block(input, num_filters, stride=1):
num_filters_in = input.get_shape().as_list()[1]
# residual
net = BNReLU(input)
residual = Conv2D('conv1', net, num_filters, kernel_size=3, strides=stride, use_bias=False, activation=BNReLU)
residual = Conv2D('conv2', residual, num_filters, kernel_size=3, strides=1, use_bias=False)
# identity
shortcut = input
if stride != 1 or num_filters_in != num_filters:
shortcut = Conv2D('shortcut', net, num_filters, kernel_size=1, strides=stride, use_bias=False)
return shortcut + residual
class ResNet_Cifar(ModelDesc):
def inputs(self):
return [tf.placeholder(tf.float32, [None, 32, 32, 3], 'input'),
tf.placeholder(tf.float32, [None, CLASS_NUM], 'label')]
def build_graph(self, image, label):
assert tf.test.is_gpu_available()
MEAN_IMAGE = tf.constant([0.4914, 0.4822, 0.4465], dtype=tf.float32)
STD_IMAGE = tf.constant([0.2023, 0.1994, 0.2010], dtype=tf.float32)
image = ((image / 255.0) - MEAN_IMAGE) / STD_IMAGE
image = tf.transpose(image, [0, 3, 1, 2])
pytorch_default_init = tf.variance_scaling_initializer(scale=1.0 / 3, mode='fan_in', distribution='uniform')
with argscope([Conv2D, BatchNorm, GlobalAvgPooling], data_format='channels_first'), \
argscope(Conv2D, kernel_initializer=pytorch_default_init):
net = Conv2D('conv0', image, 64, kernel_size=3, strides=1, use_bias=False)
for i, blocks_in_module in enumerate(MODULE_SIZES):
for j in range(blocks_in_module):
stride = 2 if j == 0 and i > 0 else 1
with tf.variable_scope("res%d.%d" % (i, j)):
net = preactivation_block(net, FILTER_SIZES[i], stride)
net = GlobalAvgPooling('gap', net)
logits = FullyConnected('linear', net, CLASS_NUM,
kernel_initializer=tf.random_normal_initializer(stddev=1e-3))
ce_cost = tf.nn.softmax_cross_entropy_with_logits(labels=label, logits=logits)
ce_cost = tf.reduce_mean(ce_cost, name='cross_entropy_loss')
single_label = tf.to_int32(tf.argmax(label, axis=1))
wrong = tf.to_float(tf.logical_not(tf.nn.in_top_k(logits, single_label, 1)), name='wrong_vector')
# monitor training error
add_moving_summary(tf.reduce_mean(wrong, name='train_error'), ce_cost)
add_param_summary(('.*/W', ['histogram']))
# weight decay on all W matrixes. including convolutional layers
wd_cost = tf.multiply(WEIGHT_DECAY, regularize_cost('.*', tf.nn.l2_loss), name='wd_cost')
return tf.add_n([ce_cost, wd_cost], name='cost')
def optimizer(self):
lr = tf.get_variable('learning_rate', initializer=0.1, trainable=False)
opt = tf.train.MomentumOptimizer(lr, 0.9)
return opt
def get_data(train_or_test, isMixup, alpha):
isTrain = train_or_test == 'train'
ds = dataset.Cifar10(train_or_test)
if isTrain:
augmentors = [
imgaug.CenterPaste((40, 40)),
imgaug.RandomCrop((32, 32)),
imgaug.Flip(horiz=True),
]
ds = AugmentImageComponent(ds, augmentors)
batch = BATCH_SIZE
ds = BatchData(ds, batch, remainder=not isTrain)
def f(dp):
images, labels = dp
one_hot_labels = np.eye(CLASS_NUM)[labels] # one hot coding
if not isTrain or not isMixup:
return [images, one_hot_labels]
# mixup implementation:
# Note that for larger images, it's more efficient to do mixup on GPUs (i.e. in the graph)
weight = np.random.beta(alpha, alpha, BATCH_SIZE)
x_weight = weight.reshape(BATCH_SIZE, 1, 1, 1)
y_weight = weight.reshape(BATCH_SIZE, 1)
index = np.random.permutation(BATCH_SIZE)
x1, x2 = images, images[index]
x = x1 * x_weight + x2 * (1 - x_weight)
y1, y2 = one_hot_labels, one_hot_labels[index]
y = y1 * y_weight + y2 * (1 - y_weight)
return [x, y]
ds = MapData(ds, f)
return ds
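# A possible in-graph variant of the mixup above, sketched here because the comment in get_data()
# notes that mixing on the GPU is cheaper for larger images. It is not used by this script and
# assumes TF 1.x ops (tf.random_gamma, tf.random_shuffle) plus float32 image/label tensors.
def graph_mixup(images, labels, alpha=1.0):
    # Beta(alpha, alpha) samples via the gamma ratio X / (X + Y), with X, Y ~ Gamma(alpha, 1)
    g1 = tf.random_gamma([BATCH_SIZE], alpha)
    g2 = tf.random_gamma([BATCH_SIZE], alpha)
    weight = g1 / (g1 + g2)
    x_weight = tf.reshape(weight, [BATCH_SIZE, 1, 1, 1])
    y_weight = tf.reshape(weight, [BATCH_SIZE, 1])
    index = tf.random_shuffle(tf.range(BATCH_SIZE))
    # convex combination of each sample with a randomly permuted partner
    x = images * x_weight + tf.gather(images, index) * (1.0 - x_weight)
    y = labels * y_weight + tf.gather(labels, index) * (1.0 - y_weight)
    return x, y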
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--load', help='load model')
parser.add_argument('--mixup', help='enable mixup', action='store_true')
parser.add_argument('--alpha', default=1, type=float, help='alpha in mixup')
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
log_folder = 'train_log/cifar10-preact18%s' % ('-mixup' if args.mixup else '')
logger.set_logger_dir(os.path.join(log_folder))
dataset_train = get_data('train', args.mixup, args.alpha)
dataset_test = get_data('test', args.mixup, args.alpha)
config = TrainConfig(
model=ResNet_Cifar(),
data=QueueInput(dataset_train),
callbacks=[
ModelSaver(),
InferenceRunner(dataset_test,
[ScalarStats('cost'), ClassificationError('wrong_vector')]),
ScheduledHyperParamSetter('learning_rate', LR_SCHEDULE)
],
max_epoch=200,
steps_per_epoch=len(dataset_train),
session_init=SaverRestore(args.load) if args.load else None
)
launch_train_with_config(config, SimpleTrainer())
| apache-2.0 |
schets/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 203 | 5442 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix, than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, which have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <[email protected]>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
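# e.g. _weights(np.array([0.25]), dx=1, orig=0) returns indices (0., 1.) with weights
# (0.75, 0.25): each sample point is spread linearly onto its two neighboring pixel centers.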
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x]
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
""" Compute the tomography design matrix.
Parameters
----------
l_x : int
linear size of image array
n_dir : int
number of angles at which projections are acquired.
Returns
-------
p : sparse matrix of shape (n_dir l_x, l_x**2)
"""
X, Y = _generate_center_coordinates(l_x)
angles = np.linspace(0, np.pi, n_dir, endpoint=False)
data_inds, weights, camera_inds = [], [], []
data_unravel_indices = np.arange(l_x ** 2)
data_unravel_indices = np.hstack((data_unravel_indices,
data_unravel_indices))
for i, angle in enumerate(angles):
Xrot = np.cos(angle) * X - np.sin(angle) * Y
inds, w = _weights(Xrot, dx=1, orig=X.min())
mask = np.logical_and(inds >= 0, inds < l_x)
weights += list(w[mask])
camera_inds += list(inds[mask] + i * l_x)
data_inds += list(data_unravel_indices[mask])
proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
return proj_operator
def generate_synthetic_data():
""" Synthetic binary data """
rs = np.random.RandomState(0)
n_pts = 36.
x, y = np.ogrid[0:l, 0:l]
mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
mask = np.zeros((l, l))
points = l * rs.rand(2, n_pts)
mask[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
res = np.logical_and(mask > mask.mean(), mask_outer)
return res - ndimage.binary_erosion(res)
# Generate synthetic images, and projections
l = 128
proj_operator = build_projection_operator(l, l / 7.)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
proj += 0.15 * np.random.randn(*proj.shape)
# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)
# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')
plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
right=1)
plt.show()
| bsd-3-clause |
adamlerer/examples | word_language_model/data.py | 9 | 1439 | import os
import torch
class Dictionary(object):
def __init__(self):
self.word2idx = {}
self.idx2word = []
def add_word(self, word):
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
return self.word2idx[word]
def __len__(self):
return len(self.idx2word)
class Corpus(object):
def __init__(self, path):
self.dictionary = Dictionary()
self.train = self.tokenize(os.path.join(path, 'train.txt'))
self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
self.test = self.tokenize(os.path.join(path, 'test.txt'))
def tokenize(self, path):
"""Tokenizes a text file."""
assert os.path.exists(path)
# Add words to the dictionary
with open(path, 'r') as f:
tokens = 0
for line in f:
words = line.split() + ['<eos>']
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
# Tokenize file content
with open(path, 'r') as f:
ids = torch.LongTensor(tokens)
token = 0
for line in f:
words = line.split() + ['<eos>']
for word in words:
ids[token] = self.dictionary.word2idx[word]
token += 1
return ids
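# Illustrative usage (the data path is hypothetical; main.py in this example consumes these ids):
#   corpus = Corpus('./data/wikitext-2')
#   vocab_size = len(corpus.dictionary)
#   train_ids = corpus.train  # 1-D LongTensor of token ids, ready to be batched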
| bsd-3-clause |
h2oai/h2o | py/testdir_single_jvm/test_speedrf_mnist_both.py | 9 | 7731 | import unittest
import random, sys, time, re
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_glm, h2o_util, h2o_rf
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
        # assume we're at 0xdata with its hdfs namenode
h2o.init(1, java_heap_GB=14)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_RF_mnist_both(self):
importFolderPath = "mnist"
csvFilelist = [
# ("mnist_training.csv.gz", "mnist_testing.csv.gz", 600, 784834182943470027),
("mnist_training.csv.gz", "mnist_testing.csv.gz", 600, None, '*mnist*gz'),
# to see results a 2nd time
("mnist_training.csv.gz", "mnist_testing.csv.gz", 600, None, '*mnist*gz'),
]
# IMPORT**********************************************
# since H2O deletes the source key, we should re-import every iteration if we re-use the src in the list
(importFolderResult, importPattern) = h2i.import_only(bucket='home-0xdiag-datasets', path=importFolderPath + "/*")
### print "importHDFSResult:", h2o.dump_json(importFolderResult)
if 'files' in importFolderResult:
succeededList = importFolderResult['files']
else:
succeededList = importFolderResult['succeeded']
### print "succeededList:", h2o.dump_json(succeededList)
        self.assertGreater(len(succeededList), 1, "Should see more than 1 file in the import")
# why does this hang? can't look at storeview after import?
print "\nTrying StoreView after the import folder"
h2o_cmd.runStoreView(timeoutSecs=30)
trial = 0
allDelta = []
for (trainCsvFilename, testCsvFilename, timeoutSecs, rfSeed, parsePattern) in csvFilelist:
trialStart = time.time()
# PARSE test****************************************
testKey2 = testCsvFilename + "_" + str(trial) + ".hex"
start = time.time()
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=importFolderPath+"/"+testCsvFilename,
hex_key=testKey2, timeoutSecs=timeoutSecs)
elapsed = time.time() - start
print "parse end on ", testCsvFilename, 'took', elapsed, 'seconds',\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
print "parse result:", parseResult['destination_key']
print "We won't use this pruning of x on test data. See if it prunes the same as the training"
y = 0 # first column is pixel value
x = h2o_glm.goodXFromColumnInfo(y, key=parseResult['destination_key'], timeoutSecs=300)
# PARSE train****************************************
print "Use multi-file parse to grab both the mnist_testing.csv.gz and mnist_training.csv.gz for training"
trainKey2 = trainCsvFilename + "_" + str(trial) + ".hex"
start = time.time()
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=importFolderPath+"/"+parsePattern,
hex_key=trainKey2, timeoutSecs=timeoutSecs)
elapsed = time.time() - start
print "parse end on ", trainCsvFilename, 'took', elapsed, 'seconds',\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
print "parse result:", parseResult['destination_key']
# RF+RFView (train)****************************************
# print "This is the 'ignore=' we'll use"
# no longer use. depend on h2o to get it right.
ntree = 25
params = {
'response': 0,
'ntrees': ntree,
# 'data_key='mnist_training.csv.hex'
            'mtries': 28, # fix because we ignore some cols, which will change the sqrt(cols) calc?
'max_depth': 2147483647,
'select_stat_type': 'ENTROPY',
'sampling_strategy': 'RANDOM',
'sample_rate': 0.67,
'oobee': 1,
# 'model_key': '__RFModel_7055e6cf-a0de-44db-b165-f5994730ac77',
'destination_key': 'RF_model',
'nbins': 1024,
# 'seed': 784834182943470027,
# 'class_weights': '0=1.0,1=1.0,2=1.0,3=1.0,4=1.0,5=1.0,6=1.0,7=1.0,8=1.0,9=1.0',
}
if rfSeed is None:
params['seed'] = random.randint(0,sys.maxint)
else:
params['seed'] = rfSeed
print "RF seed:", params['seed']
kwargs = params.copy()
print "Trying rf"
timeoutSecs = 1800
start = time.time()
rfView = h2o_cmd.runSpeeDRF(parseResult=parseResult,
timeoutSecs=timeoutSecs, pollTimeoutSecs=180, retryDelaySecs=2, **kwargs)
elapsed = time.time() - start
print "RF completed in", elapsed, "seconds.", \
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
# RFView (score on test)****************************************
(classification_error, classErrorPctList, totalScores) = h2o_rf.simpleCheckRFView(None, rfView, **params)
# was 2.84
# sometimes get 2.87?
self.assertAlmostEqual(classification_error, 1.6, delta=1.6,
msg="Classification error %s differs too much" % classification_error)
treeStats = rfView['speedrf_model']['treeStats']
leaves = {'min': treeStats['minLeaves'], 'mean': treeStats['meanLeaves'], 'max': treeStats['maxLeaves']}
# Expected values are from this case:
# ("mnist_training.csv.gz", "mnist_testing.csv.gz", 600, 784834182943470027),
leavesExpected = {'min': 4996, 'mean': 5064.1, 'max': 5148}
for l in leaves:
# self.assertAlmostEqual(leaves[l], leavesExpected[l], delta=10, msg="leaves %s %s %s differs too much" % (l, leaves[l], leavesExpected[l]))
delta = ((leaves[l] - leavesExpected[l])/leaves[l]) * 100
d = "seed: %s %s leaves: %s expected: %s pct. different %s" % (params['seed'], l, leaves[l], leavesExpected[l], delta)
print d
allDelta.append(d)
depth = {'min': treeStats['minDepth'], 'mean': treeStats['meanDepth'], 'max': treeStats['maxDepth']}
depthExpected = {'min': 21, 'mean': 23.8, 'max': 25}
for l in depth:
# self.assertAlmostEqual(depth[l], depthExpected[l], delta=1, msg="depth %s %s %s differs too much" % (l, depth[l], depthExpected[l]))
                delta = ((depth[l] - depthExpected[l])/depth[l]) * 100
d = "seed: %s %s depth: %s expected: %s pct. different %s" % (params['seed'], l, depth[l], depthExpected[l], delta)
print d
allDelta.append(d)
# Predict (on test)****************************************
start = time.time()
modelKey = rfView['speedrf_model']['_key']
predict = h2o.nodes[0].generate_predictions(model_key=modelKey, data_key=testKey2, timeoutSecs=timeoutSecs)
elapsed = time.time() - start
print "generate_predictions in", elapsed, "secs", \
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
# Done *******************************************************
print "\nShowing the results again from all the trials, to see variance"
for d in allDelta:
print d
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
herilalaina/scikit-learn | sklearn/svm/setup.py | 79 | 3160 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.pyx']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
# liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.pyx',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
# end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.pyx']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
shareactorIO/pipeline | prediction.ml/tensorflow/src/main/python/model/model_server_python3.py | 1 | 7109 | #!/usr/bin/env python3
import os
import sys
import tornado.ioloop
import tornado.web
import tornado.httpserver
import tornado.httputil
import tornado.gen
import importlib.util
from grpc.beta import implementations
import asyncio
import tensorflow as tf
import predict_pb2
import prediction_service_pb2
import tarfile
import subprocess
class ModelPredictTensorFlowHandler(tornado.web.RequestHandler):
def initialize(self,
bundle_parent_path,
grpc_host,
grpc_port,
request_timeout):
self.bundle_parent_path = bundle_parent_path
self.grpc_host = grpc_host
self.grpc_port = grpc_port
self.request_timeout = request_timeout
self.registry = {}
@tornado.gen.coroutine
def post(self, model_type, model_namespace, model_name, model_version):
(model_base_path, transformers_module) = self.get_model_assets(model_type,
model_namespace,
model_name,
model_version)
output = yield self.do_post(self.request.body, model_base_path, transformers_module, model_name, model_version)
self.write(output)
@tornado.gen.coroutine
def do_post(self, inputs, model_base_path, transformers_module, model_name, model_version):
# TODO: don't create channel on every request
channel = implementations.insecure_channel(self.grpc_host, int(self.grpc_port))
stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
# Transform raw inputs to TensorFlow PredictRequest
transformed_inputs_request = transformers_module.transform_inputs(inputs)
transformed_inputs_request.model_spec.name = model_name
transformed_inputs_request.model_spec.version.value = int(model_version)
# Transform TensorFlow PredictResponse into output
outputs = stub.Predict(transformed_inputs_request, self.request_timeout)
transformed_outputs = transformers_module.transform_outputs(outputs)
return transformed_outputs
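    # A minimal sketch of the channel reuse suggested by the TODO above
    # (assuming the gRPC host/port never change for this handler): build the
    # stub once in initialize(), e.g.
    #   self.stub = prediction_service_pb2.beta_create_PredictionService_stub(
    #       implementations.insecure_channel(self.grpc_host, int(self.grpc_port)))
    # and call self.stub.Predict(...) inside do_post() instead of recreating it here.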
def get_model_assets(self, model_type, model_namespace, model_name, model_version):
model_key = '%s_%s_%s_%s' % (model_type, model_namespace, model_name, model_version)
if model_key in self.registry:
(model_base_path, transformers_module) = self.registry[model_key]
else:
model_base_path = os.path.join(self.bundle_parent_path, model_type)
model_base_path = os.path.join(model_base_path, model_namespace)
model_base_path = os.path.join(model_base_path, model_name)
model_base_path = os.path.join(model_base_path, model_version)
# Load model_io_transformers from model directory
transformers_module_name = 'model_io_transformers'
transformers_source_path = os.path.join(model_base_path, '%s.py' % transformers_module_name)
spec = importlib.util.spec_from_file_location(transformers_module_name, transformers_source_path)
transformers_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(transformers_module)
self.registry[model_key] = (model_base_path, transformers_module)
return self.registry[model_key]
class ModelDeployTensorFlowHandler(tornado.web.RequestHandler):
def initialize(self, bundle_parent_path):
self.bundle_parent_path = bundle_parent_path
def post(self, model_type, model_namespace, model_name, model_version):
fileinfo = self.request.files['bundle'][0]
model_file_source_bundle_path = fileinfo['filename']
(_, filename) = os.path.split(model_file_source_bundle_path)
bundle_path = os.path.join(self.bundle_parent_path, model_type)
bundle_path = os.path.join(bundle_path, model_namespace)
bundle_path = os.path.join(bundle_path, model_name)
bundle_path = os.path.join(bundle_path, model_version)
bundle_path_filename = os.path.join(bundle_path, filename)
try:
os.makedirs(bundle_path, exist_ok=True)
with open(bundle_path_filename, 'wb+') as fh:
fh.write(fileinfo['body'])
print("%s uploaded %s, saved as %s" %
( str(self.request.remote_ip),
str(filename),
bundle_path_filename) )
self.write("Uploading and extracting bundle '%s' into '%s'...\n" % (filename, bundle_path))
with tarfile.open(bundle_path_filename, "r:gz") as tar:
tar.extractall(path=bundle_path)
self.write('...Done!\n')
self.write('Installing bundle and updating environment...\n')
# TODO: Restart TensorFlow Model Serving and point to bundle_path_with_model_name
#completed_process = subprocess.run('cd %s && ./install.sh' % bundle_path,
# timeout=600,
# shell=True,
# stdout=subprocess.PIPE)
self.write('...Done!\n')
except IOError as e:
print('Failed to write file due to IOError %s' % str(e))
self.write('Failed to write file due to IOError %s' % str(e))
raise e
def write_error(self, status_code, **kwargs):
self.write('Error %s' % status_code)
if "exc_info" in kwargs:
self.write(", Exception: %s" % kwargs["exc_info"][0].__name__)
if __name__ == '__main__':
port = os.environ['PIO_MODEL_SERVER_PORT']
bundle_parent_path = os.environ['PIO_MODEL_STORE_HOME']
model_type = os.environ['PIO_MODEL_TYPE']
model_namespace = os.environ['PIO_MODEL_NAMESPACE']
model_name = os.environ['PIO_MODEL_NAME']
model_version = os.environ['PIO_MODEL_VERSION']
grpc_port = os.environ['PIO_MODEL_TENSORFLOW_SERVING_PORT']
app = tornado.web.Application([
# url: /v1/model/predict/tensorflow/$PIO_MODEL_NAMESPACE/$PIO_MODEL_NAME/$PIO_MODEL_VERSION/
(r"/v1/model/predict/([a-zA-Z\-0-9\.:,_]+)/([a-zA-Z\-0-9\.:,_]+)/([a-zA-Z\-0-9\.:,_]+)/([a-zA-Z\-0-9\.:,_]+)",
ModelPredictTensorFlowHandler, dict(bundle_parent_path=bundle_parent_path,
grpc_host='127.0.0.1',
grpc_port=grpc_port,
request_timeout=30)),
# TODO: Disable this if we're not explicitly in PIO_MODEL_ENVIRONMENT=dev mode
# url: /v1/model/deploy/tensorflow/$PIO_MODEL_NAMESPACE/$PIO_MODEL_NAME/$PIO_MODEL_VERSION/
(r"/v1/model/deploy/([a-zA-Z\-0-9\.:,_]+)/([a-zA-Z\-0-9\.:,_]+)/([a-zA-Z\-0-9\.:,_]+)/([a-zA-Z\-0-9\.:,_]+)",
ModelDeployTensorFlowHandler, dict(bundle_parent_path=bundle_parent_path))
])
app.listen(port)
print("")
print("Started Tornado-based Http Server on Port '%s'" % port)
print("")
tornado.ioloop.IOLoop.current().start()
| apache-2.0 |
herilalaina/scikit-learn | sklearn/tests/test_metaestimators.py | 28 | 5040 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.utils.validation import check_is_fitted
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
from sklearn.exceptions import NotFittedError
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba',
'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
check_is_fitted(self, 'coef_')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises a NotFittedError
assert_raises(NotFittedError, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
herilalaina/scikit-learn | examples/feature_selection/plot_permutation_test_for_classification.py | 54 | 2295 | """
=================================================================
Test with permutations the significance of a classification score
=================================================================
In order to test if a classification score is significant, one technique
is to repeat the classification procedure after randomizing (permuting)
the labels. The p-value is then given by the percentage of runs for
which the score obtained is greater than the classification score
obtained on the original labels.
"""
# Author: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import permutation_test_score
from sklearn import datasets
# #############################################################################
# Loading a dataset
iris = datasets.load_iris()
X = iris.data
y = iris.target
n_classes = np.unique(y).size
# Some noisy data not correlated
random = np.random.RandomState(seed=0)
E = random.normal(size=(len(X), 2200))
# Add noisy data to the informative features for make the task harder
X = np.c_[X, E]
svm = SVC(kernel='linear')
cv = StratifiedKFold(2)
score, permutation_scores, pvalue = permutation_test_score(
svm, X, y, scoring="accuracy", cv=cv, n_permutations=100, n_jobs=1)
print("Classification score %s (pvalue : %s)" % (score, pvalue))
# #############################################################################
# View histogram of permutation scores
plt.hist(permutation_scores, 20, label='Permutation scores',
edgecolor='black')
ylim = plt.ylim()
# BUG: vlines(..., linestyle='--') fails on older versions of matplotlib
# plt.vlines(score, ylim[0], ylim[1], linestyle='--',
# color='g', linewidth=3, label='Classification Score'
# ' (pvalue %s)' % pvalue)
# plt.vlines(1.0 / n_classes, ylim[0], ylim[1], linestyle='--',
# color='k', linewidth=3, label='Luck')
plt.plot(2 * [score], ylim, '--g', linewidth=3,
label='Classification Score'
' (pvalue %s)' % pvalue)
plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck')
plt.ylim(ylim)
plt.legend()
plt.xlabel('Score')
plt.show()
| bsd-3-clause |
xzturn/tensorflow | tensorflow/python/keras/utils/generic_utils.py | 2 | 27368 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python utilities required by Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import binascii
import codecs
import marshal
import os
import re
import sys
import time
import types as python_types
import numpy as np
import six
from tensorflow.python.util import nest
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import keras_export
_GLOBAL_CUSTOM_OBJECTS = {}
_GLOBAL_CUSTOM_NAMES = {}
# Flag that determines whether to skip the NotImplementedError when calling
# get_config in custom models and layers. This is only enabled when saving to
# SavedModel, when the config isn't required.
_SKIP_FAILED_SERIALIZATION = False
# If a layer does not have a defined config, then the returned config will be a
# dictionary with the below key.
_LAYER_UNDEFINED_CONFIG_KEY = 'layer was saved without config'
@keras_export('keras.utils.CustomObjectScope')
class CustomObjectScope(object):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
Example:
Consider a custom object `MyObject` (e.g. a class):
```python
with CustomObjectScope({'MyObject':MyObject}):
layer = Dense(..., kernel_regularizer='MyObject')
# save, load, etc. will recognize custom object by name
```
"""
def __init__(self, *args):
self.custom_objects = args
self.backup = None
def __enter__(self):
self.backup = _GLOBAL_CUSTOM_OBJECTS.copy()
for objects in self.custom_objects:
_GLOBAL_CUSTOM_OBJECTS.update(objects)
return self
def __exit__(self, *args, **kwargs):
_GLOBAL_CUSTOM_OBJECTS.clear()
_GLOBAL_CUSTOM_OBJECTS.update(self.backup)
@keras_export('keras.utils.custom_object_scope')
def custom_object_scope(*args):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Convenience wrapper for `CustomObjectScope`.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
Example:
Consider a custom object `MyObject`
```python
with custom_object_scope({'MyObject':MyObject}):
layer = Dense(..., kernel_regularizer='MyObject')
# save, load, etc. will recognize custom object by name
```
Arguments:
*args: Variable length list of dictionaries of name, class pairs to add to
custom objects.
Returns:
Object of type `CustomObjectScope`.
"""
return CustomObjectScope(*args)
@keras_export('keras.utils.get_custom_objects')
def get_custom_objects():
"""Retrieves a live reference to the global dictionary of custom objects.
Updating and clearing custom objects using `custom_object_scope`
is preferred, but `get_custom_objects` can
be used to directly access `_GLOBAL_CUSTOM_OBJECTS`.
Example:
```python
get_custom_objects().clear()
get_custom_objects()['MyObject'] = MyObject
```
Returns:
Global dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`).
"""
return _GLOBAL_CUSTOM_OBJECTS
def serialize_keras_class_and_config(cls_name, cls_config):
"""Returns the serialization of the class with the given config."""
return {'class_name': cls_name, 'config': cls_config}
@keras_export('keras.utils.register_keras_serializable')
def register_keras_serializable(package='Custom', name=None):
"""Registers an object with the Keras serialization framework.
This decorator injects the decorated class or function into the Keras custom
object dictionary, so that it can be serialized and deserialized without
needing an entry in the user-provided custom object dict. It also injects a
function that Keras will call to get the object's serializable string key.
Note that to be serialized and deserialized, classes must implement the
`get_config()` method. Functions do not have this requirement.
The object will be registered under the key 'package>name' where `name`,
defaults to the object name if not passed.
Arguments:
package: The package that this class belongs to.
name: The name to serialize this class under in this package. If None, the
class's name will be used.
Returns:
A decorator that registers the decorated class with the passed names.
"""
def decorator(arg):
"""Registers a class with the Keras serialization framework."""
class_name = name if name is not None else arg.__name__
registered_name = package + '>' + class_name
if tf_inspect.isclass(arg) and not hasattr(arg, 'get_config'):
raise ValueError(
'Cannot register a class that does not have a get_config() method.')
if registered_name in _GLOBAL_CUSTOM_OBJECTS:
raise ValueError(
'%s has already been registered to %s' %
(registered_name, _GLOBAL_CUSTOM_OBJECTS[registered_name]))
if arg in _GLOBAL_CUSTOM_NAMES:
raise ValueError('%s has already been registered to %s' %
(arg, _GLOBAL_CUSTOM_NAMES[arg]))
_GLOBAL_CUSTOM_OBJECTS[registered_name] = arg
_GLOBAL_CUSTOM_NAMES[arg] = registered_name
return arg
return decorator
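# Illustrative usage of the decorator above (hypothetical class name):
#   @register_keras_serializable(package='MyPackage')
#   class MyScaler(object):
#     def get_config(self):
#       return {}
# The class is then stored under the key 'MyPackage>MyScaler' and can be looked
# up with get_registered_object('MyPackage>MyScaler').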
@keras_export('keras.utils.get_registered_name')
def get_registered_name(obj):
"""Returns the name registered to an object within the Keras framework.
This function is part of the Keras serialization and deserialization
framework. It maps objects to the string names associated with those objects
for serialization/deserialization.
Args:
obj: The object to look up.
Returns:
The name associated with the object, or the default Python name if the
object is not registered.
"""
if obj in _GLOBAL_CUSTOM_NAMES:
return _GLOBAL_CUSTOM_NAMES[obj]
else:
return obj.__name__
@tf_contextlib.contextmanager
def skip_failed_serialization():
global _SKIP_FAILED_SERIALIZATION
prev = _SKIP_FAILED_SERIALIZATION
try:
_SKIP_FAILED_SERIALIZATION = True
yield
finally:
_SKIP_FAILED_SERIALIZATION = prev
@keras_export('keras.utils.get_registered_object')
def get_registered_object(name, custom_objects=None, module_objects=None):
"""Returns the class associated with `name` if it is registered with Keras.
This function is part of the Keras serialization and deserialization
framework. It maps strings to the objects associated with them for
serialization/deserialization.
Example:
```
def from_config(cls, config, custom_objects=None):
if 'my_custom_object_name' in config:
config['hidden_cls'] = tf.keras.utils.get_registered_object(
config['my_custom_object_name'], custom_objects=custom_objects)
```
Args:
name: The name to look up.
custom_objects: A dictionary of custom objects to look the name up in.
Generally, custom_objects is provided by the user.
module_objects: A dictionary of custom objects to look the name up in.
Generally, module_objects is provided by midlevel library implementers.
Returns:
An instantiable class associated with 'name', or None if no such class
exists.
"""
if name in _GLOBAL_CUSTOM_OBJECTS:
return _GLOBAL_CUSTOM_OBJECTS[name]
elif custom_objects and name in custom_objects:
return custom_objects[name]
elif module_objects and name in module_objects:
return module_objects[name]
return None
@keras_export('keras.utils.serialize_keras_object')
def serialize_keras_object(instance):
"""Serialize Keras object into JSON."""
_, instance = tf_decorator.unwrap(instance)
if instance is None:
return None
if hasattr(instance, 'get_config'):
name = get_registered_name(instance.__class__)
try:
config = instance.get_config()
except NotImplementedError as e:
if _SKIP_FAILED_SERIALIZATION:
return serialize_keras_class_and_config(
name, {_LAYER_UNDEFINED_CONFIG_KEY: True})
raise e
serialization_config = {}
for key, item in config.items():
if isinstance(item, six.string_types):
serialization_config[key] = item
continue
# Any object of a different type needs to be converted to string or dict
# for serialization (e.g. custom functions, custom classes)
try:
serialized_item = serialize_keras_object(item)
if isinstance(serialized_item, dict) and not isinstance(item, dict):
serialized_item['__passive_serialization__'] = True
serialization_config[key] = serialized_item
except ValueError:
serialization_config[key] = item
name = get_registered_name(instance.__class__)
return serialize_keras_class_and_config(name, serialization_config)
if hasattr(instance, '__name__'):
return get_registered_name(instance)
raise ValueError('Cannot serialize', instance)
def get_custom_objects_by_name(item, custom_objects=None):
"""Returns the item if it is in either local or global custom objects."""
if item in _GLOBAL_CUSTOM_OBJECTS:
return _GLOBAL_CUSTOM_OBJECTS[item]
elif custom_objects and item in custom_objects:
return custom_objects[item]
return None
def class_and_config_for_serialized_keras_object(
config,
module_objects=None,
custom_objects=None,
printable_module_name='object'):
"""Returns the class name and config for a serialized keras object."""
if (not isinstance(config, dict) or 'class_name' not in config or
'config' not in config):
raise ValueError('Improper config format: ' + str(config))
class_name = config['class_name']
cls = get_registered_object(class_name, custom_objects, module_objects)
if cls is None:
raise ValueError('Unknown ' + printable_module_name + ': ' + class_name)
cls_config = config['config']
deserialized_objects = {}
for key, item in cls_config.items():
if isinstance(item, dict) and '__passive_serialization__' in item:
deserialized_objects[key] = deserialize_keras_object(
item,
module_objects=module_objects,
custom_objects=custom_objects,
printable_module_name='config_item')
# TODO(momernick): Should this also have 'module_objects'?
elif (isinstance(item, six.string_types) and
tf_inspect.isfunction(get_registered_object(item, custom_objects))):
# Handle custom functions here. When saving functions, we only save the
# function's name as a string. If we find a matching string in the custom
# objects during deserialization, we convert the string back to the
# original function.
# Note that a potential issue is that a string field could have a naming
# conflict with a custom function name, but this should be a rare case.
# This issue does not occur if a string field has a naming conflict with
# a custom object, since the config of an object will always be a dict.
deserialized_objects[key] = get_registered_object(item, custom_objects)
for key, item in deserialized_objects.items():
cls_config[key] = deserialized_objects[key]
return (cls, cls_config)
@keras_export('keras.utils.deserialize_keras_object')
def deserialize_keras_object(identifier,
module_objects=None,
custom_objects=None,
printable_module_name='object'):
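  """Turns a serialized Keras object back into the corresponding object.

  `identifier` may be a config dict produced by `serialize_keras_object`, a
  string name resolved against `custom_objects`, the global custom object
  registry or `module_objects`, or an already-deserialized function, which is
  returned unchanged.
  """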
if identifier is None:
return None
if isinstance(identifier, dict):
# In this case we are dealing with a Keras config dictionary.
config = identifier
(cls, cls_config) = class_and_config_for_serialized_keras_object(
config, module_objects, custom_objects, printable_module_name)
if hasattr(cls, 'from_config'):
arg_spec = tf_inspect.getfullargspec(cls.from_config)
custom_objects = custom_objects or {}
if 'custom_objects' in arg_spec.args:
return cls.from_config(
cls_config,
custom_objects=dict(
list(_GLOBAL_CUSTOM_OBJECTS.items()) +
list(custom_objects.items())))
with CustomObjectScope(custom_objects):
return cls.from_config(cls_config)
else:
# Then `cls` may be a function returning a class.
# in this case by convention `config` holds
# the kwargs of the function.
custom_objects = custom_objects or {}
with CustomObjectScope(custom_objects):
return cls(**cls_config)
elif isinstance(identifier, six.string_types):
object_name = identifier
if custom_objects and object_name in custom_objects:
obj = custom_objects.get(object_name)
elif object_name in _GLOBAL_CUSTOM_OBJECTS:
obj = _GLOBAL_CUSTOM_OBJECTS[object_name]
else:
obj = module_objects.get(object_name)
if obj is None:
raise ValueError('Unknown ' + printable_module_name + ':' + object_name)
# Classes passed by name are instantiated with no args, functions are
# returned as-is.
if tf_inspect.isclass(obj):
return obj()
return obj
elif tf_inspect.isfunction(identifier):
# If a function has already been deserialized, return as is.
return identifier
else:
raise ValueError('Could not interpret serialized %s: %s' %
(printable_module_name, identifier))
def func_dump(func):
"""Serializes a user defined function.
Arguments:
func: the function to serialize.
Returns:
A tuple `(code, defaults, closure)`.
"""
if os.name == 'nt':
raw_code = marshal.dumps(func.__code__).replace(b'\\', b'/')
code = codecs.encode(raw_code, 'base64').decode('ascii')
else:
raw_code = marshal.dumps(func.__code__)
code = codecs.encode(raw_code, 'base64').decode('ascii')
defaults = func.__defaults__
if func.__closure__:
closure = tuple(c.cell_contents for c in func.__closure__)
else:
closure = None
return code, defaults, closure
def func_load(code, defaults=None, closure=None, globs=None):
"""Deserializes a user defined function.
Arguments:
code: bytecode of the function.
defaults: defaults of the function.
closure: closure of the function.
globs: dictionary of global objects.
Returns:
A function object.
"""
if isinstance(code, (tuple, list)): # unpack previous dump
code, defaults, closure = code
if isinstance(defaults, list):
defaults = tuple(defaults)
def ensure_value_to_cell(value):
"""Ensures that a value is converted to a python cell object.
Arguments:
value: Any value that needs to be casted to the cell type
Returns:
A value wrapped as a cell object (see function "func_load")
"""
def dummy_fn():
# pylint: disable=pointless-statement
value # just access it so it gets captured in .__closure__
cell_value = dummy_fn.__closure__[0]
if not isinstance(value, type(cell_value)):
return cell_value
return value
if closure is not None:
closure = tuple(ensure_value_to_cell(_) for _ in closure)
try:
raw_code = codecs.decode(code.encode('ascii'), 'base64')
except (UnicodeEncodeError, binascii.Error):
raw_code = code.encode('raw_unicode_escape')
code = marshal.loads(raw_code)
if globs is None:
globs = globals()
return python_types.FunctionType(
code, globs, name=code.co_name, argdefs=defaults, closure=closure)
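# Illustrative round trip through the two helpers above:
#   code, defaults, closure = func_dump(lambda x, k=2: x * k)
#   f = func_load(code, defaults, closure)
#   f(3) == 6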
def has_arg(fn, name, accept_all=False):
"""Checks if a callable accepts a given keyword argument.
Arguments:
fn: Callable to inspect.
name: Check if `fn` can be called with `name` as a keyword argument.
accept_all: What to return if there is no parameter called `name` but the
function accepts a `**kwargs` argument.
Returns:
bool, whether `fn` accepts a `name` keyword argument.
"""
arg_spec = tf_inspect.getfullargspec(fn)
if accept_all and arg_spec.varkw is not None:
return True
return name in arg_spec.args
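# Example: for `def f(a, b=1): ...`, has_arg(f, 'b') is True, has_arg(f, 'c') is
# False, and has_arg(f, 'c', accept_all=True) is still False since f takes no **kwargs.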
@keras_export('keras.utils.Progbar')
class Progbar(object):
"""Displays a progress bar.
Arguments:
target: Total number of steps expected, None if unknown.
width: Progress bar width on screen.
verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
stateful_metrics: Iterable of string names of metrics that should *not* be
averaged over time. Metrics in this list will be displayed as-is. All
others will be averaged by the progbar before display.
interval: Minimum visual progress update interval (in seconds).
unit_name: Display name for step counts (usually "step" or "sample").
"""
def __init__(self,
target,
width=30,
verbose=1,
interval=0.05,
stateful_metrics=None,
unit_name='step'):
self.target = target
self.width = width
self.verbose = verbose
self.interval = interval
self.unit_name = unit_name
if stateful_metrics:
self.stateful_metrics = set(stateful_metrics)
else:
self.stateful_metrics = set()
self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and
sys.stdout.isatty()) or
'ipykernel' in sys.modules or
'posix' in sys.modules or
'PYCHARM_HOSTED' in os.environ)
self._total_width = 0
self._seen_so_far = 0
# We use a dict + list to avoid garbage collection
# issues found in OrderedDict
self._values = {}
self._values_order = []
self._start = time.time()
self._last_update = 0
def update(self, current, values=None, finalize=None):
"""Updates the progress bar.
Arguments:
current: Index of current step.
values: List of tuples: `(name, value_for_last_step)`. If `name` is in
`stateful_metrics`, `value_for_last_step` will be displayed as-is.
Else, an average of the metric over time will be displayed.
finalize: Whether this is the last update for the progress bar. If
`None`, defaults to `current >= self.target`.
"""
if finalize is None:
if self.target is None:
finalize = False
else:
finalize = current >= self.target
values = values or []
for k, v in values:
if k not in self._values_order:
self._values_order.append(k)
if k not in self.stateful_metrics:
# In the case that progress bar doesn't have a target value in the first
# epoch, both on_batch_end and on_epoch_end will be called, which will
# cause 'current' and 'self._seen_so_far' to have the same value. Force
# the minimal value to 1 here, otherwise stateful_metric will be 0s.
value_base = max(current - self._seen_so_far, 1)
if k not in self._values:
self._values[k] = [v * value_base, value_base]
else:
self._values[k][0] += v * value_base
self._values[k][1] += value_base
else:
# Stateful metrics output a numeric value. This representation
# means "take an average from a single value" but keeps the
# numeric formatting.
self._values[k] = [v, 1]
self._seen_so_far = current
now = time.time()
info = ' - %.0fs' % (now - self._start)
if self.verbose == 1:
if now - self._last_update < self.interval and not finalize:
return
prev_total_width = self._total_width
if self._dynamic_display:
sys.stdout.write('\b' * prev_total_width)
sys.stdout.write('\r')
else:
sys.stdout.write('\n')
if self.target is not None:
numdigits = int(np.log10(self.target)) + 1
bar = ('%' + str(numdigits) + 'd/%d [') % (current, self.target)
prog = float(current) / self.target
prog_width = int(self.width * prog)
if prog_width > 0:
bar += ('=' * (prog_width - 1))
if current < self.target:
bar += '>'
else:
bar += '='
bar += ('.' * (self.width - prog_width))
bar += ']'
else:
bar = '%7d/Unknown' % current
self._total_width = len(bar)
sys.stdout.write(bar)
if current:
time_per_unit = (now - self._start) / current
else:
time_per_unit = 0
if self.target is None or finalize:
if time_per_unit >= 1 or time_per_unit == 0:
info += ' %.0fs/%s' % (time_per_unit, self.unit_name)
elif time_per_unit >= 1e-3:
info += ' %.0fms/%s' % (time_per_unit * 1e3, self.unit_name)
else:
info += ' %.0fus/%s' % (time_per_unit * 1e6, self.unit_name)
else:
eta = time_per_unit * (self.target - current)
if eta > 3600:
eta_format = '%d:%02d:%02d' % (eta // 3600,
(eta % 3600) // 60, eta % 60)
elif eta > 60:
eta_format = '%d:%02d' % (eta // 60, eta % 60)
else:
eta_format = '%ds' % eta
info = ' - ETA: %s' % eta_format
for k in self._values_order:
info += ' - %s:' % k
if isinstance(self._values[k], list):
avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
if abs(avg) > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
else:
info += ' %s' % self._values[k]
self._total_width += len(info)
if prev_total_width > self._total_width:
info += (' ' * (prev_total_width - self._total_width))
if finalize:
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
elif self.verbose == 2:
if finalize:
numdigits = int(np.log10(self.target)) + 1
count = ('%' + str(numdigits) + 'd/%d') % (current, self.target)
info = count + info
for k in self._values_order:
info += ' - %s:' % k
avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))
          if abs(avg) > 1e-3:
info += ' %.4f' % avg
else:
info += ' %.4e' % avg
info += '\n'
sys.stdout.write(info)
sys.stdout.flush()
self._last_update = now
def add(self, n, values=None):
self.update(self._seen_so_far + n, values)
def make_batches(size, batch_size):
"""Returns a list of batch indices (tuples of indices).
Arguments:
size: Integer, total size of the data to slice into batches.
batch_size: Integer, batch size.
Returns:
A list of tuples of array indices.
"""
num_batches = int(np.ceil(size / float(batch_size)))
return [(i * batch_size, min(size, (i + 1) * batch_size))
for i in range(0, num_batches)]
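# Example: make_batches(5, 2) == [(0, 2), (2, 4), (4, 5)] -- half-open index
# ranges covering all 5 samples.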
def slice_arrays(arrays, start=None, stop=None):
"""Slice an array or list of arrays.
This takes an array-like, or a list of
array-likes, and outputs:
- arrays[start:stop] if `arrays` is an array-like
- [x[start:stop] for x in arrays] if `arrays` is a list
Can also work on list/array of indices: `slice_arrays(x, indices)`
Arguments:
arrays: Single array or list of arrays.
start: can be an integer index (start index) or a list/array of indices
stop: integer (stop index); should be None if `start` was a list.
Returns:
A slice of the array(s).
Raises:
ValueError: If the value of start is a list and stop is not None.
"""
if arrays is None:
return [None]
if isinstance(start, list) and stop is not None:
raise ValueError('The stop argument has to be None if the value of start '
'is a list.')
elif isinstance(arrays, list):
if hasattr(start, '__len__'):
# hdf5 datasets only support list objects as indices
if hasattr(start, 'shape'):
start = start.tolist()
return [None if x is None else x[start] for x in arrays]
return [
None if x is None else
None if not hasattr(x, '__getitem__') else x[start:stop] for x in arrays
]
else:
if hasattr(start, '__len__'):
if hasattr(start, 'shape'):
start = start.tolist()
return arrays[start]
if hasattr(start, '__getitem__'):
return arrays[start:stop]
return [None]
def to_list(x):
"""Normalizes a list/tensor into a list.
If a tensor is passed, we return
a list of size 1 containing the tensor.
Arguments:
x: target object to be normalized.
Returns:
A list.
"""
if isinstance(x, list):
return x
return [x]
def to_snake_case(name):
intermediate = re.sub('(.)([A-Z][a-z0-9]+)', r'\1_\2', name)
insecure = re.sub('([a-z])([A-Z])', r'\1_\2', intermediate).lower()
# If the class is private the name starts with "_" which is not secure
# for creating scopes. We prefix the name with "private" in this case.
if insecure[0] != '_':
return insecure
return 'private' + insecure
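# Example: to_snake_case('DenseFeatures') == 'dense_features'; names starting
# with '_' additionally get the 'private' prefix described above.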
def is_all_none(structure):
iterable = nest.flatten(structure)
# We cannot use Python's `any` because the iterable may return Tensors.
for element in iterable:
if element is not None:
return False
return True
def check_for_unexpected_keys(name, input_dict, expected_values):
unknown = set(input_dict.keys()).difference(expected_values)
if unknown:
raise ValueError('Unknown entries in {} dictionary: {}. Only expected '
'following keys: {}'.format(name, list(unknown),
expected_values))
def validate_kwargs(kwargs,
allowed_kwargs,
error_message='Keyword argument not understood:'):
"""Checks that all keyword arguments are in the set of allowed keys."""
for kwarg in kwargs:
if kwarg not in allowed_kwargs:
raise TypeError(error_message, kwarg)
def validate_config(config):
"""Determines whether config appears to be a valid layer config."""
return isinstance(config, dict) and _LAYER_UNDEFINED_CONFIG_KEY not in config
def default(method):
"""Decorates a method to detect overrides in subclasses."""
method._is_default = True # pylint: disable=protected-access
return method
def is_default(method):
"""Check if a method is decorated with the `default` wrapper."""
return getattr(method, '_is_default', False)
| apache-2.0 |
h2oai/h2o | py/test_import3.py | 9 | 2552 | import unittest, time, sys, os
# not needed, but in case you move it down to subdir
sys.path.extend(['.','..'])
import h2o_cmd
import h2o, h2o_hosts
import h2o_browse as h2b
import h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global localhost
localhost = h2o.decide_if_localhost()
if (localhost):
h2o.build_cloud(node_count=1)
else:
h2o_hosts.build_cloud_with_hosts(node_count=1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test3(self):
# h2i.import_parse(path='standard/covtype.data', bucket='home-0xdiag-datasets', schema="s3n", timeoutSecs=60)
## This will get it from import hdfs with s3n. the hdfs_name_node and hdfs_version for s3
# will have been passed at build_cloud, either from the test, or the <config>.json
h2i.import_parse(path='standard/benign.csv', bucket='home-0xdiag-datasets', schema='s3n', timeoutSecs=60)
# h2i.import_parse(path='leads.csv', bucket='datasets', schema="hdfs", timeoutSecs=60)
# h2i.import_parse(path='/datasets/leads.csv', schema="hdfs", timeoutSecs=60)
# h2i.import_parse(path='datasets/leads.csv', schema="hdfs", timeoutSecs=60)
## This will get it from import s3.
h2i.import_parse(path='standard/benign.csv', bucket='home-0xdiag-datasets', schema='s3', timeoutSecs=60)
#import(path=junkdir/junk.csv, bucket="home-0xdiag-datasets", schema="s3")
#
## this will get it from hdfs. the hdfs_name_node and hdfs_version for hdfs will
# have been passed at build_cloud, either from the test, or the <config>.json.
## It defaults to the local 172.16.2.176 cdh4 hdfs
## I guess -hdfs_root behavior works, but shouldn't be necessary (full path will be sent to h2o)
#import(path=junkdir/junk.csv, bucket="home-0xdiag-datasets", schema="hdfs")
#
## separator, exclude params can be passed for the parse
#import(path=junkdir/junk.csv, bucket="home-0xdiag-datasets", schema="hdfs", separator=11)
#
#H2O_BUCKETS_ROOT is the only env variable that affects behavior
#there are two <config.json> node variables set during build_cloud that will
# redirect schema='local' to schema='s3n'
# node.redirect_import_folder_to_s3_path
# node.redirect_import_folder_to_s3n_path
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
sangwook236/general-development-and-testing | sw_dev/python/rnd/doc/machine_learning/triton/cifar10_trainer.py | 2 | 9634 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120) # For 32x32 CIFAR-10 input (16 feature maps of 5x5 after two conv+pool stages).
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
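    # Shape trace for a CIFAR-10 batch of shape (N, 3, 32, 32):
    #   conv1 (5x5) -> (N, 6, 28, 28); pool -> (N, 6, 14, 14)
    #   conv2 (5x5) -> (N, 16, 10, 10); pool -> (N, 16, 5, 5) -> flattened to 16 * 5 * 5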
# REF [function] >> cifar10_on_cpu() in ${SWDT_PYTHON_HOME}/rnd/test/machine_learning/pytorch/pytorch_classification.py
def cifar10_on_cpu():
batch_size, num_epochs = 256, 100
# Load and normalize CIFAR10.
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
def imshow(img):
img = img / 2 + 0.5 # Unnormalize.
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
# Get some random training images.
dataiter = iter(trainloader)
images, labels = dataiter.next()
print('Images: shape = {}, dtype = {}.'.format(images.shape, images.dtype))
print('Labels: shape = {}, dtype = {}.'.format(labels.shape, labels.dtype))
# Show images.
imshow(torchvision.utils.make_grid(images))
# Print labels.
print(' '.join('%5s' % classes[labels[j]] for j in range(len(labels))))
#--------------------
# Define a Convolutional Neural Network.
net = Net()
print('Model is on {}.'.format(next(net.parameters()).device))
#--------------------
# Define a Loss function and optimizer.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
#--------------------
# Train the network.
for epoch in range(num_epochs): # Loop over the dataset multiple times.
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# Get the inputs; data is a list of [inputs, labels].
inputs, labels = data
# Zero the parameter gradients.
optimizer.zero_grad()
# Forward + backward + optimize.
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# Print statistics.
running_loss += loss.item()
if i % 20 == 19: # Print every 2000 mini-batches.
print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 20))
running_loss = 0.0
print('Finished Training')
#--------------------
# Test the network on the test data.
dataiter = iter(testloader)
images, labels = dataiter.next()
# Print images.
imshow(torchvision.utils.make_grid(images))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(len(labels))))
# Now let us see what the neural network thinks these examples above are.
outputs = net(images)
_, predicted = torch.max(outputs, 1)
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]] for j in range(len(labels))))
# Let us look at how the network performs on the whole dataset.
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
images, labels = data
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))
# What are the classes that performed well, and the classes that did not perform well.
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
for data in testloader:
images, labels = data
outputs = net(images)
_, predicted = torch.max(outputs, 1)
c = (predicted == labels).squeeze()
for i in range(len(labels)):
label = labels[i]
class_correct[label] += c[i].item()
class_total[label] += 1
for i in range(10):
print('Accuracy of %5s : %2d %%' % (classes[i], 100 * class_correct[i] / class_total[i]))
#--------------------
# Tracing modules.
if False:
images, labels = dataiter.next()
traced_net = torch.jit.trace(net, (images,))
torch_script_filepath = "./cifar10_cpu_ts.pth"
traced_net.save(torch_script_filepath)
print("Traced model saved to {}.".format(torch_script_filepath))
# REF [function] >> cifar10_on_gpu() in ${SWDT_PYTHON_HOME}/rnd/test/machine_learning/pytorch/pytorch_classification.py
def cifar10_on_gpu():
batch_size, num_epochs = 256, 100
# Load and normalize CIFAR10.
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
def imshow(img):
img = img / 2 + 0.5 # Unnormalize.
npimg = img.numpy()
plt.imshow(np.transpose(npimg, (1, 2, 0)))
plt.show()
# Get some random training images.
dataiter = iter(trainloader)
images, labels = dataiter.next()
print('Images: shape = {}, dtype = {}.'.format(images.shape, images.dtype))
print('Labels: shape = {}, dtype = {}.'.format(labels.shape, labels.dtype))
# Show images.
imshow(torchvision.utils.make_grid(images))
# Print labels.
print(' '.join('%5s' % classes[labels[j]] for j in range(len(labels))))
#--------------------
# Define a Convolutional Neural Network.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Assuming that we are on a CUDA machine, this should print a CUDA device.
print('Device: {}.'.format(device))
net = Net()
net.to(device)
print('Model is on {}.'.format(next(net.parameters()).device))
#--------------------
# Define a Loss function and optimizer.
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
#--------------------
# Train the network.
for epoch in range(num_epochs): # Loop over the dataset multiple times.
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# Get the inputs; data is a list of [inputs, labels].
inputs, labels = data[0].to(device), data[1].to(device)
# Zero the parameter gradients.
optimizer.zero_grad()
# Forward + backward + optimize.
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# Print statistics.
running_loss += loss.item()
if i % 20 == 19: # Print every 2000 mini-batches.
print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 20))
running_loss = 0.0
print('Finished Training')
#--------------------
# Test the network on the test data.
dataiter = iter(testloader)
images, labels = dataiter.next()
# Print images.
imshow(torchvision.utils.make_grid(images))
print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(len(labels))))
# Now let us see what the neural network thinks these examples above are.
outputs = net(images.to(device))
_, predicted = torch.max(outputs, 1)
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]] for j in range(len(labels))))
# Let us look at how the network performs on the whole dataset.
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
images, labels = data[0].to(device), data[1].to(device)
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (100 * correct / total))
# What are the classes that performed well, and the classes that did not perform well.
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
for data in testloader:
images, labels = data[0].to(device), data[1].to(device)
outputs = net(images)
_, predicted = torch.max(outputs, 1)
c = (predicted == labels).squeeze()
for i in range(len(labels)):
label = labels[i]
class_correct[label] += c[i].item()
class_total[label] += 1
for i in range(10):
print('Accuracy of %5s : %2d %%' % (classes[i], 100 * class_correct[i] / class_total[i]))
#--------------------
# Tracing modules.
if False:
images, labels = dataiter.next()
traced_net = torch.jit.trace(net, (images.to(device),))
torch_script_filepath = "./cifar10_gpu_ts.pth"
traced_net.save(torch_script_filepath)
print("Traced model saved to {}.".format(torch_script_filepath))
def main():
#cifar10_on_cpu()
cifar10_on_gpu()
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
| gpl-2.0 |
nhuntwalker/astroML | book_figures/chapter1/fig_S82_hess.py | 4 | 2146 | """
SDSS Stripe 82 Hess Diagram
---------------------------
Figure 1.10.
A Hess diagram of the r-i vs. g-r colors for the entire set of SDSS Stripe 82
standard stars. The pixels are colored with a logarithmic scaling;
cf. figures 1.6 and 1.9.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.datasets import fetch_sdss_S82standards
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Fetch the stripe 82 data
data = fetch_sdss_S82standards()
g = data['mmu_g']
r = data['mmu_r']
i = data['mmu_i']
#------------------------------------------------------------
# Compute and plot the 2D histogram
H, xbins, ybins = np.histogram2d(g - r, r - i,
bins=(np.linspace(-0.5, 2.5, 50),
np.linspace(-0.5, 2.5, 50)))
# Create a black and white color map where bad data (NaNs) are white
cmap = plt.cm.binary
cmap.set_bad('w', 1.)
# Use the image display function imshow() to plot the result
fig, ax = plt.subplots(figsize=(5, 3.75))
H[H == 0] = 1 # prevent warnings in log10
ax.imshow(np.log10(H).T, origin='lower',
extent=[xbins[0], xbins[-1], ybins[0], ybins[-1]],
cmap=cmap, interpolation='nearest',
aspect='auto')
ax.set_xlabel(r'${\rm g - r}$')
ax.set_ylabel(r'${\rm r - i}$')
ax.set_xlim(-0.6, 2.5)
ax.set_ylim(-0.6, 2.5)
plt.show()
| bsd-2-clause |
ChristopherGS/sensor_readings | app/machine_learning/wrangle.py | 1 | 9750 | import numpy as np
import pandas as pd
import os
import six.moves.cPickle as pickle
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn import cross_validation
from sklearn.cross_validation import KFold
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.grid_search import GridSearchCV
from sklearn.externals import joblib
from flask import (abort, current_app)
from app.data import db, query_to_list
from app.sensors.models import Experiment, Sensor
from sklearn.preprocessing import PolynomialFeatures
from time import time
from scipy.stats import randint as sp_randint
from operator import itemgetter
from utilities import (format_time, print_full, combine_csv, blank_filter, concat_data, resolve_acc_gyro,
resolve_acc_gyro_db, convert_to_words, get_position_stats)
from feature_engineering import create_rm_feature
_basedir = os.path.abspath(os.path.dirname(__file__))
PICKLE = os.path.join(_basedir, 'pickle/training.pkl')
print PICKLE
"""
This file preps the jiu-jitsu motion data for analysis:
Step 1: Combine matching gyroscope and accelerometer rows
Step 2: Combine multiple csv files
Step 3: Label training data
Step 4: Select time interval sequence length to analyze and combine
Step 5: Create combined training file
Step 6: Algorithm explorations (inc. feature engineering)
"""
DIR = os.path.dirname(os.path.realpath(__file__))
pd.set_option('display.width', 1200)
FEATURE_COUNT = 0
TIME_SEQUENCE_LENGTH = 30
polynomial_features = PolynomialFeatures(interaction_only=False, include_bias=True, degree=3)
#================================================================================
# DATA PREPARATION
#================================================================================
def set_state(df, state):
"""set the state for training"""
if state == 'your_mount':
df['state'] = 0
elif state == 'your_side_control':
df['state'] = 1
elif state =='your_closed_guard':
df['state'] = 2
elif state =='your_back_control':
df['state'] = 3
elif state =='opponent_mount_or_sc':
df['state'] = 4
elif state =='opponent_closed_guard':
df['state'] = 5
elif state == 'opponent_back_control':
df['state'] = 6
elif state =='non_jj':
df['state'] = 7
return df
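# The if/elif chain above is equivalent to a dict lookup; an illustrative sketch:
#   STATE_CODES = {'your_mount': 0, 'your_side_control': 1, 'your_closed_guard': 2,
#                  'your_back_control': 3, 'opponent_mount_or_sc': 4,
#                  'opponent_closed_guard': 5, 'opponent_back_control': 6, 'non_jj': 7}
#   df['state'] = STATE_CODES[state]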
def combine_setState_createFeatures(directory, state):
"""
convenience method to combine three steps in one function:
(1) combine multiple csv files, (2) set their movement state for training,
(3) combine to create time sequences and add features
"""
combined_data = combine_csv(directory)
combined_data_updated = set_state(combined_data, state) # TO CHECK: probably not necessary
feature_training_data = create_rm_feature(combined_data_updated, TIME_SEQUENCE_LENGTH)
ready_training_data = set_state(feature_training_data, state)
return ready_training_data
def prep():
"""prepare the raw sensor data"""
#1 Your mount
ymount_td = combine_setState_createFeatures('your_mount_raw_data', 'your_mount')
#2 Your side control
ysc_td = combine_setState_createFeatures('your_side_control_raw_data', 'your_side_control')
#3 Your closed guard
ycg_td = combine_setState_createFeatures('your_closed_guard_raw_data', 'your_closed_guard')
#4 Your back control
ybc_td = combine_setState_createFeatures('your_back_control_raw_data', 'your_back_control')
#5 Opponent mount or opponent side control
omountsc_td = combine_setState_createFeatures('opponent_mount_and_opponent_side_control_raw_data', 'opponent_mount_or_sc')
#6 Opponent closed guard
ocg_td = combine_setState_createFeatures('opponent_closed_guard_raw_data', 'opponent_closed_guard')
#7 Opponent back control
obc_td = combine_setState_createFeatures('opponent_back_control_raw_data', 'opponent_back_control')
#8 "Non jiu-jitsu" motion
nonjj_td = combine_setState_createFeatures('non_jj_raw_data', 'non_jj')
training_data = concat_data([ymount_td, ysc_td, ycg_td, ybc_td, omountsc_td, ocg_td, obc_td, nonjj_td])
# remove NaN
training_data = blank_filter(training_data)
return training_data
def prep_test(el_file):
el_file = DIR + '/data/test_cases/' + el_file
df = pd.DataFrame()
df = pd.read_csv(el_file, index_col=None, header=0)
df = resolve_acc_gyro(df)
df = create_rm_feature(df, TIME_SEQUENCE_LENGTH)
test_data = blank_filter(df)
return test_data
#================================================================================
# MACHINE LEARNING
#================================================================================
"""
Things to try:
- Adjust random forest number of trees (a tuning sketch follows this docstring)
- Adjust data time intervals
- Adjust general jj data quantity
- Add features - not sure whether to create them before or after time sequence creation
"""
def test_model(df_train):
"""check model accuracy"""
y = df_train['state'].values
X = df_train.drop(['state', 'index'], axis=1)
if X.isnull().values.any() == False:
rf = RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
max_depth=None, max_features='auto', max_leaf_nodes=None,
min_samples_leaf=8, min_samples_split=4,
min_weight_fraction_leaf=0.0, n_estimators=5000, n_jobs=-1,
oob_score=False, random_state=None, verbose=2,
warm_start=False)
X = polynomial_features.fit_transform(X)
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.1)
    else:
        print "Found NaN values"
        return
# Get the prediction accuracy
rf.fit(X_train, y_train)
rf_pred = rf.predict(X_test)
rf_scores = cross_validation.cross_val_score(
rf, X, df_train.state, cv=10, scoring='accuracy')
print 'rf prediction: {}'.format(accuracy_score(y_test, rf_pred))
print("Random Forest Accuracy: %0.2f (+/- %0.2f)" % (rf_scores.mean(), rf_scores.std() * 2))
def trial(df_train, test_data):
"""
Test 1: 1s followed by 3s
"""
y = df_train['state'].values
X = df_train.drop(['state', 'index'], axis=1)
if X.isnull().values.any() == False:
rf = RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
max_depth=None, max_features='auto', max_leaf_nodes=None,
min_samples_leaf=8, min_samples_split=4,
min_weight_fraction_leaf=0.0, n_estimators=5000, n_jobs=-1,
oob_score=False, random_state=None, verbose=0,
warm_start=False)
X = polynomial_features.fit_transform(X)
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.1)
    else:
        print "Found NaN values"
        return
rf.fit(X_train, y_train)
rf_pred2 = rf.predict(test_data)
final_prediction = convert_to_words(rf_pred2)
print_full(final_prediction)
get_position_stats(final_prediction)
##############
#API METHODS
##############
def api_serialize():
training_data = prep()
y = training_data['state'].values
X = training_data.drop(['state', 'index'], axis=1)
if X.isnull().values.any() == False:
rf = RandomForestClassifier(bootstrap=True, class_weight=None, criterion='gini',
max_depth=None, max_features='auto', max_leaf_nodes=None,
min_samples_leaf=8, min_samples_split=4,
min_weight_fraction_leaf=0.0, n_estimators=5000, n_jobs=-1,
oob_score=False, random_state=None, verbose=2,
warm_start=False)
X = polynomial_features.fit_transform(X)
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.1)
    else:
        print "Found NaN values"
        return
rf.fit(X_train, y_train)
joblib.dump(rf, PICKLE, compress=3)
def api_test(experiment_id_number):
"""Prepare an experiment already uploaded to the db
to be run against the model"""
clean_experiment_number = int(experiment_id_number)
try:
query = db.session.query(Sensor)
df = pd.read_sql_query(query.statement, query.session.bind)
df2 = df[df['experiment_id']==clean_experiment_number]
current_app.logger.debug(df2)
current_app.logger.debug('run resolve_acc_gyro')
df2 = resolve_acc_gyro_db(df2)
current_app.logger.debug(df2)
current_app.logger.debug('run create_rm_feature')
df2 = create_rm_feature(df2, TIME_SEQUENCE_LENGTH)
current_app.logger.debug(df2)
test_data = blank_filter(df2)
current_app.logger.debug(test_data)
# TODO: NEED TO MAKE SURE FEATURE NUMBER IS THE SAME
#Xt = df2.drop(['state', 'index'], axis=1)
Xt = polynomial_features.fit_transform(test_data)
current_app.logger.debug(Xt)
return Xt
except Exception as e:
current_app.logger.debug('error: {}'.format(e))
abort(500)
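# Hedged sketch (assumed helper, not in the original file): how api_test()'s
# output would be scored with the model pickled by api_serialize(). Assumes
# the pickle at PICKLE exists and was produced with the same feature pipeline.
def api_predict(experiment_id_number):
    Xt = api_test(experiment_id_number)
    rf = joblib.load(PICKLE)
    return convert_to_words(rf.predict(Xt))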
def start():
"""Start here"""
print "Begin analysis"
training_data = prep()
print 'Finished preparing training data, total length: {}'.format(len(training_data))
print training_data
test_data1 = prep_test('test1_ymount_ycg.csv')
test_data2 = prep_test('test2_ysc_ymount_ybc.csv')
test_data3 = prep_test('test3_omount_ycg_ymount_ocg_obc.csv')
test_data4 = prep_test('test4_osc_omount_ycg.csv')
test_model(training_data)
#trial(training_data, test_data2)
if __name__ == "__main__":
start()
| bsd-3-clause |
kylepjohnson/ipython | public_talks/2016_02_26_columbia/do_ml_on_feature_tables (all.csv).py | 2 | 5547 |
# coding: utf-8
# Script to demo scikit for tweet popular/unpopular classification.
# In[1]:
from __future__ import division
from __future__ import print_function
import csv
import datetime as dt
import os
import platform
import sys
import numpy as np
import pandas
from sklearn import preprocessing
from sklearn import svm
from sklearn import tree
from sklearn.cross_validation import train_test_split
from sklearn.externals import joblib
from sklearn.feature_extraction import DictVectorizer
from sklearn.metrics import classification_report
# In[2]:
def csv_to_dict_cesar(csv_filename):
# Let's say, We are intersted in only count features
count_features = ['_char_count', '_hashtag_count', '_word_count', '_url_count']
with open(csv_filename) as f:
features = [({k: int(v) for k, v in row.items() if k in count_features}, row['_popular'])
for row in csv.DictReader(f, skipinitialspace=True)]
X = [f[0] for f in features]
Y = [f[1] for f in features]
return (X, Y)
# In[3]:
def csv_to_dict(csv_filename):
"""Open feature table with csv library.
Task: Run with '_rt_count'. See the good results!
"""
non_numeric_features = ['', '_text', '_urls', '_mentions', '_hashtags',
'_tweet_datetime', '_popular', '_rt_count']
with open(csv_filename, 'rU') as f:
rows = csv.DictReader(f, skipinitialspace=True, delimiter='|')
labels = [row['_popular'] for row in rows]
features = []
with open(csv_filename, 'rU') as f:
rows = csv.DictReader(f, skipinitialspace=True, delimiter='|')
for row in rows:
#print(row)
row_dict = {}
for k, v in row.items():
if k not in non_numeric_features:
try:
row_dict[k] = int(v)
# these tries catch a few junk entries
except TypeError:
row_dict[k] = 0
except ValueError:
row_dict[k] = 0
#row_dict = {k: int(v) for k, v in row.items() if k not in non_numeric_features}
features.append(row_dict)
return features, labels
# In[4]:
def csv_to_df(csv_file):
"""Open csv with Pandas DataFrame, then convert to dict
and return.
    TODO: Fix this (a possible approach is sketched after this function).
"""
dataframe = pandas.read_csv(csv_file,
encoding='utf-8',
engine='python',
sep='|',
delimiter='|',
index_col=0)
return dataframe
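# Hedged sketch for the TODO above (not original code): one way to finish
# csv_to_df() is to split the frame into feature dicts and labels, mirroring
# csv_to_dict(). Column names are assumed to match the feature tables used
# elsewhere in this notebook.
def df_to_features(dataframe):
    non_numeric = ['_text', '_urls', '_mentions', '_hashtags',
                   '_tweet_datetime', '_popular', '_rt_count']
    labels = dataframe['_popular'].tolist()
    drop_cols = [c for c in non_numeric if c in dataframe.columns]
    features = dataframe.drop(drop_cols, axis=1).to_dict('records')
    return features, labels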
# In[5]:
def train(csv_filename):
print('Loading CSV into dict ...')
t0 = dt.datetime.utcnow()
data, target = csv_to_dict(csv_filename)
print('... finished in {} secs.'.format(dt.datetime.utcnow() - t0))
print()
print('Loading dict into vectorizer')
t0 = dt.datetime.utcnow()
vec = DictVectorizer()
X = vec.fit_transform(data).toarray() # change to numpy array
Y = np.array(target) # change to numpy array
print('... finished in {} secs.'.format(dt.datetime.utcnow() - t0))
print()
'''
-In case we need to know the features
'''
feature_names = vec.get_feature_names()
'''
-Dividing the data into train and test
-random_state is pseudo-random number generator state used for
random sampling
'''
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, random_state=0)
# write models dir if not present
models_dir = 'models'
if not os.path.isdir(models_dir):
os.mkdir(models_dir)
'''
    -PREPROCESSING
    -Here, scaled data has zero mean and unit variance
    -We save the scaler for later use with testing/prediction data
     (a reload sketch follows train() below)
'''
print('Scaling data ...')
t0 = dt.datetime.utcnow()
scaler = preprocessing.StandardScaler().fit(X_train)
joblib.dump(scaler, 'models/scaler.pickle')
X_train_scaled = scaler.transform(X_train)
X_test_scaled = scaler.transform(X_test)
print('... finished in {} secs.'.format(dt.datetime.utcnow() - t0))
print()
'''
-This is where we define the models
-Here, I use SVM and Decision tree with pre-defined parameters
-We can learn these parameters given our data
'''
print('Defining and fitting models ...')
t0 = dt.datetime.utcnow()
clf0 = svm.LinearSVC(C=100.)
clf1 = tree.DecisionTreeClassifier()
clf0.fit(X_train_scaled, Y_train)
clf1.fit(X_train_scaled, Y_train)
joblib.dump(clf0, 'models/svc.pickle')
joblib.dump(clf1, 'models/tree.pickle')
print('... finished in {} secs.'.format(dt.datetime.utcnow() - t0))
print()
Y_prediction_svc = clf0.predict(X_test_scaled)
print('svc_predictions ', Y_prediction_svc)
Y_prediction_tree = clf1.predict(X_test_scaled)
print('tree_predictions ', Y_prediction_tree)
expected = Y_test
print('actual_values ', expected)
print()
'''
    Classification metrics
(Case 1): SVMs
'''
print()
print('----Linear SVC_report--------------------------')
print(classification_report(expected, Y_prediction_svc))
'''
Classification metrics
(case 2): Decision tree
'''
print()
print('----Tree_report--------------------------------')
print(classification_report(expected, Y_prediction_tree))
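# In[ ]:
# Hedged sketch (not in the original notebook): reloading the scaler and SVC
# pickled by train() to score new data. It assumes X_new was vectorized with
# the same DictVectorizer fit in train(), which this notebook does not
# currently persist.
def predict(X_new):
    scaler = joblib.load('models/scaler.pickle')
    clf = joblib.load('models/svc.pickle')
    return clf.predict(scaler.transform(X_new))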
# In[ ]:
train("feature_tables/all.csv")
# In[ ]:
| mit |
herilalaina/scikit-learn | sklearn/preprocessing/tests/test_data.py | 1 | 84793 | # Authors:
#
# Giorgio Patrini
#
# License: BSD 3 clause
from __future__ import division
import warnings
import re
import numpy as np
import numpy.linalg as la
from scipy import sparse
from distutils.version import LooseVersion
from sklearn.utils import gen_batches
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_allclose
from sklearn.utils.testing import skip_if_32bit
from sklearn.utils.testing import SkipTest
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing.data import _transform_selected
from sklearn.preprocessing.data import _handle_zeros_in_scale
from sklearn.preprocessing.data import Binarizer
from sklearn.preprocessing.data import KernelCenterer
from sklearn.preprocessing.data import Normalizer
from sklearn.preprocessing.data import normalize
from sklearn.preprocessing.data import OneHotEncoder
from sklearn.preprocessing.data import CategoricalEncoder
from sklearn.preprocessing.data import StandardScaler
from sklearn.preprocessing.data import scale
from sklearn.preprocessing.data import MinMaxScaler
from sklearn.preprocessing.data import minmax_scale
from sklearn.preprocessing.data import QuantileTransformer
from sklearn.preprocessing.data import quantile_transform
from sklearn.preprocessing.data import MaxAbsScaler
from sklearn.preprocessing.data import maxabs_scale
from sklearn.preprocessing.data import RobustScaler
from sklearn.preprocessing.data import robust_scale
from sklearn.preprocessing.data import add_dummy_feature
from sklearn.preprocessing.data import PolynomialFeatures
from sklearn.exceptions import DataConversionWarning
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_predict
from sklearn.svm import SVR
from sklearn import datasets
iris = datasets.load_iris()
# Make some data to be used many times
rng = np.random.RandomState(0)
n_features = 30
n_samples = 1000
offsets = rng.uniform(-1, 1, size=n_features)
scales = rng.uniform(1, 10, size=n_features)
X_2d = rng.randn(n_samples, n_features) * scales + offsets
X_1row = X_2d[0, :].reshape(1, n_features)
X_1col = X_2d[:, 0].reshape(n_samples, 1)
X_list_1row = X_1row.tolist()
X_list_1col = X_1col.tolist()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def _check_dim_1axis(a):
if isinstance(a, list):
return np.array(a).shape[0]
return a.shape[0]
def assert_correct_incr(i, batch_start, batch_stop, n, chunk_size,
n_samples_seen):
if batch_stop != n:
assert_equal((i + 1) * chunk_size, n_samples_seen)
else:
assert_equal(i * chunk_size + (batch_stop - batch_start),
n_samples_seen)
def test_polynomial_features():
# Test Polynomial Features
X1 = np.arange(6)[:, np.newaxis]
P1 = np.hstack([np.ones_like(X1),
X1, X1 ** 2, X1 ** 3])
deg1 = 3
X2 = np.arange(6).reshape((3, 2))
x1 = X2[:, :1]
x2 = X2[:, 1:]
P2 = np.hstack([x1 ** 0 * x2 ** 0,
x1 ** 1 * x2 ** 0,
x1 ** 0 * x2 ** 1,
x1 ** 2 * x2 ** 0,
x1 ** 1 * x2 ** 1,
x1 ** 0 * x2 ** 2])
deg2 = 2
for (deg, X, P) in [(deg1, X1, P1), (deg2, X2, P2)]:
P_test = PolynomialFeatures(deg, include_bias=True).fit_transform(X)
assert_array_almost_equal(P_test, P)
P_test = PolynomialFeatures(deg, include_bias=False).fit_transform(X)
assert_array_almost_equal(P_test, P[:, 1:])
interact = PolynomialFeatures(2, interaction_only=True, include_bias=True)
X_poly = interact.fit_transform(X)
assert_array_almost_equal(X_poly, P2[:, [0, 1, 2, 4]])
assert_equal(interact.powers_.shape, (interact.n_output_features_,
interact.n_input_features_))
def test_polynomial_feature_names():
X = np.arange(30).reshape(10, 3)
poly = PolynomialFeatures(degree=2, include_bias=True).fit(X)
feature_names = poly.get_feature_names()
assert_array_equal(['1', 'x0', 'x1', 'x2', 'x0^2', 'x0 x1',
'x0 x2', 'x1^2', 'x1 x2', 'x2^2'],
feature_names)
poly = PolynomialFeatures(degree=3, include_bias=False).fit(X)
feature_names = poly.get_feature_names(["a", "b", "c"])
assert_array_equal(['a', 'b', 'c', 'a^2', 'a b', 'a c', 'b^2',
'b c', 'c^2', 'a^3', 'a^2 b', 'a^2 c',
'a b^2', 'a b c', 'a c^2', 'b^3', 'b^2 c',
'b c^2', 'c^3'], feature_names)
# test some unicode
poly = PolynomialFeatures(degree=1, include_bias=True).fit(X)
feature_names = poly.get_feature_names(
[u"\u0001F40D", u"\u262E", u"\u05D0"])
assert_array_equal([u"1", u"\u0001F40D", u"\u262E", u"\u05D0"],
feature_names)
def test_standard_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1row]:
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_almost_equal(scaler.mean_, X.ravel())
assert_almost_equal(scaler.scale_, np.ones(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.std(axis=0),
np.zeros_like(n_features))
else:
assert_almost_equal(scaler.mean_, X.mean())
assert_almost_equal(scaler.scale_, X.std())
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_almost_equal(scaler.mean_, 1.)
assert_almost_equal(scaler.scale_, 1.)
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), .0)
assert_equal(scaler.n_samples_seen_, X.shape[0])
def test_scale_1d():
# 1-d inputs
X_list = [1., 3., 5., 0.]
X_arr = np.array(X_list)
for X in [X_list, X_arr]:
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(), 0.0)
assert_array_almost_equal(X_scaled.std(), 1.0)
assert_array_equal(scale(X, with_mean=False, with_std=False), X)
@skip_if_32bit
def test_standard_scaler_numerical_stability():
# Test numerical stability of scaling
    # np.log(1e-5) is taken because its floating point representation
# was empirically found to cause numerical problems with np.mean & np.std.
x = np.zeros(8, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
if LooseVersion(np.__version__) >= LooseVersion('1.9'):
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
x_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(scale(x), np.zeros(8))
else:
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(8))
# with 2 more samples, the std computation run into numerical issues:
x = np.zeros(10, dtype=np.float64) + np.log(1e-5, dtype=np.float64)
w = "standard deviation of the data is probably very close to 0"
x_scaled = assert_warns_message(UserWarning, w, scale, x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.ones(10, dtype=np.float64) * 1e-100
x_small_scaled = assert_no_warnings(scale, x)
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.ones(10, dtype=np.float64) * 1e100
w = "Dataset may contain too large values"
x_big_scaled = assert_warns_message(UserWarning, w, scale, x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
x_big_centered = assert_warns_message(UserWarning, w, scale, x_big,
with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
n_features = 5
n_samples = 4
X = rng.randn(n_samples, n_features)
X[:, 0] = 0.0 # first feature is always of zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_equal(scaler.n_samples_seen_, n_samples)
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert_true(X_scaled is not X)
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0])
# Check that the data hasn't been modified
assert_true(X_scaled is not X)
X_scaled = scaler.fit(X).transform(X, copy=False)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is X)
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
def test_handle_zeros_in_scale():
s1 = np.array([0, 1, 2, 3])
s2 = _handle_zeros_in_scale(s1, copy=True)
assert_false(s1[0] == s2[0])
assert_array_equal(s1, np.array([0, 1, 2, 3]))
assert_array_equal(s2, np.array([1, 1, 2, 3]))
def test_minmax_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler()
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MinMaxScaler().fit(X[batch0])
scaler_incr = MinMaxScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std until the end of partial fits, and
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = StandardScaler(with_std=False).fit(X)
scaler_incr = StandardScaler(with_std=False)
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_)
assert_equal(scaler_batch.var_, scaler_incr.var_) # Nones
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_incr = StandardScaler().partial_fit(X[batch0])
if chunk_size == 1:
assert_array_almost_equal(np.zeros(n_features, dtype=np.float64),
scaler_incr.var_)
assert_array_almost_equal(np.ones(n_features, dtype=np.float64),
scaler_incr.scale_)
else:
assert_array_almost_equal(np.var(X[batch0], axis=0),
scaler_incr.var_)
assert_array_almost_equal(np.std(X[batch0], axis=0),
scaler_incr.scale_) # no constants
# Test std until the end of partial fits, and
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit_numerical_stability():
    # Test if the incremental computation introduces significant errors
    # for large datasets with values of large magnitude
rng = np.random.RandomState(0)
n_features = 2
n_samples = 100
offsets = rng.uniform(-1e15, 1e15, size=n_features)
scales = rng.uniform(1e3, 1e6, size=n_features)
X = rng.randn(n_samples, n_features) * scales + offsets
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler()
for chunk in X:
scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features))
    # Regardless of abs values, they must not differ by more than 6 significant digits
tol = 10 ** (-6)
assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol)
assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol)
# NOTE Be aware that for much larger offsets std is very unstable (last
# assert) while mean is OK.
# Sparse input
size = (100, 3)
scale = 1e20
X = rng.randint(0, 2, size).astype(np.float64) * scale
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
for X in [X_csr, X_csc]:
# with_mean=False is required with sparse input
scaler = StandardScaler(with_mean=False).fit(X)
scaler_incr = StandardScaler(with_mean=False)
for chunk in X:
# chunk = sparse.csr_matrix(data_chunks)
scaler_incr = scaler_incr.partial_fit(chunk)
        # Regardless of magnitude, they must not differ by more than 6 digits
tol = 10 ** (-6)
assert_true(scaler.mean_ is not None)
assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol)
def test_partial_fit_sparse_input():
# Check that sparsity is not destroyed
X = np.array([[1.], [0.], [0.], [5.]])
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
for X in [X_csr, X_csc]:
X_null = null_transform.partial_fit(X).transform(X)
assert_array_equal(X_null.data, X.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_null.data)
assert_array_equal(X_orig.data, X.data)
def test_standard_scaler_trasform_with_partial_fit():
# Check some postconditions after applying partial_fit and transform
X = X_2d[:100, :]
scaler_incr = StandardScaler()
for i, batch in enumerate(gen_batches(X.shape[0], 1)):
X_sofar = X[:(i + 1), :]
chunks_copy = X_sofar.copy()
scaled_batch = StandardScaler().fit_transform(X_sofar)
scaler_incr = scaler_incr.partial_fit(X[batch])
scaled_incr = scaler_incr.transform(X_sofar)
assert_array_almost_equal(scaled_batch, scaled_incr)
assert_array_almost_equal(X_sofar, chunks_copy) # No change
right_input = scaler_incr.inverse_transform(scaled_incr)
assert_array_almost_equal(X_sofar, right_input)
zero = np.zeros(X.shape[1])
epsilon = np.nextafter(0, 1)
assert_array_less(zero, scaler_incr.var_ + epsilon) # as less or equal
assert_array_less(zero, scaler_incr.scale_ + epsilon)
# (i+1) because the Scaler has been already fitted
assert_equal((i + 1), scaler_incr.n_samples_seen_)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
assert_raises(ValueError, scaler.fit, X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1row]:
scaler = MinMaxScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(X_scaled.min(axis=0),
np.zeros(n_features))
assert_array_almost_equal(X_scaled.max(axis=0),
np.zeros(n_features))
else:
assert_array_almost_equal(X_scaled.min(axis=0), .0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert_greater_equal(X_scaled.min(), 0.)
assert_less_equal(X_scaled.max(), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# Function interface
X_1d = X_1row.ravel()
min_ = X_1d.min()
max_ = X_1d.max()
assert_array_almost_equal((X_1d - min_) / (max_ - min_),
minmax_scale(X_1d, copy=True))
def test_scaler_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
assert_raises(ValueError, StandardScaler().fit, X_csr)
assert_raises(ValueError, StandardScaler().fit, X_csc)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
X[:, 0] = 0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
clean_warning_registry()
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert_false(np.any(np.isnan(X_scaled)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
clean_warning_registry()
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert_false(np.any(np.isnan(X_csc_scaled.data)))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(np.float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert_true(X_scaled is not X)
assert_true(X_csr_scaled is not X_csr)
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_true(X_scaled_back is not X)
assert_true(X_scaled_back is not X_scaled)
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert_true(X_csr_scaled_back is not X_csr)
assert_true(X_csr_scaled_back is not X_csr_scaled)
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert_true(X_csc_scaled_back is not X_csc)
assert_true(X_csc_scaled_back is not X_csc_scaled)
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
X_csc_copy = X_csc.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csc)
assert_array_equal(X_csc.toarray(), X_csc_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
# check scaling and fit with direct calls on sparse data
assert_raises(ValueError, scale, X_csr, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csr)
assert_raises(ValueError, scale, X_csc, with_mean=True)
assert_raises(ValueError, StandardScaler(with_mean=True).fit, X_csc)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
assert_raises(ValueError, scaler.transform, X_csr)
assert_raises(ValueError, scaler.transform, X_csc)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csr)
X_transformed_csc = sparse.csc_matrix(scaler.transform(X))
assert_raises(ValueError, scaler.inverse_transform, X_transformed_csc)
def test_scale_input_finiteness_validation():
# Check if non finite inputs raise ValueError
X = [[np.nan, 5, 6, 7, 8]]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
X = [[np.inf, 5, 6, 7, 8]]
assert_raises_regex(ValueError,
"Input contains NaN, infinity or a value too large",
scale, X)
def test_robust_scaler_2d_arrays():
# Test robust scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
def test_robust_scaler_transform_one_row_csr():
# Check RobustScaler on transforming csr matrix with one row
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
single_row = np.array([[0.1, 1., 2., 0., -1.]])
scaler = RobustScaler(with_centering=False)
scaler = scaler.fit(X)
row_trans = scaler.transform(sparse.csr_matrix(single_row))
row_expected = single_row / scaler.scale_
assert_array_almost_equal(row_trans.toarray(), row_expected)
row_scaled_back = scaler.inverse_transform(row_trans)
assert_array_almost_equal(single_row, row_scaled_back.toarray())
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_iris_quantiles():
X = iris.data
scaler = RobustScaler(quantile_range=(10, 90))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(10, 90), axis=0)
q_range = q[1] - q[0]
assert_array_almost_equal(q_range, 1)
def test_quantile_transform_iris():
X = iris.data
# uniform output distribution
transformer = QuantileTransformer(n_quantiles=30)
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# normal output distribution
transformer = QuantileTransformer(n_quantiles=30,
output_distribution='normal')
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure it is possible to take the inverse of a sparse matrix
    # which contains negative values; this is the case in the iris dataset
X_sparse = sparse.csc_matrix(X)
X_sparse_tran = transformer.fit_transform(X_sparse)
X_sparse_tran_inv = transformer.inverse_transform(X_sparse_tran)
assert_array_almost_equal(X_sparse.A, X_sparse_tran_inv.A)
def test_quantile_transform_check_error():
X = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[2, 4, 0, 0, 6, 8, 0, 10, 0, 0],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
X = sparse.csc_matrix(X)
X_neg = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[-2, 4, 0, 0, 6, 8, 0, 10, 0, 0],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
X_neg = sparse.csc_matrix(X_neg)
assert_raises_regex(ValueError, "Invalid value for 'n_quantiles': 0.",
QuantileTransformer(n_quantiles=0).fit, X)
assert_raises_regex(ValueError, "Invalid value for 'subsample': 0.",
QuantileTransformer(subsample=0).fit, X)
assert_raises_regex(ValueError, "The number of quantiles cannot be"
" greater than the number of samples used. Got"
" 1000 quantiles and 10 samples.",
QuantileTransformer(subsample=10).fit, X)
transformer = QuantileTransformer(n_quantiles=10)
assert_raises_regex(ValueError, "QuantileTransformer only accepts "
"non-negative sparse matrices.",
transformer.fit, X_neg)
transformer.fit(X)
assert_raises_regex(ValueError, "QuantileTransformer only accepts "
"non-negative sparse matrices.",
transformer.transform, X_neg)
X_bad_feat = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
assert_raises_regex(ValueError, "X does not have the same number of "
"features as the previously fitted data. Got 2"
" instead of 3.",
transformer.transform, X_bad_feat)
assert_raises_regex(ValueError, "X does not have the same number of "
"features as the previously fitted data. Got 2"
" instead of 3.",
transformer.inverse_transform, X_bad_feat)
transformer = QuantileTransformer(n_quantiles=10,
output_distribution='rnd')
# check that an error is raised at fit time
assert_raises_regex(ValueError, "'output_distribution' has to be either"
" 'normal' or 'uniform'. Got 'rnd' instead.",
transformer.fit, X)
# check that an error is raised at transform time
transformer.output_distribution = 'uniform'
transformer.fit(X)
X_tran = transformer.transform(X)
transformer.output_distribution = 'rnd'
assert_raises_regex(ValueError, "'output_distribution' has to be either"
" 'normal' or 'uniform'. Got 'rnd' instead.",
transformer.transform, X)
# check that an error is raised at inverse_transform time
assert_raises_regex(ValueError, "'output_distribution' has to be either"
" 'normal' or 'uniform'. Got 'rnd' instead.",
transformer.inverse_transform, X_tran)
# check that an error is raised if input is scalar
assert_raise_message(ValueError,
'Expected 2D array, got scalar array instead',
transformer.transform, 10)
def test_quantile_transform_sparse_ignore_zeros():
X = np.array([[0, 1],
[0, 0],
[0, 2],
[0, 2],
[0, 1]])
X_sparse = sparse.csc_matrix(X)
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5)
# dense case -> warning raise
assert_warns_message(UserWarning, "'ignore_implicit_zeros' takes effect"
" only with sparse matrix. This parameter has no"
" effect.", transformer.fit, X)
X_expected = np.array([[0, 0],
[0, 0],
[0, 1],
[0, 1],
[0, 0]])
X_trans = transformer.fit_transform(X_sparse)
assert_almost_equal(X_expected, X_trans.A)
# consider the case where sparse entries are missing values and user-given
# zeros are to be considered
X_data = np.array([0, 0, 1, 0, 2, 2, 1, 0, 1, 2, 0])
X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1])
X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8])
X_sparse = sparse.csc_matrix((X_data, (X_row, X_col)))
X_trans = transformer.fit_transform(X_sparse)
X_expected = np.array([[0., 0.5],
[0., 0.],
[0., 1.],
[0., 1.],
[0., 0.5],
[0., 0.],
[0., 0.5],
[0., 1.],
[0., 0.]])
assert_almost_equal(X_expected, X_trans.A)
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5)
X_data = np.array([-1, -1, 1, 0, 0, 0, 1, -1, 1])
X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1])
X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6])
X_sparse = sparse.csc_matrix((X_data, (X_row, X_col)))
X_trans = transformer.fit_transform(X_sparse)
X_expected = np.array([[0, 1],
[0, 0.375],
[0, 0.375],
[0, 0.375],
[0, 1],
[0, 0],
[0, 1]])
assert_almost_equal(X_expected, X_trans.A)
assert_almost_equal(X_sparse.A, transformer.inverse_transform(X_trans).A)
# check in conjunction with subsampling
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5,
subsample=8,
random_state=0)
X_trans = transformer.fit_transform(X_sparse)
assert_almost_equal(X_expected, X_trans.A)
assert_almost_equal(X_sparse.A, transformer.inverse_transform(X_trans).A)
def test_quantile_transform_dense_toy():
X = np.array([[0, 2, 2.6],
[25, 4, 4.1],
[50, 6, 2.3],
[75, 8, 9.5],
[100, 10, 0.1]])
transformer = QuantileTransformer(n_quantiles=5)
transformer.fit(X)
    # using a uniform output, each entry of X should be mapped between 0 and 1
# and equally spaced
X_trans = transformer.fit_transform(X)
X_expected = np.tile(np.linspace(0, 1, num=5), (3, 1)).T
assert_almost_equal(np.sort(X_trans, axis=0), X_expected)
X_test = np.array([
[-1, 1, 0],
[101, 11, 10],
])
X_expected = np.array([
[0, 0, 0],
[1, 1, 1],
])
assert_array_almost_equal(transformer.transform(X_test), X_expected)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
def test_quantile_transform_subsampling():
    # Test that subsampling the input yields consistent results. We check
# that the computed quantiles are almost mapped to a [0, 1] vector where
# values are equally spaced. The infinite norm is checked to be smaller
# than a given threshold. This is repeated 5 times.
# dense support
n_samples = 1000000
n_quantiles = 1000
X = np.sort(np.random.sample((n_samples, 1)), axis=0)
ROUND = 5
inf_norm_arr = []
for random_state in range(ROUND):
transformer = QuantileTransformer(random_state=random_state,
n_quantiles=n_quantiles,
subsample=n_samples // 10)
transformer.fit(X)
diff = (np.linspace(0, 1, n_quantiles) -
np.ravel(transformer.quantiles_))
inf_norm = np.max(np.abs(diff))
assert_true(inf_norm < 1e-2)
inf_norm_arr.append(inf_norm)
    # each random subsampling yields a unique approximation to the expected
# linspace CDF
assert_equal(len(np.unique(inf_norm_arr)), len(inf_norm_arr))
# sparse support
# TODO: rng should be seeded once we drop support for older versions of
# scipy (< 0.13) that don't support seeding.
X = sparse.rand(n_samples, 1, density=.99, format='csc')
inf_norm_arr = []
for random_state in range(ROUND):
transformer = QuantileTransformer(random_state=random_state,
n_quantiles=n_quantiles,
subsample=n_samples // 10)
transformer.fit(X)
diff = (np.linspace(0, 1, n_quantiles) -
np.ravel(transformer.quantiles_))
inf_norm = np.max(np.abs(diff))
assert_true(inf_norm < 1e-1)
inf_norm_arr.append(inf_norm)
    # each random subsampling yields a unique approximation to the expected
# linspace CDF
assert_equal(len(np.unique(inf_norm_arr)), len(inf_norm_arr))
def test_quantile_transform_sparse_toy():
X = np.array([[0., 2., 0.],
[25., 4., 0.],
[50., 0., 2.6],
[0., 0., 4.1],
[0., 6., 0.],
[0., 8., 0.],
[75., 0., 2.3],
[0., 10., 0.],
[0., 0., 9.5],
[100., 0., 0.1]])
X = sparse.csc_matrix(X)
transformer = QuantileTransformer(n_quantiles=10)
transformer.fit(X)
X_trans = transformer.fit_transform(X)
assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.)
assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_trans_inv.toarray())
transformer_dense = QuantileTransformer(n_quantiles=10).fit(
X.toarray())
X_trans = transformer_dense.transform(X)
assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.)
assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.)
X_trans_inv = transformer_dense.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_trans_inv.toarray())
def test_quantile_transform_axis1():
X = np.array([[0, 25, 50, 75, 100],
[2, 4, 6, 8, 10],
[2.6, 4.1, 2.3, 9.5, 0.1]])
X_trans_a0 = quantile_transform(X.T, axis=0, n_quantiles=5)
X_trans_a1 = quantile_transform(X, axis=1, n_quantiles=5)
assert_array_almost_equal(X_trans_a0, X_trans_a1.T)
def test_quantile_transform_bounds():
    # Lower and upper bounds are manually mapped. We check that in the case
    # of a constant feature and a binary feature, the bounds are properly mapped.
X_dense = np.array([[0, 0],
[0, 0],
[1, 0]])
X_sparse = sparse.csc_matrix(X_dense)
# check sparse and dense are consistent
X_trans = QuantileTransformer(n_quantiles=3,
random_state=0).fit_transform(X_dense)
assert_array_almost_equal(X_trans, X_dense)
X_trans_sp = QuantileTransformer(n_quantiles=3,
random_state=0).fit_transform(X_sparse)
assert_array_almost_equal(X_trans_sp.A, X_dense)
assert_array_almost_equal(X_trans, X_trans_sp.A)
# check the consistency of the bounds by learning on 1 matrix
# and transforming another
X = np.array([[0, 1],
[0, 0.5],
[1, 0]])
X1 = np.array([[0, 0.1],
[0, 0.5],
[1, 0.1]])
transformer = QuantileTransformer(n_quantiles=3).fit(X)
X_trans = transformer.transform(X1)
assert_array_almost_equal(X_trans, X1)
# check that values outside of the range learned will be mapped properly.
X = np.random.random((1000, 1))
transformer = QuantileTransformer()
transformer.fit(X)
assert_equal(transformer.transform([[-10]]),
transformer.transform([[np.min(X)]]))
assert_equal(transformer.transform([[10]]),
transformer.transform([[np.max(X)]]))
assert_equal(transformer.inverse_transform([[-10]]),
transformer.inverse_transform(
[[np.min(transformer.references_)]]))
assert_equal(transformer.inverse_transform([[10]]),
transformer.inverse_transform(
[[np.max(transformer.references_)]]))
def test_quantile_transform_and_inverse():
# iris dataset
X = iris.data
transformer = QuantileTransformer(n_quantiles=1000, random_state=0)
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
def test_robust_scaler_invalid_range():
for range_ in [
(-1, 90),
(-2, -3),
(10, 101),
(100.5, 101),
(90, 50),
]:
scaler = RobustScaler(quantile_range=range_)
assert_raises_regex(ValueError, 'Invalid quantile range: \(',
scaler.fit, iris.data)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X[:, 0] = 0.0 # first feature is always of zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert_false(np.any(np.isnan(X_scaled)))
X_csr_scaled = scale(X_csr, with_mean=False)
assert_false(np.any(np.isnan(X_csr_scaled.data)))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
assert_raises(ValueError, scale, X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert_true(X_scaled is not X)
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# null scale
X_csr_scaled = scale(X_csr, with_mean=False, with_std=False, copy=True)
assert_array_almost_equal(X_csr.toarray(), X_csr_scaled.toarray())
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_1d_array():
X = iris.data[:, 1]
X_trans = robust_scale(X)
assert_array_almost_equal(np.median(X_trans), 0)
q = np.percentile(X_trans, q=(25, 75))
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
# Check RobustScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_maxabs_scaler_zero_variance_features():
# Check MaxAbsScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# function interface
X_trans = maxabs_scale(X)
assert_array_almost_equal(X_trans, X_expected)
# sparse data
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_trans_csr = scaler.fit_transform(X_csr)
X_trans_csc = scaler.fit_transform(X_csc)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans_csr.A, X_expected)
assert_array_almost_equal(X_trans_csc.A, X_expected)
X_trans_csr_inv = scaler.inverse_transform(X_trans_csr)
X_trans_csc_inv = scaler.inverse_transform(X_trans_csc)
assert_array_almost_equal(X, X_trans_csr_inv.A)
assert_array_almost_equal(X, X_trans_csc_inv.A)
def test_maxabs_scaler_large_negative_value():
# Check MaxAbsScaler on toy data with a large negative value
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_maxabs_scaler_transform_one_row_csr():
# Check MaxAbsScaler on transforming csr matrix with one row
X = sparse.csr_matrix([[0.5, 1., 1.]])
scaler = MaxAbsScaler()
scaler = scaler.fit(X)
X_trans = scaler.transform(X)
X_expected = sparse.csr_matrix([[1., 1., 1.]])
assert_array_almost_equal(X_trans.toarray(), X_expected.toarray())
X_scaled_back = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_scaled_back.toarray())
def test_warning_scaling_integers():
# Check warning when scaling integer data
X = np.array([[1, 2, 0],
[0, 0, 0]], dtype=np.uint8)
w = "Data with input dtype uint8 was converted to float64"
clean_warning_registry()
assert_warns_message(DataConversionWarning, w, scale, X)
assert_warns_message(DataConversionWarning, w, StandardScaler().fit, X)
assert_warns_message(DataConversionWarning, w, MinMaxScaler().fit, X)
def test_maxabs_scaler_1d():
# Test scaling of dataset along single axis
for X in [X_1row, X_1col, X_list_1row, X_list_1row]:
scaler = MaxAbsScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)),
np.ones(n_features))
else:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones(5).reshape(5, 1)
scaler = MaxAbsScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert_equal(scaler.n_samples_seen_, X.shape[0])
# function interface
X_1d = X_1row.ravel()
max_abs = np.abs(X_1d).max()
assert_array_almost_equal(X_1d / max_abs, maxabs_scale(X_1d, copy=True))
def test_maxabs_scaler_partial_fit():
    # Test that partial_fit run over many batches of various sizes
    # gives the same results as a single call to fit
X = X_2d[:100, :]
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
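        # The chunk sizes above cover single-sample updates, small partial
        # batches, one batch equal to the full data, and an oversized batch.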
        # Test max_abs_ at the end of the process
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler()
scaler_incr_csr = MaxAbsScaler()
scaler_incr_csc = MaxAbsScaler()
for batch in gen_batches(n, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
X_csr = sparse.csr_matrix(X[batch])
scaler_incr_csr = scaler_incr_csr.partial_fit(X_csr)
X_csc = sparse.csc_matrix(X[batch])
scaler_incr_csc = scaler_incr_csc.partial_fit(X_csc)
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csc.max_abs_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_equal(scaler_batch.n_samples_seen_,
scaler_incr_csr.n_samples_seen_)
assert_equal(scaler_batch.n_samples_seen_,
scaler_incr_csc.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csc.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
        # Test max_abs_ after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MaxAbsScaler().fit(X[batch0])
scaler_incr = MaxAbsScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_equal(scaler_batch.n_samples_seen_, scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
        # Test max_abs_ until the end of the partial fits
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert_true(X_norm is not X)
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert_true(X_norm is X)
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
    for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
        X = init(X_dense)
        X_norm = Normalizer(norm='l1', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_sums = np.abs(X_norm).sum(axis=1)
        for i in range(3):
            assert_almost_equal(row_sums[i], 1.0)
        assert_almost_equal(row_sums[3], 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
        X_norm = Normalizer(norm='l2', copy=False).transform(X)
assert_true(X_norm is not X)
assert_true(isinstance(X_norm, sparse.csr_matrix))
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert_true(X_norm1 is not X)
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert_true(X_norm2 is X)
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = X_norm.max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
    for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
        X = init(X_dense)
        X_norm = Normalizer(norm='max', copy=False).transform(X)
        assert_true(X_norm is not X)
        assert_true(isinstance(X_norm, sparse.csr_matrix))
        X_norm = toarray(X_norm)
        row_maxs = X_norm.max(axis=1)
        for i in range(3):
            assert_almost_equal(row_maxs[i], 1.0)
        assert_almost_equal(row_maxs[3], 0.0)
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
assert_raises(ValueError, normalize, [[0]], axis=2)
assert_raises(ValueError, normalize, [[0]], norm='l3')
rs = np.random.RandomState(0)
X_dense = rs.randn(10, 5)
X_sparse = sparse.csr_matrix(X_dense)
ones = np.ones((10))
for X in (X_dense, X_sparse):
for dtype in (np.float32, np.float64):
for norm in ('l1', 'l2'):
X = X.astype(dtype)
X_norm = normalize(X, norm=norm)
assert_equal(X_norm.dtype, dtype)
X_norm = toarray(X_norm)
if norm == 'l1':
row_sums = np.abs(X_norm).sum(axis=1)
else:
X_norm_squared = X_norm**2
row_sums = X_norm_squared.sum(axis=1)
assert_array_almost_equal(row_sums, ones)
# Test return_norm
X_dense = np.array([[3.0, 0, 4.0], [1.0, 0.0, 0.0], [2.0, 3.0, 0.0]])
for norm in ('l1', 'l2', 'max'):
_, norms = normalize(X_dense, norm=norm, return_norm=True)
if norm == 'l1':
assert_array_almost_equal(norms, np.array([7.0, 1.0, 5.0]))
elif norm == 'l2':
assert_array_almost_equal(norms, np.array([5.0, 1.0, 3.60555127]))
else:
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
X_sparse = sparse.csr_matrix(X_dense)
for norm in ('l1', 'l2'):
assert_raises(NotImplementedError, normalize, X_sparse,
norm=norm, return_norm=True)
_, norms = normalize(X_sparse, norm='max', return_norm=True)
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 4)
assert_equal(np.sum(X_bin == 1), 2)
X_bin = binarizer.transform(X)
assert_equal(sparse.issparse(X), sparse.issparse(X_bin))
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert_true(X_bin is not X)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert_true(X_bin is not X)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert_true(X_bin is X)
binarizer = Binarizer(copy=False)
X_float = np.array([[1, 0, 5], [2, 3, -1]], dtype=np.float64)
X_bin = binarizer.transform(X_float)
if init is not list:
assert_true(X_bin is X_float)
X_bin = toarray(X_bin)
assert_equal(np.sum(X_bin == 0), 2)
assert_equal(np.sum(X_bin == 1), 4)
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert_equal(np.sum(X_bin == 0), 1)
assert_equal(np.sum(X_bin == 1), 5)
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
assert_raises(ValueError, binarizer.transform, sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
K_fit_centered2 = centerer.fit_transform(K_fit)
assert_array_almost_equal(K_fit_centered, K_fit_centered2)
# center predict time matrix
X_pred = rng.random_sample((2, 4))
K_pred = np.dot(X_pred, X_fit.T)
X_pred_centered = scaler.transform(X_pred)
K_pred_centered = np.dot(X_pred_centered, X_fit_centered.T)
K_pred_centered2 = centerer.transform(K_pred)
assert_array_almost_equal(K_pred_centered, K_pred_centered2)
def test_cv_pipeline_precomputed():
# Cross-validate a regression on four coplanar points with the same
# value. Use precomputed kernel to ensure Pipeline with KernelCenterer
# is treated as a _pairwise operation.
X = np.array([[3, 0, 0], [0, 3, 0], [0, 0, 3], [1, 1, 1]])
y_true = np.ones((4,))
K = X.dot(X.T)
kcent = KernelCenterer()
pipeline = Pipeline([("kernel_centerer", kcent), ("svr", SVR())])
# did the pipeline set the _pairwise attribute?
assert_true(pipeline._pairwise)
# test cross-validation, score should be almost perfect
# NB: this test is pretty vacuous -- it's mainly to test integration
# of Pipeline and KernelCenterer
y_pred = cross_val_predict(pipeline, K, y_true, cv=2)
assert_array_almost_equal(y_true, y_pred)
def test_fit_transform():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for obj in ((StandardScaler(), Normalizer(), Binarizer())):
X_transformed = obj.fit(X).transform(X)
X_transformed2 = obj.fit_transform(X)
assert_array_equal(X_transformed, X_transformed2)
def test_add_dummy_feature():
X = [[1, 0], [0, 1], [0, 1]]
X = add_dummy_feature(X)
assert_array_equal(X, [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_coo():
X = sparse.coo_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_coo(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csc():
X = sparse.csc_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csc(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_add_dummy_feature_csr():
X = sparse.csr_matrix([[1, 0], [0, 1], [0, 1]])
X = add_dummy_feature(X)
assert_true(sparse.isspmatrix_csr(X), X)
assert_array_equal(X.toarray(), [[1, 1, 0], [1, 0, 1], [1, 0, 1]])
def test_one_hot_encoder_sparse():
# Test OneHotEncoder's fit and transform.
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder()
# discover max values automatically
X_trans = enc.fit_transform(X).toarray()
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
[[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]])
# max value given as 3
enc = OneHotEncoder(n_values=4)
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 4 * 3))
assert_array_equal(enc.feature_indices_, [0, 4, 8, 12])
# max value given per feature
enc = OneHotEncoder(n_values=[3, 2, 2])
X = [[1, 0, 1], [0, 1, 1]]
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 3 + 2 + 2))
assert_array_equal(enc.n_values_, [3, 2, 2])
# check that testing with larger feature works:
X = np.array([[2, 0, 1], [0, 1, 1]])
enc.transform(X)
# test that an error is raised when out of bounds:
X_too_large = [[0, 2, 1], [0, 1, 1]]
assert_raises(ValueError, enc.transform, X_too_large)
    error_msg = r"unknown categorical feature present \[2\] during transform."
assert_raises_regex(ValueError, error_msg, enc.transform, X_too_large)
assert_raises(ValueError, OneHotEncoder(n_values=2).fit_transform, X)
# test that error is raised when wrong number of features
assert_raises(ValueError, enc.transform, X[:, :-1])
# test that error is raised when wrong number of features in fit
# with prespecified n_values
assert_raises(ValueError, enc.fit, X[:, :-1])
# test exception on wrong init param
assert_raises(TypeError, OneHotEncoder(n_values=np.int).fit, X)
enc = OneHotEncoder()
# test negative input to fit
assert_raises(ValueError, enc.fit, [[0], [-1]])
# test negative input to transform
enc.fit([[0], [1]])
assert_raises(ValueError, enc.transform, [[0], [-1]])
def test_one_hot_encoder_dense():
# check for sparse=False
X = [[3, 2, 1], [0, 1, 1]]
enc = OneHotEncoder(sparse=False)
# discover max values automatically
X_trans = enc.fit_transform(X)
assert_equal(X_trans.shape, (2, 5))
assert_array_equal(enc.active_features_,
np.where([1, 0, 0, 1, 0, 1, 1, 0, 1])[0])
assert_array_equal(enc.feature_indices_, [0, 4, 7, 9])
# check outcome
assert_array_equal(X_trans,
np.array([[0., 1., 0., 1., 1.],
[1., 0., 1., 0., 1.]]))
def _check_transform_selected(X, X_expected, sel):
for M in (X, sparse.csr_matrix(X)):
Xtr = _transform_selected(M, Binarizer().transform, sel)
assert_array_equal(toarray(Xtr), X_expected)
def test_transform_selected():
X = [[3, 2, 1], [0, 1, 1]]
X_expected = [[1, 2, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0])
_check_transform_selected(X, X_expected, [True, False, False])
X_expected = [[1, 1, 1], [0, 1, 1]]
_check_transform_selected(X, X_expected, [0, 1, 2])
_check_transform_selected(X, X_expected, [True, True, True])
_check_transform_selected(X, X_expected, "all")
_check_transform_selected(X, X, [])
_check_transform_selected(X, X, [False, False, False])
def test_transform_selected_copy_arg():
# transformer that alters X
def _mutating_transformer(X):
X[0, 0] = X[0, 0] + 1
return X
original_X = np.asarray([[1, 2], [3, 4]])
expected_Xtr = [[2, 2], [3, 4]]
X = original_X.copy()
Xtr = _transform_selected(X, _mutating_transformer, copy=True,
selected='all')
assert_array_equal(toarray(X), toarray(original_X))
assert_array_equal(toarray(Xtr), expected_Xtr)
def _run_one_hot(X, X2, cat):
enc = OneHotEncoder(categorical_features=cat)
Xtr = enc.fit_transform(X)
X2tr = enc.transform(X2)
return Xtr, X2tr
def _check_one_hot(X, X2, cat, n_features):
ind = np.where(cat)[0]
# With mask
A, B = _run_one_hot(X, X2, cat)
# With indices
C, D = _run_one_hot(X, X2, ind)
# Check shape
assert_equal(A.shape, (2, n_features))
assert_equal(B.shape, (1, n_features))
assert_equal(C.shape, (2, n_features))
assert_equal(D.shape, (1, n_features))
# Check that mask and indices give the same results
assert_array_equal(toarray(A), toarray(C))
assert_array_equal(toarray(B), toarray(D))
def test_one_hot_encoder_categorical_features():
X = np.array([[3, 2, 1], [0, 1, 1]])
X2 = np.array([[1, 1, 1]])
cat = [True, False, False]
_check_one_hot(X, X2, cat, 4)
# Edge case: all non-categorical
cat = [False, False, False]
_check_one_hot(X, X2, cat, 3)
# Edge case: all categorical
cat = [True, True, True]
_check_one_hot(X, X2, cat, 5)
def test_one_hot_encoder_unknown_transform():
X = np.array([[0, 2, 1], [1, 0, 3], [1, 0, 2]])
y = np.array([[4, 1, 1]])
# Test that one hot encoder raises error for unknown features
# present during transform.
oh = OneHotEncoder(handle_unknown='error')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
# Test the ignore option, ignores unknown features.
oh = OneHotEncoder(handle_unknown='ignore')
oh.fit(X)
assert_array_equal(
oh.transform(y).toarray(),
np.array([[0., 0., 0., 0., 1., 0., 0.]]))
# Raise error if handle_unknown is neither ignore or error.
oh = OneHotEncoder(handle_unknown='42')
oh.fit(X)
assert_raises(ValueError, oh.transform, y)
def check_categorical_onehot(X):
enc = CategoricalEncoder(encoding='onehot')
Xtr1 = enc.fit_transform(X)
enc = CategoricalEncoder(encoding='onehot-dense')
Xtr2 = enc.fit_transform(X)
assert_allclose(Xtr1.toarray(), Xtr2)
assert sparse.isspmatrix_csr(Xtr1)
return Xtr1.toarray()
def test_categorical_encoder_onehot():
X = [['abc', 1, 55], ['def', 2, 55]]
Xtr = check_categorical_onehot(np.array(X)[:, [0]])
assert_allclose(Xtr, [[1, 0], [0, 1]])
Xtr = check_categorical_onehot(np.array(X)[:, [0, 1]])
assert_allclose(Xtr, [[1, 0, 1, 0], [0, 1, 0, 1]])
Xtr = CategoricalEncoder().fit_transform(X)
assert_allclose(Xtr.toarray(), [[1, 0, 1, 0, 1], [0, 1, 0, 1, 1]])
def test_categorical_encoder_onehot_inverse():
for encoding in ['onehot', 'onehot-dense']:
X = [['abc', 2, 55], ['def', 1, 55], ['abc', 3, 55]]
enc = CategoricalEncoder(encoding=encoding)
X_tr = enc.fit_transform(X)
exp = np.array(X, dtype=object)
assert_array_equal(enc.inverse_transform(X_tr), exp)
X = [[2, 55], [1, 55], [3, 55]]
enc = CategoricalEncoder(encoding=encoding)
X_tr = enc.fit_transform(X)
exp = np.array(X)
assert_array_equal(enc.inverse_transform(X_tr), exp)
# with unknown categories
X = [['abc', 2, 55], ['def', 1, 55], ['abc', 3, 55]]
enc = CategoricalEncoder(encoding=encoding, handle_unknown='ignore',
categories=[['abc', 'def'], [1, 2],
[54, 55, 56]])
X_tr = enc.fit_transform(X)
exp = np.array(X, dtype=object)
exp[2, 1] = None
assert_array_equal(enc.inverse_transform(X_tr), exp)
# with an otherwise numerical output, still object if unknown
X = [[2, 55], [1, 55], [3, 55]]
enc = CategoricalEncoder(encoding=encoding,
categories=[[1, 2], [54, 56]],
handle_unknown='ignore')
X_tr = enc.fit_transform(X)
exp = np.array(X, dtype=object)
exp[2, 0] = None
exp[:, 1] = None
assert_array_equal(enc.inverse_transform(X_tr), exp)
# incorrect shape raises
X_tr = np.array([[0, 1, 1], [1, 0, 1]])
msg = re.escape('Shape of the passed X data is not correct')
assert_raises_regex(ValueError, msg, enc.inverse_transform, X_tr)
def test_categorical_encoder_handle_unknown():
X = np.array([[1, 2, 3], [4, 5, 6]])
X2 = np.array([[7, 5, 3]])
# Test that encoder raises error for unknown features during transform.
enc = CategoricalEncoder()
enc.fit(X)
msg = re.escape('unknown categories [7] in column 0')
assert_raises_regex(ValueError, msg, enc.transform, X2)
# With 'ignore' you get all 0's in result
enc = CategoricalEncoder(handle_unknown='ignore')
enc.fit(X)
X2_passed = X2.copy()
Xtr = enc.transform(X2_passed)
assert_allclose(Xtr.toarray(), [[0, 0, 0, 1, 1, 0]])
# ensure transformed data was not modified in place
assert_allclose(X2, X2_passed)
# Invalid option
enc = CategoricalEncoder(handle_unknown='invalid')
assert_raises(ValueError, enc.fit, X)
def test_categorical_encoder_categories():
X = [['abc', 1, 55], ['def', 2, 55]]
# order of categories should not depend on order of samples
for Xi in [X, X[::-1]]:
enc = CategoricalEncoder()
enc.fit(Xi)
assert enc.categories == 'auto'
assert isinstance(enc.categories_, list)
cat_exp = [['abc', 'def'], [1, 2], [55]]
for res, exp in zip(enc.categories_, cat_exp):
assert res.tolist() == exp
def test_categorical_encoder_specified_categories():
X = np.array([['a', 'b']], dtype=object).T
enc = CategoricalEncoder(categories=[['a', 'b', 'c']])
exp = np.array([[1., 0., 0.],
[0., 1., 0.]])
assert_array_equal(enc.fit_transform(X).toarray(), exp)
assert enc.categories[0] == ['a', 'b', 'c']
assert enc.categories_[0].tolist() == ['a', 'b', 'c']
assert np.issubdtype(enc.categories_[0].dtype, str)
# unsorted passed categories raises for now
enc = CategoricalEncoder(categories=[['c', 'b', 'a']])
msg = re.escape('Unsorted categories are not yet supported')
assert_raises_regex(ValueError, msg, enc.fit_transform, X)
# multiple columns
X = np.array([['a', 'b'], [0, 2]], dtype=object).T
enc = CategoricalEncoder(categories=[['a', 'b', 'c'], [0, 1, 2]])
exp = np.array([[1., 0., 0., 1., 0., 0.],
[0., 1., 0., 0., 0., 1.]])
assert_array_equal(enc.fit_transform(X).toarray(), exp)
assert enc.categories_[0].tolist() == ['a', 'b', 'c']
assert np.issubdtype(enc.categories_[0].dtype, str)
assert enc.categories_[1].tolist() == [0, 1, 2]
assert np.issubdtype(enc.categories_[1].dtype, np.integer)
# when specifying categories manually, unknown categories should already
# raise when fitting
X = np.array([['a', 'b', 'c']]).T
enc = CategoricalEncoder(categories=[['a', 'b']])
assert_raises(ValueError, enc.fit, X)
enc = CategoricalEncoder(categories=[['a', 'b']], handle_unknown='ignore')
exp = np.array([[1., 0.], [0., 1.], [0., 0.]])
assert_array_equal(enc.fit(X).transform(X).toarray(), exp)
def test_categorical_encoder_pandas():
try:
import pandas as pd
except ImportError:
raise SkipTest("pandas is not installed")
X_df = pd.DataFrame({'A': ['a', 'b'], 'B': [1, 2]})
Xtr = check_categorical_onehot(X_df)
assert_allclose(Xtr, [[1, 0, 1, 0], [0, 1, 0, 1]])
def test_categorical_encoder_ordinal():
X = [['abc', 2, 55], ['def', 1, 55]]
enc = CategoricalEncoder(encoding='other')
assert_raises(ValueError, enc.fit, X)
enc = CategoricalEncoder(encoding='ordinal', handle_unknown='ignore')
assert_raises(ValueError, enc.fit, X)
enc = CategoricalEncoder(encoding='ordinal')
exp = np.array([[0, 1, 0],
[1, 0, 0]], dtype='int64')
assert_array_equal(enc.fit_transform(X), exp.astype('float64'))
enc = CategoricalEncoder(encoding='ordinal', dtype='int64')
assert_array_equal(enc.fit_transform(X), exp)
def test_categorical_encoder_ordinal_inverse():
X = [['abc', 2, 55], ['def', 1, 55]]
enc = CategoricalEncoder(encoding='ordinal')
X_tr = enc.fit_transform(X)
exp = np.array(X, dtype=object)
assert_array_equal(enc.inverse_transform(X_tr), exp)
# incorrect shape raises
X_tr = np.array([[0, 1, 1, 2], [1, 0, 1, 0]])
msg = re.escape('Shape of the passed X data is not correct')
assert_raises_regex(ValueError, msg, enc.inverse_transform, X_tr)
def test_categorical_encoder_dtypes():
# check that dtypes are preserved when determining categories
enc = CategoricalEncoder()
exp = np.array([[1., 0., 1., 0.], [0., 1., 0., 1.]], dtype='float64')
for X in [np.array([[1, 2], [3, 4]], dtype='int64'),
np.array([[1, 2], [3, 4]], dtype='float64'),
np.array([['a', 'b'], ['c', 'd']]), # string dtype
np.array([[1, 'a'], [3, 'b']], dtype='object')]:
enc.fit(X)
assert all([enc.categories_[i].dtype == X.dtype for i in range(2)])
assert_array_equal(enc.transform(X).toarray(), exp)
X = [[1, 2], [3, 4]]
enc.fit(X)
assert all([np.issubdtype(enc.categories_[i].dtype, np.integer)
for i in range(2)])
assert_array_equal(enc.transform(X).toarray(), exp)
X = [[1, 'a'], [3, 'b']]
enc.fit(X)
assert all([enc.categories_[i].dtype == 'object' for i in range(2)])
assert_array_equal(enc.transform(X).toarray(), exp)
def test_categorical_encoder_dtypes_pandas():
# check dtype (similar to test_categorical_encoder_dtypes for dataframes)
try:
import pandas as pd
except ImportError:
raise SkipTest("pandas is not installed")
enc = CategoricalEncoder()
exp = np.array([[1., 0., 1., 0.], [0., 1., 0., 1.]], dtype='float64')
X = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}, dtype='int64')
enc.fit(X)
assert all([enc.categories_[i].dtype == 'int64' for i in range(2)])
assert_array_equal(enc.transform(X).toarray(), exp)
X = pd.DataFrame({'A': [1, 2], 'B': ['a', 'b']})
enc.fit(X)
assert all([enc.categories_[i].dtype == 'object' for i in range(2)])
assert_array_equal(enc.transform(X).toarray(), exp)
def test_fit_cold_start():
X = iris.data
X_2d = X[:, :2]
# Scalers that have a partial_fit method
scalers = [StandardScaler(with_mean=False, with_std=False),
MinMaxScaler(),
MaxAbsScaler()]
for scaler in scalers:
scaler.fit_transform(X)
# with a different shape, this may break the scaler unless the internal
# state is reset
scaler.fit_transform(X_2d)
def test_quantile_transform_valid_axis():
X = np.array([[0, 25, 50, 75, 100],
[2, 4, 6, 8, 10],
[2.6, 4.1, 2.3, 9.5, 0.1]])
assert_raises_regex(ValueError, "axis should be either equal to 0 or 1"
". Got axis=2", quantile_transform, X.T, axis=2)
| bsd-3-clause |
nwiizo/workspace_2017 | keras_ex/example/mnist_siamese_graph.py | 1 | 4198 | '''Train a Siamese MLP on pairs of digits from the MNIST dataset.
It follows Hadsell-et-al.'06 [1] by computing the Euclidean distance on the
output of the shared network and by optimizing the contrastive loss (see paper
for more details).
[1] "Dimensionality Reduction by Learning an Invariant Mapping"
http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
Gets to 99.5% test accuracy after 20 epochs.
3 seconds per epoch on a Titan X GPU
'''
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
import random
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Input, Lambda
from keras.optimizers import RMSprop
from keras import backend as K
def euclidean_distance(vects):
x, y = vects
return K.sqrt(K.sum(K.square(x - y), axis=1, keepdims=True))
def eucl_dist_output_shape(shapes):
shape1, shape2 = shapes
return (shape1[0], 1)
def contrastive_loss(y_true, y_pred):
'''Contrastive loss from Hadsell-et-al.'06
http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
'''
margin = 1
return K.mean(y_true * K.square(y_pred) + (1 - y_true) * K.square(K.maximum(margin - y_pred, 0)))
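# Note on the loss above: y_true == 1 marks a genuine (same-digit) pair and
# y_true == 0 an impostor pair, matching the labels built in create_pairs()
# below. The first term pulls genuine pairs together by penalising any
# distance, while the second pushes impostor pairs apart until they are at
# least `margin` away, after which they stop contributing to the loss.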
def create_pairs(x, digit_indices):
'''Positive and negative pair creation.
Alternates between positive and negative pairs.
'''
pairs = []
labels = []
n = min([len(digit_indices[d]) for d in range(10)]) - 1
for d in range(10):
for i in range(n):
z1, z2 = digit_indices[d][i], digit_indices[d][i + 1]
pairs += [[x[z1], x[z2]]]
inc = random.randrange(1, 10)
dn = (d + inc) % 10
z1, z2 = digit_indices[d][i], digit_indices[dn][i]
pairs += [[x[z1], x[z2]]]
labels += [1, 0]
return np.array(pairs), np.array(labels)
def create_base_network(input_dim):
'''Base network to be shared (eq. to feature extraction).
'''
seq = Sequential()
seq.add(Dense(128, input_shape=(input_dim,), activation='relu'))
seq.add(Dropout(0.1))
seq.add(Dense(128, activation='relu'))
seq.add(Dropout(0.1))
seq.add(Dense(128, activation='relu'))
return seq
def compute_accuracy(predictions, labels):
'''Compute classification accuracy with a fixed threshold on distances.
'''
    return np.mean(labels == (predictions.ravel() < 0.5))
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
input_dim = 784
nb_epoch = 20
# create training+test positive and negative pairs
digit_indices = [np.where(y_train == i)[0] for i in range(10)]
tr_pairs, tr_y = create_pairs(X_train, digit_indices)
digit_indices = [np.where(y_test == i)[0] for i in range(10)]
te_pairs, te_y = create_pairs(X_test, digit_indices)
# network definition
base_network = create_base_network(input_dim)
input_a = Input(shape=(input_dim,))
input_b = Input(shape=(input_dim,))
# because we re-use the same instance `base_network`,
# the weights of the network
# will be shared across the two branches
processed_a = base_network(input_a)
processed_b = base_network(input_b)
distance = Lambda(euclidean_distance, output_shape=eucl_dist_output_shape)([processed_a, processed_b])
model = Model(input=[input_a, input_b], output=distance)
# train
rms = RMSprop()
model.compile(loss=contrastive_loss, optimizer=rms)
model.fit([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y,
validation_data=([te_pairs[:, 0], te_pairs[:, 1]], te_y),
batch_size=128,
nb_epoch=nb_epoch)
# compute final accuracy on training and test sets
pred = model.predict([tr_pairs[:, 0], tr_pairs[:, 1]])
tr_acc = compute_accuracy(pred, tr_y)
pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])
te_acc = compute_accuracy(pred, te_y)
print('* Accuracy on training set: %0.2f%%' % (100 * tr_acc))
print('* Accuracy on test set: %0.2f%%' % (100 * te_acc))
| mit |
GuillaumeSalha/jieba | test/extract_topic.py | 65 | 1463 | import sys
sys.path.append("../")
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn import decomposition
import jieba
import time
import glob
import sys
import os
import random
if len(sys.argv)<2:
print("usage: extract_topic.py directory [n_topic] [n_top_words]")
sys.exit(0)
n_topic = 10
n_top_words = 25
if len(sys.argv)>2:
n_topic = int(sys.argv[2])
if len(sys.argv)>3:
n_top_words = int(sys.argv[3])
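# Example invocation (hypothetical corpus directory holding one .txt file per
# source document):
#     python extract_topic.py ./corpus_txt 10 25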
count_vect = CountVectorizer()
docs = []
pattern = os.path.join(sys.argv[1],"*.txt")
print("read "+pattern)
for f_name in glob.glob(pattern):
with open(f_name) as f:
print("read file:", f_name)
for line in f: #one line as a document
words = " ".join(jieba.cut(line))
docs.append(words)
random.shuffle(docs)
print("read done.")
print("transform")
counts = count_vect.fit_transform(docs)
tfidf = TfidfTransformer().fit_transform(counts)
print(tfidf.shape)
t0 = time.time()
print("training...")
nmf = decomposition.NMF(n_components=n_topic).fit(tfidf)
print("done in %0.3fs." % (time.time() - t0))
# Inverse the vectorizer vocabulary to be able
feature_names = count_vect.get_feature_names()
for topic_idx, topic in enumerate(nmf.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print("")
| mit |
h2oai/h2o | py/testdir_single_jvm/test_KMeans_covtype_fvec.py | 9 | 8720 | import unittest, random, sys, time, json
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_kmeans, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED
SEED = h2o.setup_random_seed()
h2o.init(1,java_heap_GB=12)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_KMeans_covtype_fvec(self):
csvFilenameList = [
('covtype.data', 800),
]
importFolderPath = "standard"
for csvFilename, timeoutSecs in csvFilenameList:
# creates csvFilename.hex from file in importFolder dir
csvPathname = importFolderPath + "/" + csvFilename
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname,
timeoutSecs=2000, pollTimeoutSecs=60)
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'])
print "\n" + csvPathname, \
" numRows:", "{:,}".format(inspect['numRows']), \
" numCols:", "{:,}".format(inspect['numCols'])
for trial in range(2):
kwargs = {
'k': 6,
'initialization': 'Furthest',
# 'initialization': '',
# 'ignored_cols': range(11, inspect['numCols']),
# ignore the response
'ignored_cols_by_name': 'C55',
'max_iter': 100,
# 'normalize': 0,
# reuse the same seed, to get deterministic results
'seed': 265211114317615310
}
start = time.time()
kmeansResult = h2o_cmd.runKMeans(parseResult=parseResult, \
timeoutSecs=timeoutSecs, retryDelaySecs=2, pollTimeoutSecs=60, **kwargs)
elapsed = time.time() - start
print "kmeans end on ", csvPathname, 'took', elapsed, 'seconds.', \
"%d pct. of timeout" % ((elapsed/timeoutSecs) * 100)
h2o_kmeans.simpleCheckKMeans(self, kmeansResult, **kwargs)
expected = [
([2781.64184460309, 162.69950733599902, 16.545275983574268, 243.73547234768156, 50.48239522121315, 942.4480922085701, 208.3915356763203, 218.7135425941215, 140.10956243018794, 1040.6795741397266, 0.22024185323685105, 0.0845245225799837, 0.4957505706376572, 0.19948305354550802, 0.01635558145683929, 0.033196811983660604, 0.026025394050259283, 0.04566180477986607, 0.008617572941792261, 0.03547936261257615, 0.0, 0.0, 0.006189327591882107, 0.13606268110663236, 0.037222303163733886, 0.024007252359445064, 0.040891651692487006, 0.003232264365769295, 1.6188302332734367e-05, 0.004667627172605076, 0.00910861811255187, 9.173371321882807e-05, 0.0025415634662392956, 0.008946735089224526, 0.0023095311328034363, 0.04957397784361021, 0.09252154393235448, 0.03887890610245037, 0.0, 0.0, 0.0010792201555156243, 0.004867282901375466, 0.08281935473426902, 0.045640220376755754, 0.04933654940939677, 0.08426550974265995, 0.07772003949945769, 0.001327440791284218, 0.0014191745045030462, 0.0, 0.0, 0.009513325670870229, 0.010970272880816322, 0.009443176360761713], 185319, 116283720155.37769) ,
([2892.8730376693256, 119.94759695676377, 11.22516236778623, 189.0301354611245, 24.621525329374652, 2631.9842642419744, 219.94967526442753, 223.3794395991835, 135.71226572647987, 5409.1797365002785, 0.883243644460939, 0.11675635553906105, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0015587307478196325, 0.0, 0.0, 0.0, 0.23410651326776769, 0.0, 0.0, 0.0, 0.026498422712933754, 0.0, 0.04152904063833735, 0.005158656522545927, 0.0695490814622379, 0.0, 0.0634997216552236, 0.05418444980515866, 0.010391538318797551, 0.0002969010948227871, 0.0, 0.0, 0.0, 0.3677862312117276, 0.07596956763778066, 0.0, 0.01109667841900167, 0.005641120801632956, 0.0, 0.0018185192057895714, 0.0, 0.0, 0.0021154203006123586, 0.018444980515865652, 0.010354425681944703], 26945, 46932273891.61873) ,
([3022.020861415003, 137.8546989122598, 13.3449108178427, 282.99227296949937, 45.23691263596753, 1606.0215197015768, 216.64941537882825, 222.64791856054669, 137.40339644525253, 2529.4366555907336, 0.4113429046111407, 0.08617284724616782, 0.5024842481426914, 0.0, 0.0, 0.0052506191028494405, 0.0, 0.014176671577693489, 0.0, 0.0, 0.0, 0.0, 0.0, 0.018949249239835743, 0.029850161436945546, 0.05403435628977148, 0.020892761982382997, 0.0, 0.0, 0.0018494718033917432, 0.011731607159650168, 0.005979436381304661, 0.0047098837027052445, 0.013714303626845553, 0.0007601642581737249, 0.047788470580859534, 0.10631328171530674, 0.04641704021817498, 0.0036519231372057308, 0.011872668568383437, 0.0, 0.00034481677690354536, 0.17267483777937995, 0.044473527475627724, 0.05637754302372967, 0.1292435973793925, 0.11970627880003762, 0.0013871038525438075, 0.004858781856368139, 0.0, 0.0, 0.03151155136202627, 0.028988119494686687, 0.012491771417823892], 127604, 95229063588.02844) ,
([3051.365089986695, 168.1268450579292, 14.114846831985933, 287.6101588092033, 50.702549817536706, 2835.266162979793, 209.89460702308608, 226.92302305495684, 148.84282479633362, 1461.8985753079312, 0.3284728328107128, 0.0006069141527711857, 0.670920253036516, 0.0, 0.0, 0.0054700083256172235, 0.0, 0.01653452018767653, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03886584862938554, 0.013250959002170886, 0.04277966681969203, 0.05480901656564399, 0.0, 0.0, 0.0010426473906581905, 0.0018440853103432178, 0.0, 0.0035014278044491476, 0.011671426014830491, 0.002435437561761296, 0.044405885511091744, 0.10662236712081483, 0.042756323967662366, 0.0, 0.007384122192049426, 0.006263665294625696, 0.0, 0.14390868276285998, 0.022152366576148275, 0.07071327974851968, 0.14799368186805065, 0.1011367968938445, 0.009111493242244337, 0.006427065258833325, 0.0009259331305098857, 0.002318723301612991, 0.03055579330682623, 0.041044514818820564, 0.024074261393257027], 128519, 106432862495.53804) ,
([3052.088693852026, 149.15056174929376, 11.549996765359152, 328.4748452763461, 44.2420589567205, 4786.68757682272, 215.8348392383499, 226.91413106764713, 143.9780260065124, 4192.589071226791, 0.8949819938326181, 0.0, 0.10501800616738188, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0022642485929312314, 0.002415198499126647, 0.0, 0.00012938563388178466, 0.0, 0.1351648588618377, 0.0, 0.0, 0.0, 0.014836219351777974, 0.0, 0.0, 0.010674314795247235, 0.03553792077286352, 0.0, 0.039290104155435275, 0.09289888512712138, 0.03864317598602636, 0.0, 0.0, 0.0, 0.0, 0.4371509283419232, 0.08636491061609126, 0.0003665926293317232, 0.002717098311517478, 0.017100467944709204, 0.0, 0.0028249196730856323, 0.0, 0.0, 0.03226015138119164, 0.017316110667845514, 0.03204450865805533], 46373, 77991941653.19676) ,
([3119.4885286481917, 165.13178470083923, 11.672206122079334, 271.2690333876713, 39.407851838435064, 4959.81440560285, 212.5861709835175, 227.95909557447322, 148.6725381875264, 1613.4457676749382, 0.9052556903942522, 0.0, 0.09474430960574776, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00037734709895550323, 0.0, 0.0, 0.0, 0.008346917828895732, 0.0021584254060254783, 0.0, 0.0, 0.0031395278633097865, 0.0, 0.0, 0.02815009358208054, 0.012512829801364487, 0.0, 0.13355068526233171, 0.11424560767976816, 0.008799734347642335, 0.0, 0.0018867354947775161, 0.0012226046006158305, 0.0, 0.44056028497252914, 0.10774014369377528, 0.0033810300066413087, 0.014580691903640641, 0.02313892410795146, 0.0002565960272897422, 3.018776791644026e-05, 0.0, 0.0, 0.06503954597597053, 0.022625732053371973, 0.008256354525146411], 66252, 74666940350.2879) ,
]
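                # Each expected tuple is roughly (cluster center, expected
                # number of rows assigned to the cluster, within-cluster
                # error); allowedDelta below is applied as a relative
                # tolerance (multiplier) on each of these fields.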
### print h2o.dump_json(kmeans)
predictKey = 'd'
(centers, tupleResultList) = h2o_kmeans.bigCheckResults(self, kmeansResult, csvPathname, parseResult, predictKey, **kwargs)
# all are multipliers of expected tuple value
allowedDelta = (0.01, 0.01, 0.01)
# these clusters were sorted compared to the cluster order in training
h2o_kmeans.showClusterDistribution(self, tupleResultList, expected, trial=trial)
# why is the expected # of rows not right in KMeans2. That means predictions are wrong
h2o_kmeans.compareResultsToExpected(self, tupleResultList, expected, allowedDelta, allowError=False,
allowRowError=True, trial=trial)
print "Trial #", trial, "completed\n"
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
herilalaina/scikit-learn | examples/manifold/plot_t_sne_perplexity.py | 14 | 4207 | """
=============================================================================
t-SNE: The effect of various perplexity values on the shape
=============================================================================
An illustration of t-SNE on the two concentric circles and the S-curve
datasets for different perplexity values.
We observe a tendency towards clearer shapes as the perplexity value increases.
The size, the distance and the shape of the clusters may vary with the
initialization and the perplexity value, and do not always convey a meaning.
As shown below, t-SNE for higher perplexities finds meaningful topology of
two concentric circles, however the size and the distance of the circles varies
slightly from the original. Contrary to the two circles dataset, the shapes
visually diverge from S-curve topology on the S-curve dataset even for
larger perplexity values.
For further details, "How to Use t-SNE Effectively"
http://distill.pub/2016/misread-tsne/ provides a good discussion of the
effects of various parameters, as well as interactive plots to explore
those effects.
"""
# Author: Narine Kokhlikyan <[email protected]>
# License: BSD
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
from time import time
n_samples = 300
n_components = 2
(fig, subplots) = plt.subplots(3, 5, figsize=(15, 8))
perplexities = [5, 30, 50, 100]
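# The 3x5 grid of axes holds one row per dataset (circles, S-curve, uniform
# grid); the first column shows the original 2D data and the remaining four
# columns show the t-SNE embedding for each perplexity value above.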
X, y = datasets.make_circles(n_samples=n_samples, factor=.5, noise=.05)
red = y == 0
green = y == 1
ax = subplots[0][0]
ax.scatter(X[red, 0], X[red, 1], c="r")
ax.scatter(X[green, 0], X[green, 1], c="g")
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
for i, perplexity in enumerate(perplexities):
ax = subplots[0][i + 1]
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='random',
random_state=0, perplexity=perplexity)
Y = tsne.fit_transform(X)
t1 = time()
print("circles, perplexity=%d in %.2g sec" % (perplexity, t1 - t0))
ax.set_title("Perplexity=%d" % perplexity)
ax.scatter(Y[red, 0], Y[red, 1], c="r")
ax.scatter(Y[green, 0], Y[green, 1], c="g")
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
ax.axis('tight')
# Another example using s-curve
X, color = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
ax = subplots[1][0]
ax.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.viridis)
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
for i, perplexity in enumerate(perplexities):
ax = subplots[1][i + 1]
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='random',
random_state=0, perplexity=perplexity)
Y = tsne.fit_transform(X)
t1 = time()
print("S-curve, perplexity=%d in %.2g sec" % (perplexity, t1 - t0))
ax.set_title("Perplexity=%d" % perplexity)
ax.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.viridis)
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
ax.axis('tight')
# Another example using a 2D uniform grid
x = np.linspace(0, 1, int(np.sqrt(n_samples)))
xx, yy = np.meshgrid(x, x)
X = np.hstack([
xx.ravel().reshape(-1, 1),
yy.ravel().reshape(-1, 1),
])
color = xx.ravel()
ax = subplots[2][0]
ax.scatter(X[:, 0], X[:, 1], c=color, cmap=plt.cm.viridis)
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
for i, perplexity in enumerate(perplexities):
ax = subplots[2][i + 1]
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='random',
random_state=0, perplexity=perplexity)
Y = tsne.fit_transform(X)
t1 = time()
print("uniform grid, perplexity=%d in %.2g sec" % (perplexity, t1 - t0))
ax.set_title("Perplexity=%d" % perplexity)
ax.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.viridis)
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
ax.axis('tight')
plt.show()
| bsd-3-clause |
xiaolonw/fast-rcnn-normal | tools/test_net.py | 21 | 2458 | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""Test a Fast R-CNN network on an image database."""
import _init_paths
from fast_rcnn.test import test_net
from fast_rcnn.config import cfg, cfg_from_file
from datasets.factory import get_imdb
import caffe
import argparse
import pprint
import time, os, sys
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--wait', dest='wait',
help='wait until net file exists',
default=True, type=bool)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to test',
default='voc_2007_test', type=str)
parser.add_argument('--comp', dest='comp_mode', help='competition mode',
action='store_true')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
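# Example invocation (hypothetical paths, shown for illustration only):
#     ./tools/test_net.py --gpu 0 \
#         --def models/VGG16/test.prototxt \
#         --net output/default/vgg16_fast_rcnn_iter_40000.caffemodel \
#         --imdb voc_2007_test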
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
print('Using config:')
pprint.pprint(cfg)
while not os.path.exists(args.caffemodel) and args.wait:
print('Waiting for {} to exist...'.format(args.caffemodel))
time.sleep(10)
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
net = caffe.Net(args.prototxt, args.caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
imdb = get_imdb(args.imdb_name)
imdb.competition_mode(args.comp_mode)
test_net(net, imdb)
| mit |
schets/scikit-learn | examples/plot_digits_pipe.py | 249 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
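# The grid above has 3 x 3 = 9 candidate settings (three PCA sizes times three
# values of C); GridSearchCV refits the whole pipeline for each candidate and
# keeps the best one, scored here with the classifier's default accuracy under
# cross-validation.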
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
pravsripad/mne-python | tutorials/inverse/90_phantom_4DBTi.py | 16 | 2689 | # -*- coding: utf-8 -*-
"""
.. _tut-phantom-4Dbti:
============================================
4D Neuroimaging/BTi phantom dataset tutorial
============================================
Here we read 4DBTi epochs data obtained with a spherical phantom
using four different dipole locations. For each condition we
compute evoked data and compute dipole fits.
Data are provided by Jean-Michel Badier from MEG center in Marseille, France.
"""
# Authors: Alex Gramfort <[email protected]>
#
# License: BSD-3-Clause
# %%
import os.path as op
import numpy as np
from mne.datasets import phantom_4dbti
import mne
# %%
# Read data and compute a dipole fit at the peak of the evoked response
data_path = phantom_4dbti.data_path()
raw_fname = op.join(data_path, '%d/e,rfhp1.0Hz')
dipoles = list()
sphere = mne.make_sphere_model(r0=(0., 0., 0.), head_radius=0.080)
t0 = 0.07 # peak of the response
pos = np.empty((4, 3))
ori = np.empty((4, 3))
for ii in range(4):
raw = mne.io.read_raw_bti(raw_fname % (ii + 1,),
rename_channels=False, preload=True)
raw.info['bads'] = ['A173', 'A213', 'A232']
events = mne.find_events(raw, 'TRIGGER', mask=4350, mask_type='not_and')
epochs = mne.Epochs(raw, events=events, event_id=8192, tmin=-0.2, tmax=0.4,
preload=True)
evoked = epochs.average()
evoked.plot(time_unit='s')
cov = mne.compute_covariance(epochs, tmax=0.)
dip = mne.fit_dipole(evoked.copy().crop(t0, t0), cov, sphere)[0]
pos[ii] = dip.pos[0]
ori[ii] = dip.ori[0]
# %%
# Compute localisation errors
actual_pos = 0.01 * np.array([[0.16, 1.61, 5.13],
[0.17, 1.35, 4.15],
[0.16, 1.05, 3.19],
[0.13, 0.80, 2.26]])
actual_pos = np.dot(actual_pos, [[0, 1, 0], [-1, 0, 0], [0, 0, 1]])
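# Localisation error: Euclidean distance between each fitted dipole position
# and the corresponding true phantom position, converted from meters (MNE's
# internal unit for positions) to millimeters.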
errors = 1e3 * np.linalg.norm(actual_pos - pos, axis=1)
print("errors (mm) : %s" % errors)
# %%
# Plot the dipoles in 3D
actual_amp = np.ones(len(dip)) # misc amp to create Dipole instance
actual_gof = np.ones(len(dip)) # misc GOF to create Dipole instance
dip = mne.Dipole(dip.times, pos, actual_amp, ori, actual_gof)
dip_true = mne.Dipole(dip.times, actual_pos, actual_amp, ori, actual_gof)
fig = mne.viz.plot_alignment(evoked.info, bem=sphere, surfaces=[])
# Plot the position of the actual dipole
fig = mne.viz.plot_dipole_locations(dipoles=dip_true, mode='sphere',
color=(1., 0., 0.), fig=fig)
# Plot the position of the estimated dipole
fig = mne.viz.plot_dipole_locations(dipoles=dip, mode='sphere',
color=(1., 1., 0.), fig=fig)
| bsd-3-clause |
luoyetx/mxnet | example/stochastic-depth/sd_mnist.py | 44 | 4374 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
################################################################################
# A sanity check mainly for debugging purpose. See sd_cifar10.py for a non-trivial
# example of stochastic depth on cifar10.
################################################################################
import os
import sys
import mxnet as mx
import logging
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from utils import get_data
import sd_module
def get_conv(
name,
data,
num_filter,
kernel,
stride,
pad,
with_relu,
bn_momentum
):
conv = mx.symbol.Convolution(
name=name,
data=data,
num_filter=num_filter,
kernel=kernel,
stride=stride,
pad=pad,
no_bias=True
)
bn = mx.symbol.BatchNorm(
name=name + '_bn',
data=conv,
fix_gamma=False,
momentum=bn_momentum,
# Same with https://github.com/soumith/cudnn.torch/blob/master/BatchNormalization.lua
# cuDNN v5 don't allow a small eps of 1e-5
eps=2e-5
)
return (
# It's better to remove ReLU here
# https://github.com/gcr/torch-residual-networks
mx.symbol.Activation(name=name + '_relu', data=bn, act_type='relu')
if with_relu else bn
)
death_rates = [0.3]
contexts = [mx.context.cpu()]
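# death_rates gives, per stochastic-depth block, the probability that the
# block's residual branch is skipped during training (Huang et al., "Deep
# Networks with Stochastic Depth"), as wrapped by sd_module below; with a
# single entry, only one such block is stacked on the convolutional stem.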
data = mx.symbol.Variable('data')
conv = get_conv(
name='conv0',
data=data,
num_filter=16,
kernel=(3, 3),
stride=(1, 1),
pad=(1, 1),
with_relu=True,
bn_momentum=0.9
)
base_mod = mx.mod.Module(conv, label_names=None, context=contexts)
mod_seq = mx.mod.SequentialModule()
mod_seq.add(base_mod)
for i in range(len(death_rates)):
conv = get_conv(
name='conv0_%d' % i,
data=mx.sym.Variable('data_%d' % i),
num_filter=16,
kernel=(3, 3),
stride=(1, 1),
pad=(1, 1),
with_relu=True,
bn_momentum=0.9
)
conv = get_conv(
name='conv1_%d' % i,
data=conv,
num_filter=16,
kernel=(3, 3),
stride=(1, 1),
pad=(1, 1),
with_relu=False,
bn_momentum=0.9
)
mod = sd_module.StochasticDepthModule(conv, data_names=['data_%d' % i],
context=contexts, death_rate=death_rates[i])
mod_seq.add(mod, auto_wiring=True)
act = mx.sym.Activation(mx.sym.Variable('data_final'), act_type='relu')
flat = mx.sym.Flatten(act)
pred = mx.sym.FullyConnected(flat, num_hidden=10)
softmax = mx.sym.SoftmaxOutput(pred, name='softmax')
mod_seq.add(mx.mod.Module(softmax, context=contexts, data_names=['data_final']),
auto_wiring=True, take_labels=True)
n_epoch = 2
batch_size = 100
basedir = os.path.dirname(__file__)
get_data.get_mnist(os.path.join(basedir, "data"))
train = mx.io.MNISTIter(
image=os.path.join(basedir, "data", "train-images-idx3-ubyte"),
label=os.path.join(basedir, "data", "train-labels-idx1-ubyte"),
input_shape=(1, 28, 28), flat=False,
batch_size=batch_size, shuffle=True, silent=False, seed=10)
val = mx.io.MNISTIter(
image=os.path.join(basedir, "data", "t10k-images-idx3-ubyte"),
label=os.path.join(basedir, "data", "t10k-labels-idx1-ubyte"),
input_shape=(1, 28, 28), flat=False,
batch_size=batch_size, shuffle=True, silent=False)
logging.basicConfig(level=logging.DEBUG)
mod_seq.fit(train, val, optimizer_params={'learning_rate': 0.01, 'momentum': 0.9},
num_epoch=n_epoch, batch_end_callback=mx.callback.Speedometer(batch_size, 10))
| apache-2.0 |
h2oai/h2o | py/testdir_0xdata_slow/test_covtype_summary.py | 9 | 1878 | import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_import as h2i, h2o_jobs, h2o_gbm
DO_PLOT = True
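# This benchmark parses covtype20x once, then times h2o_cmd.runSummary on the
# parsed frame over repeated trials; with DO_PLOT enabled, the per-trial
# elapsed times are plotted via h2o_gbm.plotLists at the end of the test.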
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(1, java_heap_GB=12)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
    def test_summary_covtype_fvec(self):
importFolderPath = "standard"
# Parse Train ******************************************************
# csvTrainFilename = 'covtype.data'
csvTrainFilename = 'covtype20x.data'
csvTrainPathname = importFolderPath + "/" + csvTrainFilename
hex_key = csvTrainFilename + ".hex"
parseTrainResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvTrainPathname, hex_key=hex_key,
timeoutSecs=180, doSummary=False)
inspect = h2o_cmd.runInspect(None, parseTrainResult['destination_key'])
xList = []
eList = []
fList = []
trial = 0
for trial in range(10):
timeoutSecs = 30
# have unique model names
start = time.time()
summaryResult = h2o_cmd.runSummary(key=hex_key, timeoutSecs=timeoutSecs)
elapsed = time.time() - start
print 'summary end', trial, 'on', csvTrainPathname, 'took', elapsed, 'seconds'
fList.append(elapsed)
eList.append(elapsed)
if DO_PLOT:
xLabel = 'trial'
xList.append(trial)
if DO_PLOT:
eLabel = 'elapsed'
fLabel = 'elapsed'
eListTitle = ""
fListTitle = ""
h2o_gbm.plotLists(xList, xLabel, eListTitle, eList, eLabel, fListTitle, fList, fLabel)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
h2oai/h2o | bench/BMscripts/gbmgridBench.py | 11 | 9605 | #GBM bench
import os, sys, time, csv
sys.path.append('../py/')
sys.path.extend(['.','..'])
import h2o_cmd, h2o, h2o_hosts, h2o_browse as h2b, h2o_import as h2i, h2o_rf, h2o_jobs
csv_header = ('h2o_build','nMachines','nJVMs','Xmx/JVM','dataset','nTrainRows','nTestRows','nCols','trainParseWallTime','classification','gbmBuildTime')
files = {'Airlines' : {'train': ('AirlinesTrain1x', 'AirlinesTrain10x', 'AirlinesTrain100x'), 'test' : 'AirlinesTest'},
'AllBedrooms': {'train': ('AllBedroomsTrain1x', 'AllBedroomsTrain10x', 'AllBedroomsTrain100x'), 'test' : 'AllBedroomsTest'},
'Covtype' : {'train': ('CovTypeTrain1x', 'CovTypeTrain10x', 'CovTypeTrain100x'), 'test' : 'CovTypeTest'},
}
build = ""
debug = False
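# NOTE: `build` and `debug` are placeholders here; they are overwritten in
# __main__ below by popping the last two command-line arguments, and `build`
# selects the benchmarks/<build>/<date> output directory.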
def doGBM(fs, folderPath, ignored_cols, classification, testFilehex, ntrees, depth, minrows, nbins, learnRate, response, row):
bench = "bench"
if debug:
print "Doing GBM DEBUG"
bench = "bench/debug"
date = '-'.join([str(x) for x in list(time.localtime())][0:3])
for f in fs['train']:
overallWallStart = time.time()
pre = ""
if debug: pre = 'DEBUG'
gbmbenchcsv = 'benchmarks/'+build+'/'+date+'/'+pre+'gbmbench.csv'
if not os.path.exists(gbmbenchcsv):
output = open(gbmbenchcsv,'w')
output.write(','.join(csv_header)+'\n')
else:
output = open(gbmbenchcsv,'a')
csvWrt = csv.DictWriter(output, fieldnames=csv_header, restval=None,
dialect='excel', extrasaction='ignore',delimiter=',')
try:
java_heap_GB = h2o.nodes[0].java_heap_GB
importFolderPath = bench + folderPath
if (f in ['AirlinesTrain1x','AllBedroomsTrain1x', 'AllBedroomsTrain10x', 'AllBedroomsTrain100x','CovTypeTrain1x', 'CovTypeTrain10x', 'CovTypeTrain100x']):
csvPathname = importFolderPath + "/" + f + '.csv'
else:
csvPathname = importFolderPath + "/" + f + "/*linked*"
hex_key = f + '.hex'
hK = folderPath + "Header.csv"
headerPathname = importFolderPath + "/" + hK
h2i.import_only(bucket='home-0xdiag-datasets', path=headerPathname)
headerKey = h2i.find_key(hK)
trainParseWallStart = time.time()
parseResult = h2i.import_parse(bucket = 'home-0xdiag-datasets',
path = csvPathname,
schema = 'local',
hex_key = hex_key,
header = 1,
header_from_file = headerKey,
separator = 44,
timeoutSecs = 7200,
retryDelaySecs = 5,
pollTimeoutSecs = 7200
)
parseWallTime = time.time() - trainParseWallStart
print "Parsing training file took ", parseWallTime ," seconds."
inspect_train = h2o.nodes[0].inspect(parseResult['destination_key'])
inspect_test = h2o.nodes[0].inspect(testFilehex)
            nMachines = 1 if len(h2o_hosts.hosts) == 0 else len(h2o_hosts.hosts)
row.update( {'h2o_build' : build,
'nMachines' : nMachines,
'nJVMs' : len(h2o.nodes),
'Xmx/JVM' : java_heap_GB,
'dataset' : f,
'nTrainRows' : inspect_train['numRows'],
'nTestRows' : inspect_test['numRows'],
'nCols' : inspect_train['numCols'],
'trainParseWallTime' : parseWallTime,
'classification' : classification,
})
params = {'destination_key' : 'GBM('+f+')',
'response' : response,
'ignored_cols_by_name' : ignored_cols,
'classification' : classification,
'validation' : testFilehex,
'ntrees' : ntrees,
'max_depth' : depth,
'min_rows' : minrows,
'nbins' : nbins,
'learn_rate' : learnRate,
}
kwargs = params.copy()
gbmStart = time.time()
#TODO(spencer): Uses jobs to poll for gbm completion
h2o.beta_features = True
gbm = h2o_cmd.runGBM(parseResult = parseResult, noPoll=True, timeoutSecs=4800, **kwargs)
h2o_jobs.pollWaitJobs(timeoutSecs=7200, pollTimeoutSecs=120, retryDelaySecs=5)
h2o.beta_features = False
gbmTime = time.time() - gbmStart
row.update( {'gbmBuildTime' : gbmTime,
})
#TODO(spencer): Add in gbm scoring
#gbmScoreStart = time.time()
#gbmScore = h2o_cmd.runGLMScore(key=testFilehex,model_key=params['destination_key'])
#scoreTime = time.time() - gbmScoreStart
csvWrt.writerow(row)
finally:
output.close()
if __name__ == '__main__':
debug = sys.argv.pop(-1)
build = sys.argv.pop(-1)
h2o.parse_our_args()
h2o_hosts.build_cloud_with_hosts(enable_benchmark_log=False)
#AIRLINES
airlinesTestParseStart = time.time()
hK = "AirlinesHeader.csv"
headerPathname = "bench/Airlines" + "/" + hK
h2i.import_only(bucket='home-0xdiag-datasets', path=headerPathname)
headerKey = h2i.find_key(hK)
testFile = h2i.import_parse(bucket='home-0xdiag-datasets', path='bench/Airlines/AirlinesTest.csv', schema='local', hex_key="atest.hex", header=1, header_from_file=headerKey, separator=44,
timeoutSecs=4800,retryDelaySecs=5, pollTimeoutSecs=4800)
elapsedAirlinesTestParse = time.time() - airlinesTestParseStart
row = {'testParseWallTime' : elapsedAirlinesTestParse}
response = 'IsDepDelayed'
ignored = None
doGBM(files['Airlines'], folderPath='Airlines',
ignored_cols = ignored,
classification = 1,
testFilehex = testFile['destination_key'],
ntrees = 100,
depth = 5,
minrows = 10,
nbins = 100,
learnRate = 0.01,
response = response,
row = row
)
#COVTYPE
#covTypeTestParseStart = time.time()
#hK = "CovTypeHeader.csv"
#headerPathname = "bench/CovType" + "/" + hK
#h2i.import_only(bucket='home-0xdiag-datasets', path=headerPathname)
#headerKey = h2i.find_key(hK)
#testFile = h2i.import_parse(bucket='home-0xdiag-datasets', path='bench/CovType/CovTypeTest.csv', schema='local', hex_key="covTtest.hex", header=1, header_from_file=headerKey, separator=44,
# timeoutSecs=4800,retryDelaySecs=5, pollTimeoutSecs=4800)
#elapsedCovTypeTestParse = time.time() - covTypeTestParseStart
#row = {'testParseWallTime' : elapsedCovTypeTestParse}
#response = 'C55'
#ignored = None
#doGBM(files['Covtype'], folderPath='CovType',
# ignored_cols = ignored,
# classification = 1,
# testFilehex = testFile['destination_key'],
# ntrees = 100,
# depth = 5,
# minrows = 10,
# nbins = 100,
# learnRate = 0.01,
# response = response,
# row = row
# )
#ALLBEDROOMS
allBedroomsTestParseStart = time.time()
hK = "AllBedroomsHeader.csv"
headerPathname = "bench/AllBedrooms" + "/" + hK
h2i.import_only(bucket='home-0xdiag-datasets', path=headerPathname)
headerKey = h2i.find_key(hK)
testFile = h2i.import_parse(bucket='home-0xdiag-datasets', path='bench/AllBedrooms/AllBedroomsTest.csv', schema='local', hex_key="allBTest.hex", header=1, header_from_file=headerKey, separator=44,
timeoutSecs=4800,retryDelaySecs=5, pollTimeoutSecs=4800)
    elapsedAllBedroomsTestParse = time.time() - allBedroomsTestParseStart
    row = {'testParseWallTime' : elapsedAllBedroomsTestParse}
response = 'medrent'
ignored = None
    doGBM(files['AllBedrooms'], folderPath='AllBedrooms',
ignored_cols = ignored,
classification = 0,
testFilehex = testFile['destination_key'],
ntrees = 100,
depth = 5,
minrows = 10,
nbins = 100,
learnRate = 0.01,
response = response,
row = row
)
h2o.tear_down_cloud()
| apache-2.0 |
nhuntwalker/astroML | book_figures/chapter10/fig_LS_comparison.py | 4 | 2128 | """
Comparison of Lomb-Scargle Methods
----------------------------------
This shows a comparison of the Lomb-Scargle periodogram
and the Modified Lomb-Scargle periodogram for a single star,
along with the multi-term results.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from __future__ import print_function
import numpy as np
from matplotlib import pyplot as plt
from astroML.time_series import\
lomb_scargle, search_frequencies, multiterm_periodogram
from astroML.datasets import fetch_LINEAR_sample
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#id, period = 11375941, 58.4
id, period = 18525697, 17.05
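# Note: despite its name, `period` is used below as the starting angular
# frequency for the omega grid passed to the periodograms.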
data = fetch_LINEAR_sample()
t, y, dy = data[id].T
omega = np.linspace(period, period + 0.1, 1000)
fig = plt.figure(figsize=(5, 3.75))
ax = plt.subplot(211)
for n_terms in [1, 2, 3]:
P1 = multiterm_periodogram(t, y, dy, omega, n_terms=n_terms)
plt.plot(omega, P1, lw=1, label='m = %i' % n_terms)
plt.legend(loc=2)
plt.xlim(period, period + 0.1)
plt.ylim(0, 1.0)
plt.ylabel(r'$1 - \chi^2(\omega) / \chi^2_{ref}$')
plt.subplot(212, sharex=ax)
for generalized in [True, False]:
if generalized:
label = 'generalized LS'
else:
label = 'standard LS'
P2 = lomb_scargle(t, y, dy, omega, generalized=generalized)
plt.plot(omega, P2, lw=1, label=label)
plt.legend(loc=2)
plt.xlim(period, period + 0.1)
plt.ylim(0, 1.0)
plt.xlabel(r'frequency $\omega$')
plt.ylabel(r'$P_{LS}(\omega)$')
plt.show()
| bsd-2-clause |
herilalaina/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 90 | 10801 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
# Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
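    """Build a sparse, ill-posed regression problem (CSC matrix X, targets y).
    Only the first `n_informative` features influence the targets, and roughly
    half of the entries of X are zeroed out.
    """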
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
def test_same_multiple_output_sparse_dense():
for normalize in [True, False]:
l = ElasticNet(normalize=normalize)
X = [[0, 1, 2, 3, 4],
[0, 2, 5, 8, 11],
[9, 10, 11, 12, 13],
[10, 11, 12, 13, 14]]
y = [[1, 2, 3, 4, 5],
[1, 3, 6, 9, 12],
[10, 11, 12, 13, 14],
[11, 12, 13, 14, 15]]
ignore_warnings(l.fit)(X, y)
sample = np.array([1, 2, 3, 4, 5]).reshape(1, -1)
predict_dense = l.predict(sample)
l_sp = ElasticNet(normalize=normalize)
X_sp = sp.coo_matrix(X)
ignore_warnings(l_sp.fit)(X_sp, y)
sample_sparse = sp.coo_matrix(sample)
predict_sparse = l_sp.predict(sample_sparse)
assert_array_almost_equal(predict_sparse, predict_dense)
| bsd-3-clause |
rhiever/sklearn-benchmarks | model_code/random_search_preprocessing/GradientBoostingClassifier.py | 1 | 2817 | import sys
import pandas as pd
import numpy as np
from sklearn.preprocessing import Binarizer, MaxAbsScaler, MinMaxScaler
from sklearn.preprocessing import Normalizer, PolynomialFeatures, RobustScaler, StandardScaler
from sklearn.decomposition import FastICA, PCA
from sklearn.kernel_approximation import RBFSampler, Nystroem
from sklearn.cluster import FeatureAgglomeration
from sklearn.feature_selection import SelectFwe, SelectPercentile, VarianceThreshold
from sklearn.feature_selection import SelectFromModel, RFE
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from evaluate_model import evaluate_model
dataset = sys.argv[1]
num_param_combinations = int(sys.argv[2])
random_seed = int(sys.argv[3])
preprocessor_num = int(sys.argv[4])
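# Expected invocation (illustrative):
#   python GradientBoostingClassifier.py <dataset> <num_param_combinations> <random_seed> <preprocessor_num>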
np.random.seed(random_seed)
preprocessor_list = [Binarizer, MaxAbsScaler, MinMaxScaler, Normalizer,
PolynomialFeatures, RobustScaler, StandardScaler,
FastICA, PCA, RBFSampler, Nystroem, FeatureAgglomeration,
SelectFwe, SelectPercentile, VarianceThreshold,
SelectFromModel, RFE]
chosen_preprocessor = preprocessor_list[preprocessor_num]
pipeline_components = [chosen_preprocessor, GradientBoostingClassifier]
pipeline_parameters = {}
n_estimators_values = np.random.choice(list(range(50, 1001, 50)), size=num_param_combinations)
min_impurity_decrease_values = np.random.exponential(scale=0.01, size=num_param_combinations)
max_features_values = np.random.choice(list(np.arange(0.01, 1., 0.01)) + ['sqrt', 'log2', None], size=num_param_combinations)
learning_rate_values = np.random.uniform(low=1e-10, high=5., size=num_param_combinations)
loss_values = np.random.choice(['deviance', 'exponential'], size=num_param_combinations)
max_depth_values = np.random.choice(list(range(1, 51)) + [None], size=num_param_combinations)
all_param_combinations = zip(n_estimators_values, min_impurity_decrease_values, max_features_values, learning_rate_values, loss_values, max_depth_values)
pipeline_parameters[GradientBoostingClassifier] = \
[{'n_estimators': n_estimators, 'min_impurity_decrease': min_impurity_decrease, 'max_features': max_features, 'learning_rate': learning_rate, 'loss': loss, 'max_depth': max_depth, 'random_state': 324089}
for (n_estimators, min_impurity_decrease, max_features, learning_rate, loss, max_depth) in all_param_combinations]
if chosen_preprocessor is SelectFromModel:
pipeline_parameters[SelectFromModel] = [{'estimator': ExtraTreesClassifier(n_estimators=100, random_state=324089)}]
elif chosen_preprocessor is RFE:
pipeline_parameters[RFE] = [{'estimator': ExtraTreesClassifier(n_estimators=100, random_state=324089)}]
evaluate_model(dataset, pipeline_components, pipeline_parameters)
| mit |
schets/scikit-learn | examples/linear_model/plot_logistic.py | 309 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how logistic regression would, on this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# Generate a toy dataset: a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
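    # logistic (sigmoid) function: maps any real value into (0, 1)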
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
schets/scikit-learn | benchmarks/bench_plot_lasso_path.py | 299 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but has a fat, heavy tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
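    """Time lars_path and lasso_path, with and without a precomputed Gram
    matrix, over a grid of (n_samples, n_features) sizes and return a dict of
    timing lists keyed by method label.
    """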
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
                'n_informative': n_features // 10,
                'effective_rank': min(n_samples, n_features) // 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
#ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
#ax.legend()
i += 1
plt.show()
| bsd-3-clause |
shareactorIO/pipeline | source.ml/jupyterhub.ml/notebooks/zz_old/TensorFlow/GoogleTraining/workshop_sections/extras/cnn_text_classification/data_helpers2.py | 4 | 9959 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# This file is a modification of the code here:
# https://github.com/dennybritz/cnn-text-classification-tf
from collections import Counter
import itertools
import json
import re
import numpy as np
import tensorflow as tf
vocabulary_mapping = None
vocabulary_inv = None
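# Module-level caches for the vocabulary; get_embeddings() reads
# vocabulary_mapping and expects it to be populated before being called.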
def clean_str(string):
"""
Tokenization/string cleaning for all datasets except for SST.
Original taken from
https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
string = re.sub(r"\'s", " \'s", string)
string = re.sub(r"\'ve", " \'ve", string)
string = re.sub(r"n\'t", " n\'t", string)
string = re.sub(r"\'re", " \'re", string)
string = re.sub(r"\'d", " \'d", string)
string = re.sub(r"\'ll", " \'ll", string)
string = re.sub(r",", " , ", string)
string = re.sub(r"!", " ! ", string)
string = re.sub(r"\(", " \( ", string)
string = re.sub(r"\)", " \) ", string)
string = re.sub(r"\?", " \? ", string)
string = re.sub(r"\s{2,}", " ", string)
return string.strip().lower()
def load_data_and_labels(
cat1=None, cat2=None, x_text=None,
positive_examples=None, negative_examples=None
):
"""
Loads two-category data from files, splits the data into words and
generates labels. Returns split sentences and labels.
"""
if not x_text or not positive_examples or not negative_examples:
# Load data from files
print("Loading data from {} and {}".format(cat1, cat2))
positive_examples = list(open(cat1, "r").readlines())
positive_examples = [s.strip() for s in positive_examples]
negative_examples = list(open(cat2, "r").readlines())
negative_examples = [s.strip() for s in negative_examples]
# Split by words
x_text = positive_examples + negative_examples
x_text = [clean_str(sent) for sent in x_text]
x_text = [s.split(" ") for s in x_text]
# Generate labels
print("Generating labels...")
positive_labels = [[0, 1] for _ in positive_examples]
negative_labels = [[1, 0] for _ in negative_examples]
y = np.concatenate([positive_labels, negative_labels], 0)
positive_examples = None
negative_examples = None
return [x_text, y]
def build_vocab_mapping(run="", write_mapping=True,
cat1=None, cat2=None
):
"""
Generate vocabulary mapping info, write it to disk for later eval.
This ensures that the mapping used for the eval is the same.
"""
print("Building the vocabulary mapping. " +
"This will take a while for large datasets.")
# Load data from files
positive_examples = list(open(cat1, "r").readlines())
positive_examples = [s.strip() for s in positive_examples]
negative_examples = list(open(cat2, "r").readlines())
negative_examples = [s.strip() for s in negative_examples]
# Split by words
x_text = positive_examples + negative_examples
print("cleaning...")
x_text = [clean_str(sent) for sent in x_text]
print("splitting...")
x_text = [s.split(" ") for s in x_text]
print("building indexes...")
padded_sentences = pad_sentences(x_text)
vocabulary_mapping, vocabulary_inv = build_vocab(
padded_sentences)
vocab_file = "vocab{}.json".format(run)
if write_mapping:
print("writing vocab file {}".format(vocab_file))
with open(vocab_file, "w") as f:
f.write(json.dumps(vocabulary_mapping))
return [x_text, positive_examples, negative_examples, padded_sentences, vocabulary_mapping, vocabulary_inv]
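# Illustrative example: pad_sentences([["a", "b"], ["c"]]) returns
# [["a", "b"], ["c", "<PAD/>"]] (both sentences padded to the capped max length).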
def pad_sentences(sentences, padding_word="<PAD/>",
max_sent_length=60):
"""
Pads all sentences to the same length. The length is defined by the min of
the longest sentence and a given max sentence length.
Returns padded sentences.
"""
sequence_length = max(len(x) for x in sentences)
# cap sentence length
print("setting seq length to min of {} and {}".format(
sequence_length, max_sent_length))
sequence_length = min(sequence_length, max_sent_length)
print("capped longest seq length: {}".format(sequence_length))
padded_sentences = []
for i in range(len(sentences)):
# truncate as necessary
sentence = sentences[i][:sequence_length]
num_padding = sequence_length - len(sentence)
new_sentence = sentence + [padding_word] * num_padding
padded_sentences.append(new_sentence)
return padded_sentences
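# Illustrative example: build_vocab([["a", "b"], ["b"]]) returns
# ({"a": 0, "b": 1}, ["a", "b"]).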
def build_vocab(sentences, max_vocab=30000):
"""
Builds a vocabulary mapping from word to index based on the sentences.
Returns vocabulary mapping and inverse vocabulary mapping.
"""
# Build vocabulary. Cap to a max.
word_counts = Counter(itertools.chain(*sentences))
# Mapping from index to word. Use the 'max_vocab' most common.
vocabulary_inv = [x[0] for x in word_counts.most_common(max_vocab)]
vocabulary_inv = list(sorted(vocabulary_inv))
# Mapping from word to index
vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}
return [vocabulary, vocabulary_inv]
def get_embeddings(vocab_size, embedding_size, emb_file): # expected sizes
"""..."""
# create a matrix of the right size
embeddings = np.random.uniform(
-1.0, 1.0, size=(vocab_size, embedding_size)).astype('float32')
# get the vocabulary mapping info
if not vocabulary_mapping:
# should have already generated the vocab mapping
print("Don't have vocabulary mapping.")
return None
vocabulary = vocabulary_mapping
if len(vocabulary) != vocab_size:
print('vocab size mismatch: %s vs %s' % (vocab_size, len(vocabulary)))
return None
# read and parse the generated embeddings file
try:
with open(emb_file, "r") as f:
for line in f:
edict = json.loads(line)
key = list(edict.keys())[0]
# see if key is in the vocab
if key in vocabulary:
# then add the embedding vector
emb = edict[key][0]
if len(emb) != embedding_size:
                        print(
                            "embedding size mismatch for word {}: "
                            "{} vs {}".format(
                                key, embedding_size, len(emb)))
return None
vocab_idx = vocabulary[key]
embeddings[vocab_idx] = emb
return tf.convert_to_tensor(embeddings)
except Exception as e:
print(e)
return None
def build_input_data(sentences, labels, vocabulary):
"""
Maps sentencs and labels to vectors based on a vocabulary.
"""
# With capped vocab, need to account for word not present in
# vocab. Using the padding word.
# TODO -- pass padding word in as an arg
padding_word = "<PAD/>"
pad_idx = vocabulary[padding_word]
x = np.array(
[[vocabulary.get(word, pad_idx) for word in sentence] for sentence in sentences])
y = np.array(labels)
return [x, y]
def load_data(run="", cat1=None, cat2=None,
eval=False, vocab_file=None):
"""
    Loads and preprocesses data for the given two-category dataset.
    Returns input vectors, labels, vocabulary mapping, and inverse vocabulary.
"""
x_text = None
positive_examples = None
negative_examples = None
padded_sentences = None
print("eval mode: {}".format(eval))
print("vocab file: {}".format(vocab_file))
# Load and preprocess data
if eval: # in eval mode, use the generated vocab mapping.
print("loading generated vocab mapping")
with open(vocab_file, "r") as f:
mapping_line = f.readline()
vocabulary_mapping = json.loads(mapping_line)
else:
x_text, positive_examples, negative_examples, padded_sentences, vocabulary_mapping, vocabulary_inv = build_vocab_mapping(
run=run, cat1=cat1, cat2=cat2)
print("building training data structures")
sentences, labels = load_data_and_labels(
cat1=cat1, cat2=cat2,
x_text=x_text, positive_examples=positive_examples,
negative_examples=negative_examples)
if not padded_sentences:
padded_sentences = pad_sentences(sentences)
print("Building input data...")
x, y = build_input_data(padded_sentences, labels, vocabulary_mapping)
return [x, y, vocabulary_mapping, vocabulary_inv]
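# Illustrative usage of batch_iter (variable names assumed):
#   for batch in batch_iter(list(zip(x_train, y_train)), batch_size=64, num_epochs=10):
#       x_batch, y_batch = zip(*batch)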
def batch_iter(data, batch_size, num_epochs, shuffle=True):
"""
Generates a batch iterator for a dataset.
"""
data = np.array(data)
data_size = len(data)
num_batches_per_epoch = int(len(data)/batch_size) + 1
for epoch in range(num_epochs):
# Shuffle the data at each epoch
if shuffle:
shuffle_indices = np.random.permutation(np.arange(data_size))
shuffled_data = data[shuffle_indices]
else:
shuffled_data = data
for batch_num in range(num_batches_per_epoch):
start_index = batch_num * batch_size
end_index = min((batch_num + 1) * batch_size, data_size)
yield shuffled_data[start_index:end_index]
| apache-2.0 |
sangwook236/general-development-and-testing | sw_dev/python/rnd/test/machine_learning/pytorch_lightning/pl_bolts_self_supervised_learning.py | 2 | 11425 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Self-supervised learning.
# REF [site] >>
# https://lightning-bolts.readthedocs.io/en/latest/deprecated/models/self_supervised.html
# https://lightning-bolts.readthedocs.io/en/latest/deprecated/transforms/self_supervised.html
# https://pytorch-lightning-bolts.readthedocs.io/en/latest/self_supervised_models.html
# https://pytorch-lightning-bolts.readthedocs.io/en/latest/self_supervised_utils.html
# https://pytorch-lightning-bolts.readthedocs.io/en/latest/transforms.html
# AMDIM, BYOL, CPC v2, Moco v2, SimCLR, SwAV, SimSiam.
import torch, torchvision
import pytorch_lightning as pl
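# Note: the examples below assume the lightning-bolts (pl_bolts) package is installed
# and that multiple GPUs are available; adjust the gpus/accelerator arguments to match
# your hardware.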
# REF [site] >> https://pytorch-lightning-bolts.readthedocs.io/en/latest/self_supervised_models.html
def simple_simclr_example():
from pl_bolts.models.self_supervised import SimCLR
from pl_bolts.models.self_supervised.simclr import SimCLRTrainDataTransform, SimCLREvalDataTransform
# Load ResNet50 pretrained using SimCLR on ImageNet.
weight_path = "https://pl-bolts-weights.s3.us-east-2.amazonaws.com/simclr/bolts_simclr_imagenet/simclr_imagenet.ckpt"
simclr = SimCLR.load_from_checkpoint(weight_path, strict=False)
#train_dataset = MyDataset(transforms=SimCLRTrainDataTransform())
#val_dataset = MyDataset(transforms=SimCLREvalDataTransform())
train_dataset = torchvision.datasets.CIFAR10("", train=True, download=True, transform=SimCLRTrainDataTransform())
val_dataset = torchvision.datasets.CIFAR10("", train=False, download=True, transform=SimCLREvalDataTransform())
# SimCLR needs a lot of compute!
model = SimCLR(gpus=2, num_samples=len(train_dataset), batch_size=32, dataset="cifar10")
trainer = pl.Trainer(gpus=2, accelerator="ddp")
trainer.fit(
model,
torch.utils.data.DataLoader(train_dataset, batch_size=32, num_workers=12),
torch.utils.data.DataLoader(val_dataset, batch_size=32, num_workers=12),
)
#--------------------
simclr_resnet50 = simclr.encoder
simclr_resnet50.eval()
#my_dataset = SomeDataset()
my_dataset = val_dataset
for batch in my_dataset:
x, y = batch
out = simclr_resnet50(x)
# REF [site] >> https://pytorch-lightning-bolts.readthedocs.io/en/latest/self_supervised_models.html
def mix_and_match_any_part_or_subclass_example():
from pl_bolts.models.self_supervised import CPC_v2
from pl_bolts.losses.self_supervised_learning import FeatureMapContrastiveTask
from pl_bolts.datamodules import CIFAR10DataModule
from pl_bolts.models.self_supervised.cpc import CPCTrainTransformsCIFAR10, CPCEvalTransformsCIFAR10
from pytorch_lightning.plugins import DDPPlugin
# Data module.
dm = CIFAR10DataModule(num_workers=12, batch_size=32)
dm.train_transforms = CPCTrainTransformsCIFAR10()
dm.val_transforms = CPCEvalTransformsCIFAR10()
# Model.
amdim_task = FeatureMapContrastiveTask(comparisons="01, 11, 02", bidirectional=True)
model = CPC_v2(encoder="cpc_encoder", contrastive_task=amdim_task)
# Fit.
trainer = pl.Trainer(gpus=2, accelerator="ddp", plugins=DDPPlugin(find_unused_parameters=False))
trainer.fit(model, datamodule=dm)
# REF [site] >> https://pytorch-lightning-bolts.readthedocs.io/en/latest/self_supervised_models.html
def byol_example():
from pl_bolts.models.self_supervised import BYOL
from pl_bolts.datamodules import CIFAR10DataModule
from pl_bolts.models.self_supervised.simclr import SimCLRTrainDataTransform, SimCLREvalDataTransform
# Data module.
dm = CIFAR10DataModule(num_workers=12, batch_size=32)
dm.train_transforms = SimCLRTrainDataTransform(input_height=32)
dm.val_transforms = SimCLREvalDataTransform(input_height=32)
# Model.
model = BYOL(num_classes=10)
# Fit.
trainer = pl.Trainer(gpus=2, accelerator="ddp")
trainer.fit(model, datamodule=dm)
#--------------------
# CLI command:
# CIFAR-10:
# python byol_module.py --gpus 1
# ImageNet:
# python byol_module.py --gpus 8 --dataset imagenet2012 --data_dir /path/to/imagenet/ --meta_dir /path/to/folder/with/meta.bin/ --batch_size 32
# REF [site] >> https://pytorch-lightning-bolts.readthedocs.io/en/latest/self_supervised_models.html
def cpc_v2_example():
from pl_bolts.models.self_supervised import CPC_v2
from pl_bolts.datamodules import CIFAR10DataModule
from pl_bolts.models.self_supervised.cpc import CPCTrainTransformsCIFAR10, CPCEvalTransformsCIFAR10
from pytorch_lightning.plugins import DDPPlugin
# Data module.
dm = CIFAR10DataModule(num_workers=12, batch_size=32)
dm.train_transforms = CPCTrainTransformsCIFAR10()
dm.val_transforms = CPCEvalTransformsCIFAR10()
# Model.
model = CPC_v2(encoder="cpc_encoder")
# Fit.
trainer = pl.Trainer(gpus=2, accelerator="ddp", plugins=DDPPlugin(find_unused_parameters=False))
trainer.fit(model, datamodule=dm)
#--------------------
# CIFAR-10 pretrained model:
weight_path = "https://pl-bolts-weights.s3.us-east-2.amazonaws.com/cpc/cpc-cifar10-v4-exp3/epoch%3D474.ckpt"
# STL-10 pretrained model:
#weight_path = "https://pl-bolts-weights.s3.us-east-2.amazonaws.com/cpc/cpc-stl10-v0-exp3/epoch%3D624.ckpt"
cpc_v2 = CPC_v2.load_from_checkpoint(weight_path, strict=False)
cpc_v2.freeze()
#--------------------
# CLI command:
# Finetune:
# python cpc_finetuner.py --ckpt_path path/to/checkpoint.ckpt --dataset cifar10 --gpus 1
def moco_v2_example():
from pl_bolts.models.self_supervised import Moco_v2
from pl_bolts.datamodules import CIFAR10DataModule
from pl_bolts.models.self_supervised.moco import Moco2TrainCIFAR10Transforms, Moco2EvalCIFAR10Transforms
from pytorch_lightning.plugins import DDPPlugin
# Data module.
dm = CIFAR10DataModule(num_workers=12, batch_size=32)
dm.train_transforms = Moco2TrainCIFAR10Transforms()
dm.val_transforms = Moco2EvalCIFAR10Transforms()
# Model.
model = Moco_v2()
# Fit.
trainer = pl.Trainer(gpus=2, accelerator="ddp", plugins=DDPPlugin(find_unused_parameters=False))
trainer.fit(model, datamodule=dm)
#--------------------
# CLI command:
# CIFAR-10:
# python moco2_module.py --gpus 1
# ImageNet:
# python moco2_module.py --gpus 8 --dataset imagenet2012 --data_dir /path/to/imagenet/ --meta_dir /path/to/folder/with/meta.bin/ --batch_size 32
# REF [site] >> https://pytorch-lightning-bolts.readthedocs.io/en/latest/self_supervised_models.html
def simclr_example():
from pl_bolts.models.self_supervised import SimCLR
from pl_bolts.datamodules import CIFAR10DataModule
from pl_bolts.models.self_supervised.simclr import SimCLRTrainDataTransform, SimCLREvalDataTransform
# Data module.
dm = CIFAR10DataModule(num_workers=12, batch_size=32)
dm.train_transforms = SimCLRTrainDataTransform(input_height=32)
dm.val_transforms = SimCLREvalDataTransform(input_height=32)
# Model.
model = SimCLR(gpus=2, num_samples=dm.num_samples, batch_size=dm.batch_size, dataset="cifar10")
# Fit.
trainer = pl.Trainer(gpus=2, accelerator="ddp")
trainer.fit(model, datamodule=dm)
#--------------------
# CIFAR-10 pretrained model:
weight_path = "https://pl-bolts-weights.s3.us-east-2.amazonaws.com/simclr/bolts_simclr_imagenet/simclr_imagenet.ckpt"
# ImageNet pretrained model:
#weight_path = "https://pl-bolts-weights.s3.us-east-2.amazonaws.com/simclr/bolts_simclr_imagenet/simclr_imagenet.ckpt"
simclr = SimCLR.load_from_checkpoint(weight_path, strict=False)
simclr.freeze()
#--------------------
# CLI command:
# CIFAR-10:
# Pretrain:
# python simclr_module.py --gpus 8 --dataset cifar10 --batch_size 256 -- num_workers 16 --optimizer sgd --learning_rate 1.5 --lars_wrapper --exclude_bn_bias --max_epochs 800 --online_ft
# Finetune:
# python simclr_finetuner.py --gpus 4 --ckpt_path path/to/simclr/ckpt --dataset cifar10 --batch_size 64 --num_workers 8 --learning_rate 0.3 --num_epochs 100
# ImageNet:
# Pretrain:
# python simclr_module.py --dataset imagenet --data_path path/to/imagenet
# Finetune:
# python simclr_finetuner.py --gpus 8 --ckpt_path path/to/simclr/ckpt --dataset imagenet --data_dir path/to/imagenet/dataset --batch_size 256 --num_workers 16 --learning_rate 0.8 --nesterov True --num_epochs 90
# REF [site] >> https://pytorch-lightning-bolts.readthedocs.io/en/latest/self_supervised_models.html
def swav_example():
from pl_bolts.models.self_supervised import SwAV
from pl_bolts.datamodules import STL10DataModule
from pl_bolts.models.self_supervised.swav.transforms import SwAVTrainDataTransform, SwAVEvalDataTransform
from pl_bolts.transforms.dataset_normalizations import stl10_normalization
batch_size = 128
# Data module.
dm = STL10DataModule(data_dir=".", num_workers=16, batch_size=batch_size)
dm.train_dataloader = dm.train_dataloader_mixed
dm.val_dataloader = dm.val_dataloader_mixed
dm.train_transforms = SwAVTrainDataTransform(normalize=stl10_normalization())
dm.val_transforms = SwAVEvalDataTransform(normalize=stl10_normalization())
# Model.
model = SwAV(
gpus=1,
num_samples=dm.num_unlabeled_samples,
dataset="stl10",
batch_size=batch_size
)
# Fit.
trainer = pl.Trainer(gpus=2, accelerator="ddp", precision=16)
trainer.fit(model, datamodule=dm)
#--------------------
# ImageNet pretrained model:
weight_path = "https://pl-bolts-weights.s3.us-east-2.amazonaws.com/swav/bolts_swav_imagenet/swav_imagenet.ckpt"
#weight_path = "https://pl-bolts-weights.s3.us-east-2.amazonaws.com/swav/swav_imagenet/swav_imagenet.pth.tar"
# STL-10 pretrained model:
#weight_path = "https://pl-bolts-weights.s3.us-east-2.amazonaws.com/swav/checkpoints/swav_stl10.pth.tar"
swav = SwAV.load_from_checkpoint(weight_path, strict=True)
swav.freeze()
#--------------------
# CLI command:
# Pretrain:
# python swav_module.py --online_ft --gpus 1 --lars_wrapper --batch_size 128 --learning_rate 1e-3 --gaussian_blur --queue_length 0 --jitter_strength 1. --nmb_prototypes 512
# Finetune:
# python swav_finetuner.py --gpus 8 --ckpt_path path/to/simclr/ckpt --dataset imagenet --data_dir path/to/imagenet/dataset --batch_size 256 --num_workers 16 --learning_rate 0.8 --nesterov True --num_epochs 90
def simsiam_example():
from pl_bolts.models.self_supervised import SimSiam
from pl_bolts.datamodules import CIFAR10DataModule
from pl_bolts.models.self_supervised.simclr import SimCLRTrainDataTransform, SimCLREvalDataTransform
# Data module.
dm = CIFAR10DataModule(num_workers=12, batch_size=32)
dm.train_transforms = SimCLRTrainDataTransform(input_height=32)
dm.val_transforms = SimCLREvalDataTransform(input_height=32)
# Model.
model = SimSiam(gpus=2, num_samples=dm.num_samples, batch_size=dm.batch_size, dataset="cifar10")
# Fit.
trainer = pl.Trainer(gpus=2, accelerator="ddp")
trainer.fit(model, datamodule=dm)
#--------------------
# CLI command:
# CIFAR-10:
# python simsiam_module.py --gpus 1
# ImageNet:
# python simsiam_module.py --gpus 8 --dataset imagenet2012 --data_dir /path/to/imagenet/ --meta_dir /path/to/folder/with/meta.bin/ --batch_size 32
# REF [site] >> https://pytorch-lightning.readthedocs.io/en/latest/notebooks/course_UvA-DL/13-contrastive-learning.html
def self_supervised_contrastive_learning_with_simclr_tutorial():
raise NotImplementedError
def main():
#simple_simclr_example()
#mix_and_match_any_part_or_subclass_example()
#byol_example()
#cpc_v2_example()
#moco_v2_example()
simclr_example()
#swav_example()
#simsiam_example()
#self_supervised_contrastive_learning_with_simclr_tutorial() # Not yet implemented.
#--------------------------------------------------------------------
if "__main__" == __name__:
main()
| gpl-2.0 |
xzturn/tensorflow | tensorflow/python/keras/callbacks_test.py | 2 | 77079 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import json
import os
import re
import shutil
import sys
import threading
import time
import unittest
from absl.testing import parameterized
import numpy as np
from tensorflow.core.framework import summary_pb2
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import random_seed
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.utils import np_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.training import adam
from tensorflow.python.training import checkpoint_management
try:
import h5py # pylint:disable=g-import-not-at-top
except ImportError:
h5py = None
try:
import requests # pylint:disable=g-import-not-at-top
except ImportError:
requests = None
TRAIN_SAMPLES = 10
TEST_SAMPLES = 10
NUM_CLASSES = 2
INPUT_DIM = 3
NUM_HIDDEN = 5
BATCH_SIZE = 5
class Counter(keras.callbacks.Callback):
"""Counts the number of times each callback method was run.
Attributes:
    method_counts: dict. Contains the number of times each callback method was
      run.
"""
def __init__(self):
self.method_counts = collections.defaultdict(int)
methods_to_count = [
'on_batch_begin', 'on_batch_end', 'on_epoch_begin', 'on_epoch_end',
'on_predict_batch_begin', 'on_predict_batch_end', 'on_predict_begin',
'on_predict_end', 'on_test_batch_begin', 'on_test_batch_end',
'on_test_begin', 'on_test_end', 'on_train_batch_begin',
'on_train_batch_end', 'on_train_begin', 'on_train_end'
]
for method_name in methods_to_count:
setattr(self, method_name,
self.wrap_with_counts(method_name, getattr(self, method_name)))
def wrap_with_counts(self, method_name, method):
def _call_and_count(*args, **kwargs):
self.method_counts[method_name] += 1
return method(*args, **kwargs)
return _call_and_count
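  # Typical usage in these tests: pass a Counter instance via `callbacks=[counter]`
  # and then inspect `counter.method_counts` for the expected hook counts.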
def _get_numpy():
return np.ones((10, 10)), np.ones((10, 1))
def _get_sequence():
class MySequence(keras.utils.data_utils.Sequence):
def __getitem__(self, _):
return np.ones((2, 10)), np.ones((2, 1))
def __len__(self):
return 5
return MySequence(), None
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
class CallbackCountsTest(keras_parameterized.TestCase):
def _check_counts(self, counter, expected_counts):
"""Checks that the counts registered by `counter` are those expected."""
for method_name, expected_count in expected_counts.items():
self.assertEqual(
counter.method_counts[method_name],
expected_count,
msg='For method {}: expected {}, got: {}'.format(
method_name, expected_count, counter.method_counts[method_name]))
def _get_model(self):
layers = [
keras.layers.Dense(10, activation='relu'),
keras.layers.Dense(1, activation='sigmoid')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
adam.AdamOptimizer(0.001),
'binary_crossentropy',
run_eagerly=testing_utils.should_run_eagerly())
return model
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_fit(self, data):
if not context.executing_eagerly():
self.skipTest('Behavior changed in v2.')
x, y = data
val_x, val_y = np.ones((4, 10)), np.ones((4, 1))
model = self._get_model()
counter = Counter()
model.fit(
x,
y,
validation_data=(val_x, val_y),
batch_size=2,
steps_per_epoch=5,
epochs=5,
callbacks=[counter])
self._check_counts(
counter, {
'on_batch_begin': 25,
'on_batch_end': 25,
'on_epoch_begin': 5,
'on_epoch_end': 5,
'on_predict_batch_begin': 0,
'on_predict_batch_end': 0,
'on_predict_begin': 0,
'on_predict_end': 0,
'on_test_batch_begin': 10,
'on_test_batch_end': 10,
'on_test_begin': 5,
'on_test_end': 5,
'on_train_batch_begin': 25,
'on_train_batch_end': 25,
'on_train_begin': 1,
'on_train_end': 1
})
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_evaluate(self, data):
x, y = data
is_sequence = isinstance(x, keras.utils.data_utils.Sequence)
model = self._get_model()
counter = Counter()
model.evaluate(
x,
y,
batch_size=2 if not is_sequence else None,
steps=5 if is_sequence else None,
callbacks=[counter])
self._check_counts(
counter, {
'on_test_batch_begin': 5,
'on_test_batch_end': 5,
'on_test_begin': 1,
'on_test_end': 1
})
@parameterized.named_parameters(('with_numpy', _get_numpy()),
('with_sequence', _get_sequence()))
def test_callback_hooks_are_called_in_predict(self, data):
x = data[0]
is_sequence = isinstance(x, keras.utils.data_utils.Sequence)
model = self._get_model()
counter = Counter()
model.predict(
x,
batch_size=2 if not is_sequence else None,
steps=5 if is_sequence else None,
callbacks=[counter])
self._check_counts(
counter, {
'on_predict_batch_begin': 5,
'on_predict_batch_end': 5,
'on_predict_begin': 1,
'on_predict_end': 1
})
def test_callback_list_methods(self):
counter = Counter()
callback_list = keras.callbacks.CallbackList([counter])
batch = 0
callback_list.on_test_batch_begin(batch)
callback_list.on_test_batch_end(batch)
callback_list.on_predict_batch_begin(batch)
callback_list.on_predict_batch_end(batch)
self._check_counts(
counter, {
'on_test_batch_begin': 1,
'on_test_batch_end': 1,
'on_predict_batch_begin': 1,
'on_predict_batch_end': 1
})
class KerasCallbacksTest(keras_parameterized.TestCase):
def _get_model(self, input_shape=None):
layers = [
keras.layers.Dense(3, activation='relu'),
keras.layers.Dense(2, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=input_shape)
model.compile(
loss='mse',
optimizer='rmsprop',
metrics=[keras.metrics.CategoricalAccuracy(name='my_acc')],
run_eagerly=testing_utils.should_run_eagerly())
return model
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging(self):
model = self._get_model(input_shape=(3,))
x = array_ops.ones((200, 3))
y = array_ops.zeros((200, 2))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types(exclude_models='functional')
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_deferred_model_build(self):
model = self._get_model()
self.assertFalse(model.built)
x = array_ops.ones((200, 3))
y = array_ops.zeros((200, 2))
dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*- loss:.*- my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(dataset, epochs=2, steps_per_epoch=10)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_progbar_logging_validation_data(self):
model = self._get_model(input_shape=(3,))
x = array_ops.ones((50, 3))
y = array_ops.zeros((50, 2))
training_dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
val_dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(10)
expected_log = r'(.*5/5.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*)+'
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(training_dataset, epochs=2, validation_data=val_dataset)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_progbar_logging_validation_split(self):
model = self._get_model(input_shape=(3,))
x = np.ones((100, 3))
y = np.zeros((100, 2))
expected_log = (
r'(?s).*1/2.*8/8.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:'
r'.*2/2.*8/8.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(x, y, batch_size=10, epochs=2, validation_split=0.2)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_progbar_logging_training_validation(self):
model = self._get_model(input_shape=(2,))
def generator():
for _ in range(100):
yield [1, 1], 1
training = dataset_ops.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2) \
.repeat()
validation = dataset_ops.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2)
expected_log = (
r'(?s).*1/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:'
r'.*2/2.*20/20.*- loss:.*- my_acc:.*- val_loss:.*- val_my_acc:.*')
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(
x=training, validation_data=validation, epochs=2, steps_per_epoch=20)
self.assertRegexpMatches(printed.contents(), expected_log)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_progbar_logging_with_dataset_and_partial_batch(self):
model = self._get_model(input_shape=(2,))
def generator():
# Have a partial batch at the end.
for _ in range(9):
yield np.random.random(2), 1
training = dataset_ops.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2)
validation = dataset_ops.Dataset \
.from_generator(
generator=generator,
output_types=('float64', 'float64'),
output_shapes=([2], [])) \
.batch(2)
with self.captureWritesToStream(sys.stdout) as printed:
model.fit(x=training, validation_data=validation)
    # Make sure the values of the val_ metrics are not zero.
log_content = printed.contents()
val_loss = re.findall(r'val_loss: (\d\.\d+)', log_content)
self.assertLen(val_loss, 1)
self.assertGreater(float(val_loss[0]), 0.0)
@keras_parameterized.run_with_all_model_types
def test_ModelCheckpoint(self):
if h5py is None:
return # Skip test if models cannot be saved.
layers = [
keras.layers.Dense(NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'),
keras.layers.Dense(NUM_CLASSES, activation='softmax')
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10,))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'checkpoint.h5')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
# case 1
monitor = 'val_loss'
save_best_only = False
mode = 'auto'
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 2
mode = 'min'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 3
mode = 'max'
monitor = 'val_acc'
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# case 4
save_best_only = True
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
os.remove(filepath)
# Case: metric not available.
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor='unknown',
save_best_only=True)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# File won't be written.
assert not os.path.exists(filepath)
# case 5
save_best_only = False
period = 2
mode = 'auto'
filepath = os.path.join(temp_dir, 'checkpoint.{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
period=period)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=4,
verbose=1)
assert os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=4))
os.remove(filepath.format(epoch=2))
os.remove(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=3))
# Invalid use: this will raise a warning but not an Exception.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode='unknown')
# Case 6: `ModelCheckpoint` with a combination of `save_freq` and `period`.
# Though `period` is deprecated, we're testing it for
# backward-compatibility.
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath, monitor=monitor, mode=mode, save_freq='epoch', period=5)
]
assert not os.path.exists(filepath.format(epoch=0))
assert not os.path.exists(filepath.format(epoch=5))
model.fit(
x_train,
y_train,
batch_size=2,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=10,
verbose=1)
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=2))
assert not os.path.exists(filepath.format(epoch=3))
assert not os.path.exists(filepath.format(epoch=4))
assert os.path.exists(filepath.format(epoch=5))
assert not os.path.exists(filepath.format(epoch=6))
assert os.path.exists(filepath.format(epoch=10))
os.remove(filepath.format(epoch=5))
os.remove(filepath.format(epoch=10))
# Case 7: `ModelCheckpoint` with an integer `save_freq`
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
cbks = [
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq=15,
            period=100)  # The period should be ignored (which is what this test checks).
]
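    # With save_freq=15 batches and batch_size=2 (so, presumably, 5 batches per
    # epoch given TRAIN_SAMPLES), a checkpoint lands exactly at the end of every
    # third epoch, which is what the assertions below check.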
assert not os.path.exists(filepath.format(epoch=3))
model.fit(
x_train,
y_train,
batch_size=2,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=10,
verbose=1)
assert not os.path.exists(filepath.format(epoch=1))
assert not os.path.exists(filepath.format(epoch=2))
assert os.path.exists(filepath.format(epoch=3))
assert not os.path.exists(filepath.format(epoch=4))
assert not os.path.exists(filepath.format(epoch=5))
assert os.path.exists(filepath.format(epoch=6))
assert not os.path.exists(filepath.format(epoch=7))
assert not os.path.exists(filepath.format(epoch=8))
assert os.path.exists(filepath.format(epoch=9))
os.remove(filepath.format(epoch=3))
os.remove(filepath.format(epoch=6))
os.remove(filepath.format(epoch=9))
# Case 8: `ModelCheckpoint` with valid and invalid save_freq argument.
with self.assertRaisesRegexp(ValueError, 'Unrecognized save_freq'):
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq='invalid_save_freq')
# The following should not raise ValueError.
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq='epoch')
keras.callbacks.ModelCheckpoint(
filepath,
monitor=monitor,
save_best_only=save_best_only,
mode=mode,
save_freq=3)
def _get_dummy_resource_for_model_checkpoint_testing(self):
def get_input_datasets():
# Simple training input.
train_input = [[1.]] * 16
train_label = [[0.]] * 16
ds = dataset_ops.Dataset.from_tensor_slices((train_input, train_label))
return ds.batch(8, drop_remainder=True)
# Very simple bias model to eliminate randomness.
optimizer = gradient_descent.SGD(0.1)
model = sequential.Sequential()
model.add(testing_utils.Bias(input_shape=(1,)))
model.compile(loss='mae', optimizer=optimizer, metrics=['mae'])
train_ds = get_input_datasets()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'checkpoint.epoch{epoch:02d}.h5')
# The filepath shouldn't exist at the beginning.
self.assertFalse(os.path.exists(filepath))
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True)
return model, train_ds, callback, filepath
def _run_load_weights_on_restart_test_common_iterations(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
initial_epochs = 3
model.fit(train_ds, epochs=initial_epochs, callbacks=[callback])
# The files should exist after fitting with callback.
for epoch in range(initial_epochs):
self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
self.assertFalse(os.path.exists(filepath.format(epoch=initial_epochs + 1)))
self.assertEqual(
callback._get_most_recently_modified_file_matching_pattern(filepath),
filepath.format(epoch=initial_epochs))
model.fit(train_ds, epochs=1)
weights_after_one_more_epoch = model.get_weights()
# The filepath should continue to exist after fitting without callback.
for epoch in range(initial_epochs):
self.assertTrue(os.path.exists(filepath.format(epoch=epoch + 1)))
return model, train_ds, filepath, weights_after_one_more_epoch
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_true_test(save_weights_only):
def func(self):
(model, train_ds, filepath, weights_after_one_more_epoch
) = self._run_load_weights_on_restart_test_common_iterations()
      # Sleep for a short period to ensure the files are created with
      # different timestamps (in the macOS OSS build the timestamp granularity
      # is only 1 second).
time.sleep(2)
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath,
save_weights_only=save_weights_only,
load_weights_on_restart=True)
model.fit(train_ds, epochs=1, callbacks=[callback])
weights_after_model_restoring_and_one_more_epoch = model.get_weights()
self.assertEqual(
callback._get_most_recently_modified_file_matching_pattern(filepath),
filepath.format(epoch=1))
model.fit(
train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath,
save_weights_only=save_weights_only,
load_weights_on_restart=True)
])
weights_with_one_final_extra_epoch = model.get_weights()
      # Assert that the weights one epoch after the initial fitting and the
      # weights one further epoch after that are close, because a
      # ModelCheckpoint with load_weights_on_restart=True is given (so the
      # model is restored at the beginning of training).
self.assertAllClose(weights_after_one_more_epoch,
weights_after_model_restoring_and_one_more_epoch)
self.assertNotAllClose(weights_after_one_more_epoch,
weights_with_one_final_extra_epoch)
return func
@staticmethod
def get_ModelCheckpoint_load_weights_on_restart_false_test(save_weights_only):
def func(self):
(model, train_ds, filepath, weights_after_one_more_epoch
) = self._run_load_weights_on_restart_test_common_iterations()
model.fit(
train_ds,
epochs=1,
callbacks=[
keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=save_weights_only)
])
weights_after_model_restoring_and_one_more_epoch = model.get_weights()
      # Assert that the weights one epoch after the initial fitting and the
      # weights one further epoch after that are different, because a
      # ModelCheckpoint with load_weights_on_restart=False is given (so the
      # model is not restored at the beginning of training).
self.assertNotAllClose(weights_after_one_more_epoch,
weights_after_model_restoring_and_one_more_epoch)
return func
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_true_save_weights_only_false = \
get_ModelCheckpoint_load_weights_on_restart_true_test.__func__(False)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_true = \
get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(True)
test_model_checkpoint_load_weights_on_restart_false_save_weights_only_false \
= get_ModelCheckpoint_load_weights_on_restart_false_test.__func__(False)
def test_ModelCheckpoint_override_if_file_exist(self):
(model, train_ds, filepath,
_) = self._run_load_weights_on_restart_test_common_iterations()
    # Sleep for a short period to ensure the files are created with
    # different timestamps (in the macOS OSS build the timestamp granularity
    # is only 1 second).
time.sleep(2)
callback = keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True)
model.load_weights(
callback._get_most_recently_modified_file_matching_pattern(filepath))
weights_before_additional_fit = model.get_weights()
model.fit(train_ds, epochs=1, callbacks=[callback])
model.load_weights(
callback._get_most_recently_modified_file_matching_pattern(filepath))
weights_after_additional_fit = model.get_weights()
self.assertNotAllClose(weights_before_additional_fit,
weights_after_additional_fit)
def test_fit_with_ModelCheckpoint_with_tf_config(self):
(model, train_ds, callback,
_) = self._get_dummy_resource_for_model_checkpoint_testing()
os.environ['TF_CONFIG'] = json.dumps({
'cluster': {
'worker': ['localhost:23333']
},
'task': {
'type': 'worker',
'index': 0
}
})
# `model.fit()` should work regardless of the presence of `TF_CONFIG`.
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_fit_with_ModelCheckpoint_with_dir_as_h5_filepath(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'temp.h5')
self.assertFalse(os.path.exists(filepath))
os.mkdir(filepath)
self.assertTrue(os.path.exists(filepath))
callback = keras.callbacks.ModelCheckpoint(filepath=filepath)
with self.assertRaisesRegexp(IOError, 'Please specify a non-directory '
'filepath for ModelCheckpoint.'):
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_ModelCheckpoint_with_bad_path_placeholders(self):
(model, train_ds, callback,
filepath) = self._get_dummy_resource_for_model_checkpoint_testing()
temp_dir = self.get_temp_dir()
filepath = os.path.join(temp_dir, 'chkpt_{epoch:02d}_{mape:.2f}.h5')
callback = keras.callbacks.ModelCheckpoint(filepath=filepath)
with self.assertRaisesRegexp(KeyError, 'Failed to format this callback '
'filepath.*'):
model.fit(train_ds, epochs=1, callbacks=[callback])
def test_EarlyStopping(self):
with self.cached_session():
np.random.seed(123)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
cases = [
('max', 'val_acc'),
('min', 'val_loss'),
('auto', 'val_acc'),
('auto', 'loss'),
('unknown', 'unknown')
]
for mode, monitor in cases:
patience = 0
cbks = [
keras.callbacks.EarlyStopping(
patience=patience, monitor=monitor, mode=mode)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
def test_EarlyStopping_reuse(self):
with self.cached_session():
np.random.seed(1337)
patience = 3
data = np.random.random((100, 1))
labels = np.where(data > 0.5, 1, 0)
model = keras.models.Sequential((keras.layers.Dense(
1, input_dim=1, activation='relu'), keras.layers.Dense(
1, activation='sigmoid'),))
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
weights = model.get_weights()
stopper = keras.callbacks.EarlyStopping(monitor='acc', patience=patience)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
# This should allow training to go for at least `patience` epochs
model.set_weights(weights)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_with_baseline(self):
with self.cached_session():
np.random.seed(1337)
baseline = 0.6
(data, labels), _ = testing_utils.get_test_data(
train_samples=100,
test_samples=50,
input_shape=(1,),
num_classes=NUM_CLASSES)
model = testing_utils.get_small_sequential_mlp(
num_hidden=1, num_classes=1, input_dim=1)
model.compile(
optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])
stopper = keras.callbacks.EarlyStopping(monitor='acc',
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) == 1
patience = 3
stopper = keras.callbacks.EarlyStopping(monitor='acc',
patience=patience,
baseline=baseline)
hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
assert len(hist.epoch) >= patience
def test_EarlyStopping_final_weights_when_restoring_model_weights(self):
class DummyModel(object):
def __init__(self):
self.stop_training = False
self.weights = -1
def get_weights(self):
return self.weights
def set_weights(self, weights):
self.weights = weights
def set_weight_to_epoch(self, epoch):
self.weights = epoch
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
patience=2,
restore_best_weights=True)
early_stop.model = DummyModel()
losses = [0.2, 0.15, 0.1, 0.11, 0.12]
    # The best configuration is in epoch 2 (loss = 0.1000).
epochs_trained = 0
early_stop.on_train_begin()
for epoch in range(len(losses)):
epochs_trained += 1
early_stop.model.set_weight_to_epoch(epoch=epoch)
early_stop.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
if early_stop.model.stop_training:
break
# The best configuration is in epoch 2 (loss = 0.1000),
# and while patience = 2, we're restoring the best weights,
# so we end up at the epoch with the best weights, i.e. epoch 2
self.assertEqual(early_stop.model.get_weights(), 2)
def test_RemoteMonitor(self):
if requests is None:
return
monitor = keras.callbacks.RemoteMonitor()
    # This will raise a warning since the default address is unreachable:
monitor.on_epoch_end(0, logs={'loss': 0.})
def test_LearningRateScheduler(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.2) < keras.backend.epsilon()
cbks = [keras.callbacks.LearningRateScheduler(lambda x, lr: lr / 2)]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
assert (
float(keras.backend.get_value(
model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon()
cbks = [
keras.callbacks.LearningRateScheduler(
lambda epoch, _: learning_rate_schedule.CosineDecay(0.01, 2)
(epoch))
]
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
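      # After 2 epochs the schedule was last evaluated at epoch index 1, i.e.
      # halfway through the 2-step cosine decay, so the expected learning rate
      # is 0.01 * 0.5 * (1 + cos(pi / 2)), as computed below.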
cosine_decay_np = 0.5 * (1 + np.cos(np.pi * (1 / 2)))
decayed_learning_rate = 0.01 * cosine_decay_np
assert (float(keras.backend.get_value(model.optimizer.lr)) -
decayed_learning_rate) < keras.backend.epsilon()
def test_ReduceLROnPlateau(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
random_seed.set_random_seed(1234)
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.SGD(lr=0.1))
return model
# TODO(psv): Make sure the callback works correctly when min_delta is
# set as 0. Test fails when the order of this callback and assertion is
# interchanged.
model = make_model()
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=0,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.1, atol=1e-4)
model = make_model()
      # This should reduce the LR after the first epoch
      # (due to the high min_delta).
cbks = [
keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=0.1,
min_delta=10,
patience=1,
cooldown=5)
]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=2)
self.assertAllClose(
float(keras.backend.get_value(model.optimizer.lr)), 0.01, atol=1e-4)
def test_ReduceLROnPlateau_patience(self):
class DummyOptimizer(object):
def __init__(self):
self.lr = keras.backend.variable(1.0)
class DummyModel(object):
def __init__(self):
self.optimizer = DummyOptimizer()
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(
monitor='val_loss', patience=2)
reduce_on_plateau.model = DummyModel()
losses = [0.0860, 0.1096, 0.1040]
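    # val_loss improves only on the first epoch; with patience=2 the wait
    # counter reaches the patience limit on the third epoch, so only the last
    # recorded learning rate should have been reduced.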
lrs = []
for epoch in range(len(losses)):
reduce_on_plateau.on_epoch_end(epoch, logs={'val_loss': losses[epoch]})
lrs.append(keras.backend.get_value(reduce_on_plateau.model.optimizer.lr))
# The learning rates should be 1.0 except the last one
for lr in lrs[:-1]:
self.assertEqual(lr, 1.0)
self.assertLess(lrs[-1], 1.0)
def test_ReduceLROnPlateau_backwards_compatibility(self):
with test.mock.patch.object(logging, 'warning') as mock_log:
reduce_on_plateau = keras.callbacks.ReduceLROnPlateau(epsilon=1e-13)
self.assertRegexpMatches(
str(mock_log.call_args), '`epsilon` argument is deprecated')
self.assertFalse(hasattr(reduce_on_plateau, 'epsilon'))
self.assertTrue(hasattr(reduce_on_plateau, 'min_delta'))
self.assertEqual(reduce_on_plateau.min_delta, 1e-13)
def test_CSVLogger(self):
with self.cached_session():
np.random.seed(1337)
temp_dir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)
filepath = os.path.join(temp_dir, 'log.tsv')
sep = '\t'
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
def make_model():
np.random.seed(1337)
model = testing_utils.get_small_sequential_mlp(
num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
model.compile(
loss='categorical_crossentropy',
optimizer=gradient_descent.SGD(lr=0.1),
metrics=['accuracy'])
return model
# case 1, create new file with defined separator
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
assert os.path.exists(filepath)
with open(filepath) as csvfile:
dialect = csv.Sniffer().sniff(csvfile.read())
assert dialect.delimiter == sep
del model
del cbks
# case 2, append data to existing file, skip header
model = make_model()
cbks = [keras.callbacks.CSVLogger(filepath, separator=sep, append=True)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1,
verbose=0)
# case 3, reuse of CSVLogger object
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=2,
verbose=0)
with open(filepath) as csvfile:
list_lines = csvfile.readlines()
for line in list_lines:
assert line.count(sep) == 4
assert len(list_lines) == 5
output = ' '.join(list_lines)
assert len(re.findall('epoch', output)) == 1
os.remove(filepath)
def test_stop_training_csv(self):
# Test that using the CSVLogger callback with the TerminateOnNaN callback
# does not result in invalid CSVs.
np.random.seed(1337)
tmpdir = self.get_temp_dir()
self.addCleanup(shutil.rmtree, tmpdir, ignore_errors=True)
with self.cached_session():
fp = os.path.join(tmpdir, 'test.csv')
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN(), keras.callbacks.CSVLogger(fp)]
model = keras.models.Sequential()
for _ in range(5):
model.add(keras.layers.Dense(2, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='linear'))
model.compile(loss='mean_squared_error',
optimizer='rmsprop')
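      # The generator below yields real training batches at first and then,
      # once more than 3 * len(x_train) batches have been produced, switches to
      # all-NaN batches so that TerminateOnNaN fires while CSVLogger is still
      # writing.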
def data_generator():
i = 0
max_batch_index = len(x_train) // BATCH_SIZE
tot = 0
while 1:
if tot > 3 * len(x_train):
yield (np.ones([BATCH_SIZE, INPUT_DIM]) * np.nan,
np.ones([BATCH_SIZE, NUM_CLASSES]) * np.nan)
else:
yield (x_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE],
y_train[i * BATCH_SIZE: (i + 1) * BATCH_SIZE])
i += 1
tot += 1
i %= max_batch_index
history = model.fit_generator(data_generator(),
len(x_train) // BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
assert len(loss) > 1
assert loss[-1] == np.inf or np.isnan(loss[-1])
values = []
with open(fp) as f:
for x in csv.reader(f):
          # On Windows, due to \r\n line endings we may end up reading empty lines
# after each line. Skip empty lines.
if x:
values.append(x)
assert 'nan' in values[-1], 'The last epoch was not logged.'
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_TerminateOnNaN(self):
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
cbks = [keras.callbacks.TerminateOnNaN()]
model = keras.models.Sequential()
initializer = keras.initializers.Constant(value=1e5)
for _ in range(5):
model.add(
keras.layers.Dense(
2,
input_dim=INPUT_DIM,
activation='relu',
kernel_initializer=initializer))
model.add(keras.layers.Dense(NUM_CLASSES))
model.compile(loss='mean_squared_error', optimizer='rmsprop')
history = model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=20)
loss = history.history['loss']
self.assertEqual(len(loss), 1)
self.assertTrue(np.isnan(loss[0]))
@unittest.skipIf(
os.name == 'nt',
'use_multiprocessing=True does not work on windows properly.')
def test_LambdaCallback(self):
with self.cached_session():
np.random.seed(1337)
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = np_utils.to_categorical(y_test)
y_train = np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
      # Start an arbitrary thread that should run during model
      # training and be terminated after training has completed.
e = threading.Event()
def target():
e.wait()
t = threading.Thread(target=target)
t.start()
cleanup_callback = keras.callbacks.LambdaCallback(
on_train_end=lambda logs: e.set())
cbks = [cleanup_callback]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=5,
verbose=0)
t.join()
assert not t.is_alive()
def test_RemoteMonitor_np_array(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with test.mock.patch.object(requests, 'post') as requests_post:
monitor = keras.callbacks.RemoteMonitor(send_as_json=True)
      a = np.arange(1)  # a length-1 array
logs = {'loss': 0., 'val': a}
monitor.on_epoch_end(0, logs=logs)
send = {'loss': 0., 'epoch': 0, 'val': 0}
requests_post.assert_called_once_with(
monitor.root + monitor.path, json=send, headers=monitor.headers)
def test_RemoteMonitor_np_float32(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with test.mock.patch.object(requests, 'post') as requests_post:
monitor = keras.callbacks.RemoteMonitor(send_as_json=True)
a = np.float32(1.0) # a float32 generic type
logs = {'loss': 0., 'val': a}
monitor.on_epoch_end(0, logs=logs)
send = {'loss': 0., 'epoch': 0, 'val': 1.0}
requests_post.assert_called_once_with(
monitor.root + monitor.path, json=send, headers=monitor.headers)
def test_RemoteMonitorWithJsonPayload(self):
if requests is None:
self.skipTest('`requests` required to run this test')
with self.cached_session():
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=TRAIN_SAMPLES,
test_samples=TEST_SAMPLES,
input_shape=(INPUT_DIM,),
num_classes=NUM_CLASSES)
y_test = keras.utils.np_utils.to_categorical(y_test)
y_train = keras.utils.np_utils.to_categorical(y_train)
model = keras.models.Sequential()
model.add(
keras.layers.Dense(
NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
cbks = [keras.callbacks.RemoteMonitor(send_as_json=True)]
with test.mock.patch.object(requests, 'post'):
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
validation_data=(x_test, y_test),
callbacks=cbks,
epochs=1)
def test_progbar_infers_steps(self):
x, y = np.ones((10, 1)), np.ones((10, 1))
data = dataset_ops.DatasetV2.from_tensor_slices((x, y)).batch(2)
data = data.filter(lambda x, y: True) # Unknown cardinality.
progbar = keras.callbacks.ProgbarLogger('steps')
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
self.assertIsNone(progbar.target)
model.fit(data, epochs=2, callbacks=[progbar])
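    # 10 samples batched by 2 give 5 steps per epoch; the progbar should have
    # inferred this while iterating over the first epoch.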
self.assertEqual(progbar.target, 5)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_callback_passed_floats(self):
class MyCallback(keras.callbacks.Callback):
def on_batch_end(self, batch, logs=None):
assert isinstance(batch, int)
assert isinstance(logs['loss'], float)
self.on_batch_end_called = True
def on_epoch_end(self, batch, logs=None):
assert isinstance(batch, int)
assert isinstance(logs['loss'], float)
self.on_epoch_end_called = True
x, y = np.ones((10, 1)), np.ones((10, 1))
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
callback = MyCallback()
model.fit(x, y, epochs=2, callbacks=[callback])
    self.assertTrue(callback.on_batch_end_called)
    self.assertTrue(callback.on_epoch_end_called)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_implements_batch_hooks(self):
class MyCallbackWithBatchHooks(keras.callbacks.Callback):
def __init__(self):
self.train_batches = 0
self.test_batches = 0
self.predict_batches = 0
def on_train_batch_end(self, batch, logs=None):
self.train_batches += 1
def on_test_batch_end(self, batch, logs=None):
self.test_batches += 1
def on_predict_batch_end(self, batch, logs=None):
self.predict_batches += 1
class MyCallbackWithoutBatchHooks(keras.callbacks.Callback):
def __init__(self):
self.epochs = 0
def on_epoch_end(self, epoch, logs=None):
self.epochs += 1
x, y = np.ones((10, 1)), np.ones((10, 1))
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
my_cb = MyCallbackWithBatchHooks()
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertTrue(cb_list._should_call_train_batch_hooks)
self.assertTrue(cb_list._should_call_test_batch_hooks)
self.assertTrue(cb_list._should_call_predict_batch_hooks)
model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
self.assertEqual(my_cb.train_batches, 2)
self.assertEqual(my_cb.test_batches, 1)
self.assertEqual(my_cb.predict_batches, 1)
my_cb = MyCallbackWithoutBatchHooks()
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertLen(cb_list.callbacks, 1)
self.assertFalse(cb_list._should_call_train_batch_hooks)
self.assertFalse(cb_list._should_call_test_batch_hooks)
self.assertFalse(cb_list._should_call_predict_batch_hooks)
model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_implements_batch_hooks_override(self):
class MyCallback(keras.callbacks.Callback):
def __init__(self, should_run=True):
self.should_run = should_run
self.train_batches = 0
self.test_batches = 0
self.predict_batches = 0
def on_train_batch_end(self, batch, logs=None):
self.train_batches += 1
def on_test_batch_end(self, batch, logs=None):
self.test_batches += 1
def on_predict_batch_end(self, batch, logs=None):
self.predict_batches += 1
def _implements_train_batch_hooks(self):
return self.should_run
def _implements_test_batch_hooks(self):
return self.should_run
def _implements_predict_batch_hooks(self):
return self.should_run
x, y = np.ones((10, 1)), np.ones((10, 1))
model = keras.Sequential([keras.layers.Dense(1)])
model.compile('sgd', 'mse')
my_cb = MyCallback(should_run=True)
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertTrue(cb_list._should_call_train_batch_hooks)
self.assertTrue(cb_list._should_call_test_batch_hooks)
self.assertTrue(cb_list._should_call_predict_batch_hooks)
model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
self.assertEqual(my_cb.train_batches, 2)
self.assertEqual(my_cb.test_batches, 1)
self.assertEqual(my_cb.predict_batches, 1)
my_cb = MyCallback(should_run=False)
cb_list = keras.callbacks.CallbackList([my_cb], verbose=0)
self.assertFalse(cb_list._should_call_train_batch_hooks)
self.assertFalse(cb_list._should_call_test_batch_hooks)
self.assertFalse(cb_list._should_call_predict_batch_hooks)
model.fit(x, y, epochs=2, batch_size=10, callbacks=[my_cb], verbose=0)
model.evaluate(x, y, batch_size=10, callbacks=[my_cb], verbose=0)
model.predict(x, batch_size=10, callbacks=[my_cb], verbose=0)
self.assertEqual(my_cb.train_batches, 0)
self.assertEqual(my_cb.test_batches, 0)
self.assertEqual(my_cb.predict_batches, 0)
# A summary that was emitted during a test. Fields:
# logdir: str. The logdir of the FileWriter to which the summary was
# written.
# tag: str. The name of the summary.
_ObservedSummary = collections.namedtuple('_ObservedSummary', ('logdir', 'tag'))
class _SummaryFile(object):
"""A record of summary tags and the files to which they were written.
Fields `scalars`, `images`, `histograms`, and `tensors` are sets
containing `_ObservedSummary` values.
"""
def __init__(self):
self.scalars = set()
self.images = set()
self.histograms = set()
self.tensors = set()
def list_summaries(logdir):
"""Read all summaries under the logdir into a `_SummaryFile`.
Args:
logdir: A path to a directory that contains zero or more event
files, either as direct children or in transitive subdirectories.
Summaries in these events must only contain old-style scalars,
images, and histograms. Non-summary events, like `graph_def`s, are
ignored.
Returns:
A `_SummaryFile` object reflecting all summaries written to any
event files in the logdir or any of its descendant directories.
Raises:
    ValueError: If an event file contains a summary of an unexpected kind.
"""
result = _SummaryFile()
for (dirpath, dirnames, filenames) in os.walk(logdir):
del dirnames # unused
for filename in filenames:
if not filename.startswith('events.out.'):
continue
path = os.path.join(dirpath, filename)
for event in summary_iterator.summary_iterator(path):
if not event.summary: # (e.g., it's a `graph_def` event)
continue
for value in event.summary.value:
tag = value.tag
# Case on the `value` rather than the summary metadata because
# the Keras callback uses `summary_ops_v2` to emit old-style
# summaries. See b/124535134.
kind = value.WhichOneof('value')
container = {
'simple_value': result.scalars,
'image': result.images,
'histo': result.histograms,
'tensor': result.tensors,
}.get(kind)
if container is None:
raise ValueError(
'Unexpected summary kind %r in event file %s:\n%r'
% (kind, path, event))
elif kind == 'tensor' and tag != 'keras':
# Check for V2 scalar summaries, which have a different PB
# structure.
if event.summary.value[
0].metadata.plugin_data.plugin_name == 'scalars':
container = result.scalars
container.add(_ObservedSummary(logdir=dirpath, tag=tag))
return result
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2(keras_parameterized.TestCase):
def setUp(self):
super(TestTensorBoardV2, self).setUp()
self.logdir = os.path.join(self.get_temp_dir(), 'tb')
self.train_dir = os.path.join(self.logdir, 'train')
self.validation_dir = os.path.join(self.logdir, 'validation')
def _get_model(self):
layers = [
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1)
]
model = testing_utils.get_model_from_layers(layers, input_shape=(10, 10, 1))
opt = gradient_descent.SGD(learning_rate=0.001)
model.compile(
opt,
'mse',
run_eagerly=testing_utils.should_run_eagerly())
return model
def test_TensorBoard_default_logdir(self):
"""Regression test for cross-platform pathsep in default logdir."""
os.chdir(self.get_temp_dir())
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard() # no logdir specified
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(logdir='.')
train_dir = os.path.join('.', 'logs', 'train')
validation_dir = os.path.join('.', 'logs', 'validation')
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_basic(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_across_invocations(self):
"""Regression test for summary writer resource use-after-free.
See: <https://github.com/tensorflow/tensorflow/issues/25707>
"""
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
for _ in (1, 2):
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars, {
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
})
def test_TensorBoard_no_spurious_event_files(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir)
model.fit(
x,
y,
batch_size=2,
epochs=2,
callbacks=[tb_cbk])
events_file_run_basenames = set()
for (dirpath, dirnames, filenames) in os.walk(self.logdir):
del dirnames # unused
if any(fn.startswith('events.out.') for fn in filenames):
events_file_run_basenames.add(os.path.basename(dirpath))
self.assertEqual(events_file_run_basenames, {'train'})
def test_TensorBoard_batch_metrics(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
def test_TensorBoard_weight_histograms(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir, histogram_freq=1)
model_type = testing_utils.get_model_type()
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
def test_TensorBoard_weight_images(self):
model = self._get_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, write_images=True)
model_type = testing_utils.get_model_type()
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.histograms, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0'),
},
)
self.assertEqual(
self._strip_layer_names(summary_file.images, model_type),
{
_ObservedSummary(logdir=self.train_dir, tag='bias_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/0'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/1'),
_ObservedSummary(logdir=self.train_dir, tag='kernel_0/image/2'),
},
)
def test_custom_summary(self):
if not context.executing_eagerly():
self.skipTest('Custom summaries only supported in V2 code path.')
def scalar_v2_mock(name, data, step=None):
"""A reimplementation of the scalar plugin to avoid circular deps."""
metadata = summary_pb2.SummaryMetadata()
# Should match value in tensorboard/plugins/scalar/metadata.py.
metadata.plugin_data.plugin_name = 'scalars'
with summary_ops_v2.summary_scope(
name, 'scalar_summary', values=[data, step]) as (tag, _):
return summary_ops_v2.write(
tag=tag,
tensor=math_ops.cast(data, 'float32'),
step=step,
metadata=metadata)
class LayerWithSummary(keras.layers.Layer):
def call(self, x):
scalar_v2_mock('custom_summary', math_ops.reduce_sum(x))
return x
model = testing_utils.get_model_from_layers([LayerWithSummary()],
input_shape=(5,),
name='model')
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
tb_cbk = keras.callbacks.TensorBoard(self.logdir, update_freq=1)
x, y = np.ones((10, 5)), np.ones((10, 5))
model.fit(x, y, batch_size=2, validation_data=(x, y), callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.scalars,
{
_ObservedSummary(logdir=self.train_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.validation_dir, tag='epoch_loss'),
_ObservedSummary(logdir=self.train_dir, tag='batch_loss'),
_ObservedSummary(
logdir=self.train_dir,
tag='model/layer_with_summary/custom_summary'),
_ObservedSummary(
logdir=self.validation_dir,
tag='model/layer_with_summary/custom_summary')
},
)
def _strip_layer_names(self, summaries, model_type):
"""Deduplicate summary names modulo layer prefix.
This removes the first slash-component of each tag name: for
instance, "foo/bar/baz" becomes "bar/baz".
Args:
summaries: A `set` of `_ObservedSummary` values.
model_type: The model type currently being tested.
Returns:
A new `set` of `_ObservedSummary` values with layer prefixes
removed.
"""
result = set()
for summary in summaries:
if '/' not in summary.tag:
raise ValueError('tag has no layer name: %r' % summary.tag)
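      # Subclassed models appear to nest their tags one level deeper than the
      # other model types, so strip one extra leading component for them.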
start_from = 2 if 'subclass' in model_type else 1
new_tag = '/'.join(summary.tag.split('/')[start_from:])
result.add(summary._replace(tag=new_tag))
return result
def test_TensorBoard_invalid_argument(self):
with self.assertRaisesRegexp(ValueError, 'Unrecognized arguments'):
keras.callbacks.TensorBoard(wwrite_images=True)
# Note that this test specifies model_type explicitly.
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class TestTensorBoardV2NonParameterizedTest(keras_parameterized.TestCase):
def setUp(self):
super(TestTensorBoardV2NonParameterizedTest, self).setUp()
self.logdir = os.path.join(self.get_temp_dir(), 'tb')
self.train_dir = os.path.join(self.logdir, 'train')
self.validation_dir = os.path.join(self.logdir, 'validation')
def _get_seq_model(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
opt = gradient_descent.SGD(learning_rate=0.001)
model.compile(
opt,
'mse',
run_eagerly=testing_utils.should_run_eagerly())
return model
def _get_trace_file(self, logdir):
profile_dir = os.path.join(logdir, 'plugins', 'profile')
for (dirpath, dirnames, filenames) in os.walk(profile_dir):
del dirnames # unused
for filename in filenames:
if filename.endswith('.trace.json.gz'):
return os.path.join(dirpath, filename)
return None
def fitModelAndAssertKerasModelWritten(self, model):
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(self.logdir,
write_graph=True,
profile_batch=0)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag='keras'),
},
)
def test_TensorBoard_writeSequentialModel_noInputShape(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=False)
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_writeSequentialModel_withInputShape(self):
model = keras.models.Sequential([
keras.layers.Conv2D(8, (3, 3), input_shape=(10, 10, 1)),
keras.layers.Flatten(),
keras.layers.Dense(1),
])
model.compile('sgd', 'mse', run_eagerly=False)
self.fitModelAndAssertKerasModelWritten(model)
  def test_TensorBoard_writeModel(self):
inputs = keras.layers.Input([10, 10, 1])
x = keras.layers.Conv2D(8, (3, 3), activation='relu')(inputs)
x = keras.layers.Flatten()(x)
x = keras.layers.Dense(1)(x)
model = keras.models.Model(inputs=inputs, outputs=[x])
model.compile('sgd', 'mse', run_eagerly=False)
self.fitModelAndAssertKerasModelWritten(model)
def test_TensorBoard_autoTrace(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=1, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag=u'batch_1'),
},
)
self.assertIsNotNone(self._get_trace_file(logdir=self.train_dir))
def test_TensorBoard_autoTrace_tagNameWithBatchNum(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=2, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
_ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),
},
)
self.assertIsNotNone(self._get_trace_file(logdir=self.train_dir))
def test_TensorBoard_autoTrace_profileBatchRangeSingle(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch='2,2', write_graph=False)
model.fit(
x,
y,
batch_size=3,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
# Trace will be logged once at the batch it stops profiling.
_ObservedSummary(logdir=self.train_dir, tag=u'batch_2'),
},
)
self.assertIsNotNone(self._get_trace_file(logdir=self.train_dir))
def test_TensorBoard_autoTrace_profileBatchRange(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch='1,3', write_graph=False)
model.fit(
x,
y,
batch_size=4,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
self.assertEqual(
summary_file.tensors,
{
# Trace will be logged once at the batch it stops profiling.
_ObservedSummary(logdir=self.train_dir, tag=u'batch_3'),
},
)
self.assertIsNotNone(self._get_trace_file(logdir=self.train_dir))
def test_TensorBoard_autoTrace_profileInvalidBatchRange(self):
with self.assertRaises(ValueError):
keras.callbacks.TensorBoard(
self.logdir,
histogram_freq=1,
profile_batch='-1,3',
write_graph=False)
with self.assertRaises(ValueError):
keras.callbacks.TensorBoard(
self.logdir,
histogram_freq=1,
profile_batch='1,None',
write_graph=False)
with self.assertRaises(ValueError):
keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch='6,5', write_graph=False)
with self.assertRaises(ValueError):
keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=-1, write_graph=False)
def test_TensorBoard_autoTrace_profile_batch_largerThanBatchCount(self):
model = self._get_seq_model()
x, y = np.ones((10, 10, 10, 1)), np.ones((10, 1))
tb_cbk = keras.callbacks.TensorBoard(
self.logdir, histogram_freq=1, profile_batch=10000, write_graph=False)
model.fit(
x,
y,
batch_size=2,
epochs=2,
validation_data=(x, y),
callbacks=[tb_cbk])
summary_file = list_summaries(self.logdir)
    # The trace is only enabled on the 10000th batch, which is never reached,
    # so no trace should have been written.
self.assertEmpty(summary_file.tensors)
self.assertIsNone(self._get_trace_file(logdir=self.train_dir))
class MostRecentlyModifiedFileMatchingPatternTest(test.TestCase):
def test_get_most_recently_modified_file_matching_pattern(self):
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
file_paths = [
os.path.join(test_dir, file_name) for file_name in
['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
]
for file_path in file_paths:
with open(file_path, 'w') as f:
        # Ensure there is some interval between file creations.
time.sleep(2)
f.write('foo bar')
# Ensure the files have been actually written.
self.assertEqual(
set([
os.path.join(test_dir, file_name)
for file_name in os.listdir(test_dir)
]), set(file_paths))
self.assertEqual(
keras.callbacks.ModelCheckpoint(None)
._get_most_recently_modified_file_matching_pattern(path_pattern),
file_paths[-1])
def test_some_file_not_matching_pattern(self):
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
file_paths = [
os.path.join(test_dir, file_name) for file_name in
['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.baatch01epoch01.h5']
]
for file_path in file_paths:
with open(file_path, 'w') as f:
        # Ensure there is some interval between file creations.
time.sleep(2)
f.write('foo bar')
self.assertEqual(
keras.callbacks.ModelCheckpoint(None)
._get_most_recently_modified_file_matching_pattern(path_pattern),
file_paths[-2])
def test_get_same_file_if_file_name_equals_pattern(self):
file_name = 'f.batch02.h5'
test_dir = self.get_temp_dir()
file_path = os.path.join(test_dir, file_name)
with open(file_path, 'w') as f:
f.write('foo bar')
self.assertEqual(os.path.join(test_dir, os.listdir(test_dir)[0]), file_path)
self.assertEqual(
keras.callbacks.ModelCheckpoint(
None)._get_most_recently_modified_file_matching_pattern(file_path),
file_path)
def test_get_none_if_file_does_not_exist(self):
file_name = 'f.batch02.h5'
test_dir = self.get_temp_dir()
file_path = os.path.join(test_dir, file_name)
self.assertLen(os.listdir(test_dir), 0)
self.assertEqual(
keras.callbacks.ModelCheckpoint(
None)._get_most_recently_modified_file_matching_pattern(file_path),
None)
def test_using_checkpoint_management_latest_checkpoint(self):
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}'
ckpt_file_name = 'f.batchXepochY'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
ckpt_file_path = os.path.join(test_dir, ckpt_file_name)
with open(ckpt_file_path, 'w') as f:
f.write('dummy ckpt')
checkpoint_management.update_checkpoint_state_internal(
test_dir, ckpt_file_path)
file_paths = [
os.path.join(test_dir, file_name)
for file_name in ['f.batch03epoch02', 'f.batch02epoch02']
]
for file_path in file_paths:
with open(file_path, 'w') as f:
f.write('foo bar')
# The result returned from checkpoint_management.latest_checkpoint takes
# priority, so even if it was written earlier, we should still return that.
self.assertEqual(
keras.callbacks.ModelCheckpoint(None)
._get_most_recently_modified_file_matching_pattern(path_pattern),
ckpt_file_path)
if __name__ == '__main__':
test.main()
| apache-2.0 |
nwiizo/workspace_2017 | keras_ex/example/cifar10_cnn.py | 2 | 3817 | '''Train a simple deep CNN on the CIFAR10 small images dataset.
GPU run command with Theano backend (with TensorFlow, the GPU is automatically used):
THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python cifar10_cnn.py
It gets down to 0.65 test logloss in 25 epochs, and down to 0.55 after 50 epochs.
(it's still underfitting at that point, though).
'''
from __future__ import print_function
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
batch_size = 32
nb_classes = 10
nb_epoch = 200
data_augmentation = True
# input image dimensions
img_rows, img_cols = 32, 32
# The CIFAR10 images are RGB.
img_channels = 3
# The data, shuffled and split between train and test sets:
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# Convert class vectors to binary class matrices.
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Convolution2D(32, 3, 3, border_mode='same',
input_shape=X_train.shape[1:]))
model.add(Activation('relu'))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
if not data_augmentation:
print('Not using data augmentation.')
model.fit(X_train, Y_train,
batch_size=batch_size,
nb_epoch=nb_epoch,
validation_data=(X_test, Y_test),
shuffle=True)
else:
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=False) # randomly flip images
# Compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied).
datagen.fit(X_train)
# Fit the model on the batches generated by datagen.flow().
model.fit_generator(datagen.flow(X_train, Y_train,
batch_size=batch_size),
samples_per_epoch=X_train.shape[0],
nb_epoch=nb_epoch,
validation_data=(X_test, Y_test))
| mit |
sangwook236/general-development-and-testing | sw_dev/python/rnd/test/sequence_processing/tsa_lstm_1.py | 2 | 17351 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# REF [site] >>
# https://www.altumintelligence.com/articles/a/Time-Series-Prediction-Using-LSTM-Deep-Neural-Networks
# https://machinelearningmastery.com/time-series-forecasting-long-short-term-memory-network-python/
# LSTMs are terrible at time series forecasting.
# https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/
# https://machinelearningmastery.com/multi-step-time-series-forecasting-long-short-term-memory-networks-python/
# https://machinelearningmastery.com/time-series-prediction-lstm-recurrent-neural-networks-python-keras/
import math
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from sklearn.metrics import mean_squared_error
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM
import matplotlib.pyplot as plt
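# NOTE: this script targets an older pandas API surface (pd.datetime,
# squeeze=True, date_parser); on newer pandas these were removed or deprecated,
# and datetime.datetime.strptime plus parse_dates are the usual replacements.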
# Persistence model:
# The simplest model that we could use to make predictions would be to persist the last observation.
# It provides a baseline of performance for the problem that we can use for comparison with an autoregression model.
# REF [site] >> https://machinelearningmastery.com/time-series-forecasting-long-short-term-memory-network-python/
def persistence_model():
# Load dataset.
def parser(x):
return pd.datetime.strptime('190'+x, '%Y-%m')
series = pd.read_csv('./shampoo.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
# Split data into train and test.
X = series.values
train, test = X[0:-12], X[-12:]
# Walk-forward validation.
history = [x for x in train]
predictions = list()
for i in range(len(test)):
# Make prediction.
predictions.append(history[-1])
# Observation.
history.append(test[i])
# Report performance.
rmse = math.sqrt(mean_squared_error(test, predictions))
print('RMSE: %.3f' % rmse)
# Line plot of observed vs predicted.
plt.plot(test)
plt.plot(predictions)
plt.show()
# Frame a sequence as a supervised learning problem.
def timeseries_to_supervised(data, lag=1):
df = pd.DataFrame(data)
columns = [df.shift(i) for i in range(lag, 0, -1)]
columns.append(df)
df = pd.concat(columns, axis=1)
df.fillna(0, inplace=True)
return df
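# For example (a quick sanity check, not part of the original script):
# timeseries_to_supervised([1, 2, 3, 4], lag=1) yields the rows
# (0, 1), (1, 2), (2, 3), (3, 4) -- the lagged value as input, the current
# value as target, with the leading NaN replaced by 0.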
# Create a differenced series.
def difference(dataset, interval=1):
diff = list()
for i in range(interval, len(dataset)):
diff.append(dataset[i] - dataset[i - interval])
return np.array(diff)
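# For example, difference([1, 4, 9, 16], interval=1) returns [3, 5, 7]: each
# element is the change from the observation `interval` steps earlier.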
# Invert differenced value.
def inverse_difference(differenced, dataset):
inverted = list()
for (diff, dat) in zip(differenced, dataset):
inverted.append(diff + dat)
return np.array(inverted)
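# Quick round-trip check (added for clarity): difference() followed by inverse_difference()
# recovers the original series from its second element onward, which is how the forecasts
# are un-differenced later in this script.
def _demo_difference_roundtrip():
    original = np.array([3.0, 5.0, 9.0, 15.0])
    diffed = difference(original, interval=1)        # [2., 4., 6.]
    restored = inverse_difference(diffed, original)  # [5., 9., 15.]
    print(diffed, restored)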
# Fit an LSTM network to training data.
def fit_lstm(train, batch_size, nb_epoch, neurons):
X, y = train[:, 0:-1], train[:, -1]
X = X.reshape(X.shape[0], 1, X.shape[1])
model = Sequential()
model.add(LSTM(neurons, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
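    # Epochs are looped manually so the LSTM's internal state can be reset after each pass:
    # with stateful=True, Keras carries state across batches and never clears it on its own.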
for i in range(nb_epoch):
model.fit(X, y, epochs=1, batch_size=batch_size, verbose=0, shuffle=False)
model.reset_states()
return model
# REF [site] >> https://machinelearningmastery.com/time-series-forecasting-long-short-term-memory-network-python/
def data_transformation():
# Load dataset.
def parser(x):
return pd.datetime.strptime('190'+x, '%Y-%m')
series = pd.read_csv('./shampoo.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
# Transform to be stationary.
interval = 1
differenced = difference(series, interval)
print(differenced)
# Invert transform.
inverted = inverse_difference(differenced, series)
print(inverted)
# Transform scale.
X = series.values
X = X.reshape(len(X), 1)
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(X)
scaled_X = scaler.transform(X)
scaled_series = pd.Series(scaled_X[:, 0])
print(scaled_series.head())
# Invert transform.
inverted_X = scaler.inverse_transform(scaled_X)
inverted_series = pd.Series(inverted_X[:, 0])
print(inverted_series.head())
# Transform to supervised learning.
time_lag = 3
X = series.values
supervised = timeseries_to_supervised(X, time_lag)
print(supervised.head())
# REF [site] >> https://machinelearningmastery.com/time-series-forecasting-long-short-term-memory-network-python/
def univariate_time_series_with_lstm():
# Load dataset.
def parser(x):
return pd.datetime.strptime('190'+x, '%Y-%m')
series = pd.read_csv('./shampoo.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
# Transform data to be stationary.
interval = 1
raw_values = series.values
diff_values = difference(raw_values, interval)
# Transform data to be supervised learning.
time_lag = 1
supervised = timeseries_to_supervised(diff_values, time_lag)
supervised_values = supervised.values
# Split data into train and test sets.
train, test = supervised_values[0:-12], supervised_values[-12:]
y_test_true = raw_values[-12:]
# Transform the scale of the data.
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(train)
train_scaled = scaler.transform(train)
test_scaled = scaler.transform(test)
# Repeat experiment.
num_experiments = 30
error_scores = list()
for r in range(num_experiments):
# Fit the model.
lstm_model = fit_lstm(train_scaled, 1, 3000, 4)
# Forecast the entire training dataset to build up state for forecasting.
train_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)
lstm_model.predict(train_reshaped, batch_size=1)
# Walk-forward validation on the test data.
# Make one-step forecast.
X, y = test_scaled[:, 0:-1], test_scaled[:, -1]
X3 = X.reshape(X.shape[0], 1, X.shape[1])
yhat = lstm_model.predict(X3, batch_size=1)
# Invert scaling.
yhat = scaler.inverse_transform(np.hstack((X, yhat)))
yhat = yhat[:,-1]
# Invert differencing.
yhat = inverse_difference(yhat, raw_values[-12-interval:])
# Report performance.
rmse = math.sqrt(mean_squared_error(y_test_true, yhat))
print('%d) Test RMSE: %.3f' % (r+1, rmse))
error_scores.append(rmse)
# Line plot of observed vs predicted.
#plt.plot(y_test_true)
#plt.plot(yhat)
#plt.show()
# Summarize results.
results = pd.DataFrame()
results['rmse'] = error_scores
print(results.describe())
results.boxplot()
plt.show()
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
n_vars = 1 if type(data) is list else data.shape[1]
df = pd.DataFrame(data)
cols, names = list(), list()
# Input sequence (t-n, ..., t-1).
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j+1, i)) for j in range(n_vars)]
# Forecast sequence (t, t+1, ..., t+n).
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j+1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j+1, i)) for j in range(n_vars)]
# Put it all together.
agg = pd.concat(cols, axis=1)
agg.columns = names
# Drop rows with NaN values.
if dropnan:
agg.dropna(inplace=True)
return agg
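# For example (illustrative), with 2 variables and n_in=1, n_out=1 the returned frame has
# columns var1(t-1), var2(t-1), var1(t), var2(t), one row per usable time step after
# dropping the NaN rows created by shifting.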
# REF [site] >> https://machinelearningmastery.com/multivariate-time-series-forecasting-lstms-keras/
def multivariate_time_series_with_lstm():
"""
# Load data.
def parse(x):
return pd.datetime.strptime(x, '%Y %m %d %H')
dataset = pd.read_csv('pollution_raw.csv', parse_dates = [['year', 'month', 'day', 'hour']], index_col=0, date_parser=parse)
dataset.drop('No', axis=1, inplace=True)
# Manually specify column names.
dataset.columns = ['pollution', 'dew', 'temp', 'press', 'wnd_dir', 'wnd_spd', 'snow', 'rain']
dataset.index.name = 'date'
# Mark all NA values with 0.
dataset['pollution'].fillna(0, inplace=True)
# Drop the first 24 hours.
dataset = dataset[24:]
# Summarize first 5 rows.
print(dataset.head(5))
# Save to file.
dataset.to_csv('./pollution.csv')
"""
# Load dataset.
dataset = pd.read_csv('./pollution.csv', header=0, index_col=0)
values = dataset.values
"""
# Specify columns to plot.
groups = [0, 1, 2, 3, 5, 6, 7]
i = 1
# Plot each column.
plt.figure()
for group in groups:
plt.subplot(len(groups), 1, i)
plt.plot(values[:, group])
plt.title(dataset.columns[group], y=0.5, loc='right')
i += 1
plt.show()
"""
# Integer encode direction.
encoder = LabelEncoder()
values[:,4] = encoder.fit_transform(values[:,4])
# Ensure all data is float.
values = values.astype('float32')
# Normalize features.
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
# Specify the number of lag hours.
n_hours = 3
n_features = 8
# Frame as supervised learning.
reframed = series_to_supervised(scaled, n_hours, 1)
    # NOTE: the single-lag version of this tutorial drops reframed.columns[[9..15]] here, but
    # with n_hours = 3 that would discard lag features and misalign the reshape below, so all
    # framed columns are kept and pollution(t) is selected positionally as train[:, -n_features].
#print(reframed.head())
# Split into train and test sets.
values = reframed.values
n_train_hours = 365 * 24
train = values[:n_train_hours, :]
test = values[n_train_hours:, :]
# Split into input and outputs.
n_obs = n_hours * n_features
train_X, train_y = train[:, :n_obs], train[:, -n_features]
test_X, test_y = test[:, :n_obs], test[:, -n_features]
# Reshape input to be 3D [samples, timesteps, features].
train_X = train_X.reshape((train_X.shape[0], n_hours, n_features))
test_X = test_X.reshape((test_X.shape[0], n_hours, n_features))
#print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)
# Design network.
model = Sequential()
model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(1))
model.compile(loss='mae', optimizer='adam')
# Fit network.
history = model.fit(train_X, train_y, epochs=50, batch_size=72, validation_data=(test_X, test_y), verbose=2, shuffle=False)
# Plot history.
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='test')
plt.legend()
plt.show()
# Make a prediction.
yhat = model.predict(test_X)
test_X = test_X.reshape((test_X.shape[0], n_hours * n_features))
# Invert scaling for forecast.
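    # The scaler was fit on all 8 feature columns, so the 1-column forecast is padded with the
    # 7 remaining (t-1) feature columns before inverse_transform; only column 0 is kept.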
inv_yhat = np.concatenate((yhat, test_X[:, -(n_features-1):]), axis=1)
inv_yhat = scaler.inverse_transform(inv_yhat)
inv_yhat = inv_yhat[:,0]
# Invert scaling for actual.
test_y = test_y.reshape((len(test_y), 1))
inv_y = np.concatenate((test_y, test_X[:, -(n_features-1):]), axis=1)
inv_y = scaler.inverse_transform(inv_y)
inv_y = inv_y[:,0]
# Calculate RMSE.
rmse = math.sqrt(mean_squared_error(inv_y, inv_yhat))
print('Test RMSE: %.3f' % rmse)
# Transform series into train and test sets for supervised learning.
def prepare_data(series, n_test, n_lag, n_seq):
# Extract raw values.
raw_values = series.values
raw_values = raw_values.reshape(len(raw_values), 1)
# Transform into supervised learning problem X, y.
supervised = series_to_supervised(raw_values, n_lag, n_seq)
supervised_values = supervised.values
# Split into train and test sets.
train, test = supervised_values[0:-n_test], supervised_values[-n_test:]
return train, test
# Make a persistence forecast.
def persistence(last_ob, n_seq):
return [last_ob for i in range(n_seq)]
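# e.g. persistence(42.0, n_seq=3) -> [42.0, 42.0, 42.0]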
# Evaluate the persistence model.
def make_forecasts_for_persistence_model(train, test, n_lag, n_seq):
forecasts = list()
for i in range(len(test)):
X, y = test[i, 0:n_lag], test[i, n_lag:]
# Make forecast.
forecast = persistence(X[-1], n_seq)
# Store the forecast.
forecasts.append(forecast)
return forecasts
# Evaluate the RMSE for each forecast time step.
def evaluate_forecasts(test, forecasts, n_lag, n_seq):
for i in range(n_seq):
actual = test[:,(n_lag+i)]
predicted = [forecast[i] for forecast in forecasts]
rmse = math.sqrt(mean_squared_error(actual, predicted))
print('t+%d RMSE: %f' % ((i+1), rmse))
# Plot the forecasts in the context of the original dataset.
def plot_forecasts(series, forecasts, n_test):
# Plot the entire dataset in blue.
plt.plot(series.values)
# Plot the forecasts in red.
for i in range(len(forecasts)):
off_s = len(series) - 12 + i - 1
off_e = off_s + len(forecasts[i]) + 1
xaxis = [x for x in range(off_s, off_e)]
yaxis = [series.values[off_s]] + forecasts[i]
plt.plot(xaxis, yaxis, color='red')
# Show the plot.
plt.show()
# Transform series into train and test sets for supervised learning.
def prepare_data_with_scaler(series, n_test, n_lag, n_seq):
# Extract raw values.
raw_values = series.values
# Transform data to be stationary.
diff_series = difference(raw_values, 1)
diff_values = diff_series
diff_values = diff_values.reshape(len(diff_values), 1)
# Rescale values to -1, 1.
scaler = MinMaxScaler(feature_range=(-1, 1))
scaled_values = scaler.fit_transform(diff_values)
scaled_values = scaled_values.reshape(len(scaled_values), 1)
# Transform into supervised learning problem X, y.
supervised = series_to_supervised(scaled_values, n_lag, n_seq)
supervised_values = supervised.values
# Split into train and test sets.
train, test = supervised_values[0:-n_test], supervised_values[-n_test:]
return scaler, train, test
# Fit an LSTM network to training data.
def fit_lstm_for_multi_step_lstm(train, n_lag, n_seq, n_batch, nb_epoch, n_neurons):
# Reshape training into [samples, timesteps, features].
X, y = train[:, 0:n_lag], train[:, n_lag:]
X = X.reshape(X.shape[0], 1, X.shape[1])
# Design network.
model = Sequential()
model.add(LSTM(n_neurons, batch_input_shape=(n_batch, X.shape[1], X.shape[2]), stateful=True))
model.add(Dense(y.shape[1]))
model.compile(loss='mean_squared_error', optimizer='adam')
# Fit network.
for i in range(nb_epoch):
model.fit(X, y, epochs=1, batch_size=n_batch, verbose=0, shuffle=False)
model.reset_states()
return model
# Make one forecast with an LSTM.
def forecast_lstm(model, X, n_batch):
# Reshape input pattern to [samples, timesteps, features].
X = X.reshape(1, 1, len(X))
# Make forecast.
forecast = model.predict(X, batch_size=n_batch)
# Convert to array.
return [x for x in forecast[0, :]]
# Evaluate the LSTM model.
def make_forecasts(model, n_batch, train, test, n_lag, n_seq):
forecasts = list()
for i in range(len(test)):
X, y = test[i, 0:n_lag], test[i, n_lag:]
# Make forecast.
forecast = forecast_lstm(model, X, n_batch)
# Store the forecast.
forecasts.append(forecast)
return forecasts
# Invert differenced forecast.
def inverse_difference_for_multi_step_lstm(last_ob, forecast):
# Invert first forecast.
inverted = list()
inverted.append(forecast[0] + last_ob)
# Propagate difference forecast using inverted first value.
for i in range(1, len(forecast)):
inverted.append(forecast[i] + inverted[i-1])
return inverted
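# e.g. last_ob=10 and forecast=[1, 2, 3] invert to [11, 13, 16]: each predicted change is
# added onto the previously inverted value.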
# Inverse data transform on forecasts.
def inverse_transform(series, forecasts, scaler, n_test):
inverted = list()
for i in range(len(forecasts)):
# Create array from forecast.
forecast = np.array(forecasts[i])
forecast = forecast.reshape(1, len(forecast))
# Invert scaling.
inv_scale = scaler.inverse_transform(forecast)
inv_scale = inv_scale[0, :]
# Invert differencing.
index = len(series) - n_test + i - 1
last_ob = series.values[index]
inv_diff = inverse_difference_for_multi_step_lstm(last_ob, inv_scale)
# Store.
inverted.append(inv_diff)
return inverted
def evaluate_forecasts_for_multi_step_lstm(test, forecasts, n_lag, n_seq):
for i in range(n_seq):
actual = [row[i] for row in test]
predicted = [forecast[i] for forecast in forecasts]
rmse = math.sqrt(mean_squared_error(actual, predicted))
print('t+%d RMSE: %f' % ((i+1), rmse))
def multi_step_time_series_with_lstm():
# Load dataset.
def parser(x):
return pd.datetime.strptime('190'+x, '%Y-%m')
series = pd.read_csv('./shampoo.csv', header=0, parse_dates=[0], index_col=0, squeeze=True, date_parser=parser)
"""
# Summarize first few rows.
print(series.head())
# Line plot.
series.plot()
plt.show()
"""
#--------------------
# Persistence model.
n_lag, n_seq, n_test = 1, 3, 10
# Prepare data.
train, test = prepare_data(series, n_test, n_lag, n_seq)
print(test)
print('Train: %s, Test: %s' % (train.shape, test.shape))
# Make forecasts.
forecasts = make_forecasts_for_persistence_model(train, test, n_lag, n_seq)
# Evaluate forecasts.
evaluate_forecasts(test, forecasts, n_lag, n_seq)
# Plot forecasts.
plot_forecasts(series, forecasts, n_test + 2)
#--------------------
n_lag, n_seq, n_test, n_epochs, n_batch, n_neurons = 4, 3, 10, 1500, 1, 1
# Prepare data.
scaler, train, test = prepare_data_with_scaler(series, n_test, n_lag, n_seq)
# Fit model.
model = fit_lstm_for_multi_step_lstm(train, n_lag, n_seq, n_batch, n_epochs, n_neurons)
# Make forecasts.
forecasts = make_forecasts(model, n_batch, train, test, n_lag, n_seq)
# Inverse transform forecasts and test.
forecasts = inverse_transform(series, forecasts, scaler, n_test + 2)
actual = [row[n_lag:] for row in test]
actual = inverse_transform(series, actual, scaler, n_test + 2)
# Evaluate forecasts.
evaluate_forecasts_for_multi_step_lstm(actual, forecasts, n_lag, n_seq)
# Plot forecasts.
plot_forecasts(series, forecasts, n_test + 2)
def main():
#data_transformation()
#persistence_model()
#univariate_time_series_with_lstm()
#multivariate_time_series_with_lstm()
multi_step_time_series_with_lstm()
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
| gpl-2.0 |
rodvieirasilva/clustering-sentcolletion | simplelinkage.py | 1 | 1717 | """
-- Sent Collection v.1 for clustering analysis --
-- Group 1 --
--Marciele de Menezes Bittencourt --
--Rodrigo Vieira da Silva --
--Washington Rodrigo Dias da Silva --
-----------------------------------------------------
"""
from scipy.cluster.hierarchy import linkage, fcluster, dendrogram, cut_tree
import matplotlib.pyplot as plt
import json
from scipy.spatial.distance import pdist
from util import mkdir
class SimpleLinkage:
simpleLinkage = None
labels_ = None
distance = None
k = None
title = None
name = None
alg = None
    def __init__(self, distance, k, alg):
        self.k = k
        self.distance = distance
        self.alg = alg
        # Default name/title to the linkage method so dendrogram() has a usable output path.
        self.name = alg
        self.title = alg
def fit(self, data):
self.simpleLinkage = linkage(self.distance, self.alg)
self.labels_ = fcluster(self.simpleLinkage, self.k, criterion='maxclust')
def dendrogram(self):
Z = self.simpleLinkage
if Z is None:
Z = linkage(self.distance, self.alg)
plt.figure(figsize=(25, 10))
dn = dendrogram(Z, orientation="top", truncate_mode='lastp', p=20, leaf_font_size=20)
        mkdir(self.name)  # assumes util.mkdir(path) creates the output directory if missing
        plt.savefig('{0}/{1}-dendrogram-20.png'.format(self.name, self.title))
def main():
with open('basesjson/sklearn_bagofwords.json') as json_data:
bagofwords = json.load(json_data)
distance = pdist(bagofwords, metric='euclidean')
wardLink = SimpleLinkage(distance, 0, 'ward')
wardLink.dendrogram()
singleLink = SimpleLinkage(distance, 0, 'single')
singleLink.dendrogram()
print("Finished")
if __name__ == '__main__':
main()
| mit |
milljm/moose | modules/porous_flow/examples/multiapp_fracture_flow/3dFracture/fracture_only_aperture_changing.py | 15 | 3507 | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import sys
import numpy as np
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
labels = {'0': '9.2m', '1': '4.6m', '2': '2.3m', '3': '1.15m'}
colours = {'0': 'g', '1': 'k', '2': 'b', '3': 'r'}
production_starts = {} # index into data at which production starts (P_out >= 10.6)
for i in ['0', '1', '2', '3']:
data = np.genfromtxt(r'gold/fracture_only_aperture_changing_out_' + i + '.csv', delimiter=',', names=True)
for ind in range(1, len(data)):
if data['P_out'][ind] >= 10.6:
production_starts[i] = ind
break
at_2hrs = {}
plt.figure()
for i in ['0', '1', '2', '3']:
data = np.genfromtxt(r'gold/fracture_only_aperture_changing_out_' + i + '.csv', delimiter=',', names=True)
plt.plot(data['time'][1:production_starts[i]+1] / 3600.0, data['TK_out'][1:production_starts[i]+1] - 273, colours[i] + ":")
plt.plot(data['time'][production_starts[i]] / 3600.0, data['TK_out'][production_starts[i]] - 273, colours[i] + "o")
plt.plot(data['time'][production_starts[i]:] / 3600.0, data['TK_out'][production_starts[i]:] - 273, colours[i], label=labels[i])
for ind in range(len(data)):
if data['time'][ind] / 3600.0 > 2.0:
t0 = data['time'][ind - 1] / 3600.0
t1 = data['time'][ind] / 3600.0
T0 = data['TK_out'][ind - 1] - 273
T1 = data['TK_out'][ind] - 273
            at_2hrs[i] = (T0 * (t1 - 2.0) + T1 * (2.0 - t0)) / (t1 - t0)  # linear interpolation at t = 2 h
break
plt.grid()
plt.legend()
plt.title("Production-point temperature: no heat transfer, various mesh sizes")
plt.xlim([0, 4])
plt.xlabel("time (hours)")
plt.ylabel("T (degC)")
plt.savefig("fracture_only_aperture_changing_T_out.png")
plt.show()
plt.close()
plt.figure()
xvals = np.array([9.2, 4.6, 2.3, 1.15]).reshape((-1, 1))
yvals = np.array([at_2hrs['0'], at_2hrs['1'], at_2hrs['2'], at_2hrs['3']])
plt.plot(xvals, yvals, 'o', label="Simulation")
ord = 0.15
reg = LinearRegression().fit(np.power(xvals, ord), yvals)
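# Linear regression in size**0.15 models the 2-hour temperature as a + b*h**0.15,
# i.e. the power-law convergence suggested by the plot label below.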
x2 = np.arange(1, np.power(10, ord), 0.01).reshape((-1, 1))
print(reg.intercept_, reg.coef_)
plt.plot(np.power(x2, 1.0 / ord), reg.predict(x2), label="Fit: error = size^0.15")
plt.grid()
plt.legend()
plt.xlabel("Element size (m)")
plt.ylabel("T (degC)")
plt.title("Production temperature after 2 hours")
plt.savefig("fracture_only_aperture_changing_T_2hrs.png")
plt.show()
plt.close()
plt.figure()
for i in ['0', '1', '2', '3']:
data = np.genfromtxt(r'gold/fracture_only_aperture_changing_out_' + i + '.csv', delimiter=',', names=True)
plt.plot(data['time'][1:production_starts[i]+1] / 3600.0, data['P_out'][1:production_starts[i]+1], colours[i] + ":")
plt.plot(data['time'][production_starts[i]] / 3600.0, data['P_out'][production_starts[i]], colours[i] + "o")
plt.plot(data['time'][production_starts[i]:] / 3600.0, data['P_out'][production_starts[i]:], colours[i], label=labels[i])
plt.grid()
plt.legend()
plt.title("Production-point porepressure: no heat transfer, various mesh sizes")
plt.xlim([0, 4])
plt.xlabel("time (hours)")
plt.ylabel("P (MPa)")
plt.savefig("fracture_only_aperture_changing_P_out.png")
plt.show()
plt.close()
sys.exit(0)
| lgpl-2.1 |
sangwook236/general-development-and-testing | sw_dev/python/rnd/test/machine_learning/sklearn/sklearn_feature_selection.py | 2 | 3531 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import time
import numpy as np
import sklearn.feature_selection, sklearn.datasets, sklearn.svm, sklearn.ensemble, sklearn.linear_model
import matplotlib.pyplot as plt
# REF [site] >> https://scikit-learn.org/stable/modules/feature_selection.html
def basic_example():
X, y = sklearn.datasets.load_iris(return_X_y=True)
print('Input shape (before) = {}.'.format(X.shape))
clf = sklearn.svm.LinearSVC(C=0.01, penalty='l1', dual=False)
clf = clf.fit(X, y)
# The estimator should have a feature_importances_ or coef_ attribute after fitting. Otherwise, the importance_getter parameter should be used.
model = sklearn.feature_selection.SelectFromModel(clf, prefit=True)
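    # With prefit=True the selector wraps the already-fitted estimator, so transform()
    # can be called directly without fitting the SelectFromModel object itself.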
X_new = model.transform(X)
print('Input shape (after) = {}.'.format(X_new.shape))
#--------------------
clf = sklearn.ensemble.ExtraTreesClassifier(n_estimators=50)
clf = clf.fit(X, y)
print('Feature importance = {}.'.format(clf.feature_importances_))
model = sklearn.feature_selection.SelectFromModel(clf, prefit=True)
X_new = model.transform(X)
print('Input shape (after) = {}.'.format(X_new.shape))
# REF [site] >> https://scikit-learn.org/stable/auto_examples/feature_selection/plot_select_from_model_diabetes.html
def sequential_feature_selection_example():
diabetes = sklearn.datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
#print(diabetes.DESCR)
# Feature importance from coefficients.
ridge = sklearn.linear_model.RidgeCV(alphas=np.logspace(-6, 6, num=5))
ridge = ridge.fit(X, y)
importance = np.abs(ridge.coef_)
feature_names = np.array(diabetes.feature_names)
plt.bar(height=importance, x=feature_names)
plt.title('Feature importances via coefficients')
# Selecting features based on importance.
threshold = np.sort(importance)[-3] + 0.01
tic = time.time()
sfm = sklearn.feature_selection.SelectFromModel(ridge, threshold=threshold)
sfm = sfm.fit(X, y)
toc = time.time()
print(f'Features selected by SelectFromModel: {feature_names[sfm.get_support(indices=False)]}')
print(f'Done in {toc - tic:.3f}s')
#X_new = sfm.transform(X)
# Selecting features with sequential feature selection.
tic_fwd = time.time()
sfs_forward = sklearn.feature_selection.SequentialFeatureSelector(ridge, n_features_to_select=2, direction='forward')
#sfs_forward = sklearn.feature_selection.SequentialFeatureSelector(ridge, n_features_to_select='auto', tol=None, direction='forward', scoring=None, cv=None, n_jobs=None)
sfs_forward = sfs_forward.fit(X, y)
toc_fwd = time.time()
#X_new = sfs_forward.transform(X)
tic_bwd = time.time()
sfs_backward = sklearn.feature_selection.SequentialFeatureSelector(ridge, n_features_to_select=2, direction='backward')
#sfs_backward = sklearn.feature_selection.SequentialFeatureSelector(ridge, n_features_to_select='auto', tol=None, direction='backward', scoring=None, cv=None, n_jobs=None)
sfs_backward = sfs_backward.fit(X, y)
toc_bwd = time.time()
#X_new = sfs_backward.transform(X)
print(
'Features selected by forward sequential selection: '
f'{feature_names[sfs_forward.get_support(indices=False)]}'
)
print(f'Done in {toc_fwd - tic_fwd:.3f}s')
print(
'Features selected by backward sequential selection: '
f'{feature_names[sfs_backward.get_support(indices=False)]}'
)
print(f'Done in {toc_bwd - tic_bwd:.3f}s')
plt.show()
def main():
#basic_example()
sequential_feature_selection_example()
#--------------------------------------------------------------------
if '__main__' == __name__:
main()
| gpl-2.0 |
nwiizo/workspace_2017 | keras_ex/example/reuters_mlp.py | 2 | 1907 | '''Trains and evaluate a simple MLP
on the Reuters newswire topic classification task.
'''
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import reuters
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.utils import np_utils
from keras.preprocessing.text import Tokenizer
max_words = 1000
batch_size = 32
nb_epoch = 5
print('Loading data...')
(X_train, y_train), (X_test, y_test) = reuters.load_data(nb_words=max_words, test_split=0.2)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
nb_classes = np.max(y_train) + 1
print(nb_classes, 'classes')
print('Vectorizing sequence data...')
tokenizer = Tokenizer(nb_words=max_words)
X_train = tokenizer.sequences_to_matrix(X_train, mode='binary')
X_test = tokenizer.sequences_to_matrix(X_test, mode='binary')
print('X_train shape:', X_train.shape)
print('X_test shape:', X_test.shape)
print('Convert class vector to binary class matrix (for use with categorical_crossentropy)')
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
print('Y_train shape:', Y_train.shape)
print('Y_test shape:', Y_test.shape)
print('Building model...')
model = Sequential()
model.add(Dense(512, input_shape=(max_words,)))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
history = model.fit(X_train, Y_train,
nb_epoch=nb_epoch, batch_size=batch_size,
verbose=1, validation_split=0.1)
score = model.evaluate(X_test, Y_test,
batch_size=batch_size, verbose=1)
print('Test score:', score[0])
print('Test accuracy:', score[1])
| mit |
herilalaina/scikit-learn | sklearn/linear_model/randomized_l1.py | 25 | 25868 | """
Randomized Lasso/Logistic: feature selection based on Lasso and
sparse Logistic Regression
"""
# Author: Gael Varoquaux, Alexandre Gramfort
#
# License: BSD 3 clause
import warnings
import itertools
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from scipy import sparse
from scipy.interpolate import interp1d
from .base import _preprocess_data
from ..base import BaseEstimator
from ..externals import six
from ..externals.joblib import Memory, Parallel, delayed
from ..feature_selection.base import SelectorMixin
from ..utils import (as_float_array, check_random_state, check_X_y, safe_mask,
deprecated)
from ..utils.validation import check_is_fitted
from .least_angle import lars_path, LassoLarsIC
from .logistic import LogisticRegression
from ..exceptions import ConvergenceWarning
###############################################################################
# Randomized linear model: feature selection
def _resample_model(estimator_func, X, y, scaling=.5, n_resampling=200,
n_jobs=1, verbose=False, pre_dispatch='3*n_jobs',
random_state=None, sample_fraction=.75, **params):
random_state = check_random_state(random_state)
# We are generating 1 - weights, and not weights
n_samples, n_features = X.shape
if not (0 < scaling < 1):
raise ValueError(
"'scaling' should be between 0 and 1. Got %r instead." % scaling)
scaling = 1. - scaling
scores_ = 0.0
for active_set in Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)(
delayed(estimator_func)(
X, y, weights=scaling * random_state.randint(
0, 2, size=(n_features,)),
mask=(random_state.rand(n_samples) < sample_fraction),
verbose=max(0, verbose - 1),
**params)
for _ in range(n_resampling)):
scores_ += active_set
scores_ /= n_resampling
return scores_
@deprecated("The class BaseRandomizedLinearModel is deprecated in 0.19"
" and will be removed in 0.21.")
class BaseRandomizedLinearModel(six.with_metaclass(ABCMeta, BaseEstimator,
SelectorMixin)):
"""Base class to implement randomized linear models for feature selection
This implements the strategy by Meinshausen and Buhlman:
stability selection with randomized sampling, and random re-weighting of
the penalty.
"""
@abstractmethod
def __init__(self):
pass
_preprocess_data = staticmethod(_preprocess_data)
def fit(self, X, y):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples]
Target values. Will be cast to X's dtype if necessary
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, ['csr', 'csc'], y_numeric=True,
ensure_min_samples=2, estimator=self)
X = as_float_array(X, copy=False)
n_samples, n_features = X.shape
X, y, X_offset, y_offset, X_scale = \
self._preprocess_data(X, y, self.fit_intercept, self.normalize)
estimator_func, params = self._make_estimator_and_params(X, y)
memory = self.memory
if memory is None:
memory = Memory(cachedir=None, verbose=0)
elif isinstance(memory, six.string_types):
memory = Memory(cachedir=memory, verbose=0)
elif not isinstance(memory, Memory):
raise ValueError("'memory' should either be a string or"
" a sklearn.externals.joblib.Memory"
" instance, got 'memory={!r}' instead.".format(
type(memory)))
scores_ = memory.cache(
_resample_model, ignore=['verbose', 'n_jobs', 'pre_dispatch']
)(
estimator_func, X, y,
scaling=self.scaling, n_resampling=self.n_resampling,
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=self.pre_dispatch, random_state=self.random_state,
sample_fraction=self.sample_fraction, **params)
if scores_.ndim == 1:
scores_ = scores_[:, np.newaxis]
self.all_scores_ = scores_
self.scores_ = np.max(self.all_scores_, axis=1)
return self
def _make_estimator_and_params(self, X, y):
"""Return the parameters passed to the estimator"""
raise NotImplementedError
def _get_support_mask(self):
"""Get the boolean mask indicating which features are selected.
Returns
-------
support : boolean array of shape [# input features]
An element is True iff its corresponding feature is selected
for retention.
"""
check_is_fitted(self, 'scores_')
return self.scores_ > self.selection_threshold
###############################################################################
# Randomized lasso: regression settings
def _randomized_lasso(X, y, weights, mask, alpha=1., verbose=False,
precompute=False, eps=np.finfo(np.float).eps,
max_iter=500):
X = X[safe_mask(X, mask)]
y = y[mask]
# Center X and y to avoid fit the intercept
X -= X.mean(axis=0)
y -= y.mean()
alpha = np.atleast_1d(np.asarray(alpha, dtype=np.float64))
X = (1 - weights) * X
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas_, _, coef_ = lars_path(X, y,
Gram=precompute, copy_X=False,
copy_Gram=False, alpha_min=np.min(alpha),
method='lasso', verbose=verbose,
max_iter=max_iter, eps=eps)
if len(alpha) > 1:
if len(alphas_) > 1: # np.min(alpha) < alpha_min
interpolator = interp1d(alphas_[::-1], coef_[:, ::-1],
bounds_error=False, fill_value=0.)
scores = (interpolator(alpha) != 0.0)
else:
scores = np.zeros((X.shape[1], len(alpha)), dtype=np.bool)
else:
scores = coef_[:, -1] != 0.0
return scores
@deprecated("The class RandomizedLasso is deprecated in 0.19"
" and will be removed in 0.21.")
class RandomizedLasso(BaseRandomizedLinearModel):
"""Randomized Lasso.
Randomized Lasso works by subsampling the training data and
computing a Lasso estimate where the penalty of a random subset of
coefficients has been scaled. By performing this double
randomization several times, the method assigns high scores to
features that are repeatedly selected across randomizations. This
is known as stability selection. In short, features selected more
often are considered good features.
Parameters
----------
alpha : float, 'aic', or 'bic', optional
The regularization parameter alpha parameter in the Lasso.
Warning: this is not the alpha parameter in the stability selection
article which is scaling.
scaling : float, optional
The s parameter used to randomly scale the penalty of different
features.
Should be between 0 and 1.
sample_fraction : float, optional
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional
Number of randomized models.
selection_threshold : float, optional
The score above which features should be selected.
fit_intercept : boolean, optional
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learned more robust and almost independent of
the number of samples. The same property is not valid for
standardized data. However, if you wish to standardize, please
use `preprocessing.StandardScaler` before calling `fit` on an
estimator with `normalize=False`.
precompute : True | False | 'auto' | array-like
Whether to use a precomputed Gram matrix to speed up calculations.
If set to 'auto' let us decide.
The Gram matrix can also be passed as argument, but it will be used
only for the selection of parameter alpha, if alpha is 'aic' or 'bic'.
max_iter : integer, optional
Maximum number of iterations to perform in the Lars algorithm.
eps : float, optional
The machine-precision regularization in the computation of the
Cholesky diagonal factors. Increase this for very ill-conditioned
systems. Unlike the 'tol' parameter in some iterative
optimization-based algorithms, this parameter does not control
the tolerance of the optimization.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : None, str or object with the joblib.Memory interface, optional \
(default=None)
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max of \
``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLasso
>>> randomized_lasso = RandomizedLasso()
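    >>> # Illustrative only: fit on a small synthetic regression problem.
    >>> from sklearn.datasets import make_regression
    >>> X, y = make_regression(n_samples=50, n_features=10, random_state=0)
    >>> _ = randomized_lasso.fit(X, y)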
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLogisticRegression, Lasso, ElasticNet
"""
def __init__(self, alpha='aic', scaling=.5, sample_fraction=.75,
n_resampling=200, selection_threshold=.25,
fit_intercept=True, verbose=False,
normalize=True, precompute='auto',
max_iter=500,
eps=np.finfo(np.float).eps, random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=None):
self.alpha = alpha
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.max_iter = max_iter
self.verbose = verbose
self.normalize = normalize
self.precompute = precompute
self.eps = eps
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
alpha = self.alpha
if isinstance(alpha, six.string_types) and alpha in ('aic', 'bic'):
model = LassoLarsIC(precompute=self.precompute,
criterion=self.alpha,
max_iter=self.max_iter,
eps=self.eps)
model.fit(X, y)
self.alpha_ = alpha = model.alpha_
precompute = self.precompute
# A precomputed Gram array is useless, since _randomized_lasso
# change X a each iteration
if hasattr(precompute, '__array__'):
precompute = 'auto'
assert precompute in (True, False, None, 'auto')
return _randomized_lasso, dict(alpha=alpha, max_iter=self.max_iter,
eps=self.eps,
precompute=precompute)
###############################################################################
# Randomized logistic: classification settings
def _randomized_logistic(X, y, weights, mask, C=1., verbose=False,
fit_intercept=True, tol=1e-3):
X = X[safe_mask(X, mask)]
y = y[mask]
if issparse(X):
size = len(weights)
weight_dia = sparse.dia_matrix((1 - weights, 0), (size, size))
X = X * weight_dia
else:
X *= (1 - weights)
C = np.atleast_1d(np.asarray(C, dtype=np.float64))
if C.ndim > 1:
raise ValueError("C should be 1-dimensional array-like, "
"but got a {}-dimensional array-like instead: {}."
.format(C.ndim, C))
scores = np.zeros((X.shape[1], len(C)), dtype=np.bool)
for this_C, this_scores in zip(C, scores.T):
# XXX : would be great to do it with a warm_start ...
clf = LogisticRegression(C=this_C, tol=tol, penalty='l1', dual=False,
fit_intercept=fit_intercept)
clf.fit(X, y)
this_scores[:] = np.any(
np.abs(clf.coef_) > 10 * np.finfo(np.float).eps, axis=0)
return scores
@deprecated("The class RandomizedLogisticRegression is deprecated in 0.19"
" and will be removed in 0.21.")
class RandomizedLogisticRegression(BaseRandomizedLinearModel):
"""Randomized Logistic Regression
Randomized Logistic Regression works by subsampling the training
data and fitting a L1-penalized LogisticRegression model where the
penalty of a random subset of coefficients has been scaled. By
performing this double randomization several times, the method
assigns high scores to features that are repeatedly selected across
randomizations. This is known as stability selection. In short,
features selected more often are considered good features.
Parameters
----------
C : float or array-like of shape [n_reg_parameter], optional, default=1
The regularization parameter C in the LogisticRegression.
When C is an array, fit will take each regularization parameter in C
one by one for LogisticRegression and store results for each one
in ``all_scores_``, where columns and rows represent corresponding
reg_parameters and features.
scaling : float, optional, default=0.5
The s parameter used to randomly scale the penalty of different
features.
Should be between 0 and 1.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
n_resampling : int, optional, default=200
Number of randomized models.
selection_threshold : float, optional, default=0.25
The score above which features should be selected.
tol : float, optional, default=1e-3
tolerance for stopping criteria of LogisticRegression
fit_intercept : boolean, optional, default=True
whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(e.g. data is expected to be already centered).
verbose : boolean or integer, optional
Sets the verbosity amount
normalize : boolean, optional, default True
If True, the regressors X will be normalized before regression.
This parameter is ignored when `fit_intercept` is set to False.
When the regressors are normalized, note that this makes the
hyperparameters learnt more robust and almost independent of the number
of samples. The same property is not valid for standardized data.
However, if you wish to standardize, please use
`preprocessing.StandardScaler` before calling `fit` on an estimator
with `normalize=False`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
memory : None, str or object with the joblib.Memory interface, optional \
(default=None)
Used for internal caching. By default, no caching is done.
If a string is given, it is the path to the caching directory.
Attributes
----------
scores_ : array, shape = [n_features]
Feature scores between 0 and 1.
all_scores_ : array, shape = [n_features, n_reg_parameter]
Feature scores between 0 and 1 for all values of the regularization \
parameter. The reference article suggests ``scores_`` is the max \
of ``all_scores_``.
Examples
--------
>>> from sklearn.linear_model import RandomizedLogisticRegression
>>> randomized_logistic = RandomizedLogisticRegression()
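    >>> # Illustrative only: fit on a small synthetic classification problem.
    >>> from sklearn.datasets import make_classification
    >>> X, y = make_classification(n_samples=50, n_features=10, random_state=0)
    >>> _ = randomized_logistic.fit(X, y)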
References
----------
Stability selection
Nicolai Meinshausen, Peter Buhlmann
Journal of the Royal Statistical Society: Series B
Volume 72, Issue 4, pages 417-473, September 2010
DOI: 10.1111/j.1467-9868.2010.00740.x
See also
--------
RandomizedLasso, LogisticRegression
"""
def __init__(self, C=1, scaling=.5, sample_fraction=.75,
n_resampling=200,
selection_threshold=.25, tol=1e-3,
fit_intercept=True, verbose=False,
normalize=True,
random_state=None,
n_jobs=1, pre_dispatch='3*n_jobs',
memory=None):
self.C = C
self.scaling = scaling
self.sample_fraction = sample_fraction
self.n_resampling = n_resampling
self.fit_intercept = fit_intercept
self.verbose = verbose
self.normalize = normalize
self.tol = tol
self.random_state = random_state
self.n_jobs = n_jobs
self.selection_threshold = selection_threshold
self.pre_dispatch = pre_dispatch
self.memory = memory
def _make_estimator_and_params(self, X, y):
params = dict(C=self.C, tol=self.tol,
fit_intercept=self.fit_intercept)
return _randomized_logistic, params
def _preprocess_data(self, X, y, fit_intercept, normalize=False):
"""Center the data in X but not in y"""
X, _, X_offset, _, X_scale = _preprocess_data(X, y, fit_intercept,
normalize=normalize)
return X, y, X_offset, y, X_scale
###############################################################################
# Stability paths
def _lasso_stability_path(X, y, mask, weights, eps):
"Inner loop of lasso_stability_path"
X = X * weights[np.newaxis, :]
X = X[safe_mask(X, mask), :]
y = y[mask]
alpha_max = np.max(np.abs(np.dot(X.T, y))) / X.shape[0]
alpha_min = eps * alpha_max # set for early stopping in path
with warnings.catch_warnings():
warnings.simplefilter('ignore', ConvergenceWarning)
alphas, _, coefs = lars_path(X, y, method='lasso', verbose=False,
alpha_min=alpha_min)
# Scale alpha by alpha_max
alphas /= alphas[0]
# Sort alphas in ascending order
alphas = alphas[::-1]
coefs = coefs[:, ::-1]
# Get rid of the alphas that are too small
mask = alphas >= eps
# We also want to keep the first one: it should be close to the OLS
# solution
mask[0] = True
alphas = alphas[mask]
coefs = coefs[:, mask]
return alphas, coefs
@deprecated("The function lasso_stability_path is deprecated in 0.19"
" and will be removed in 0.21.")
def lasso_stability_path(X, y, scaling=0.5, random_state=None,
n_resampling=200, n_grid=100,
sample_fraction=0.75,
eps=4 * np.finfo(np.float).eps, n_jobs=1,
verbose=False):
"""Stability path based on randomized Lasso estimates
Parameters
----------
X : array-like, shape = [n_samples, n_features]
training data.
y : array-like, shape = [n_samples]
target values.
scaling : float, optional, default=0.5
The alpha parameter in the stability selection article used to
randomly scale the features. Should be between 0 and 1.
random_state : int, RandomState instance or None, optional, default=None
The generator used to randomize the design. If int, random_state is
the seed used by the random number generator; If RandomState instance,
random_state is the random number generator; If None, the random number
generator is the RandomState instance used by `np.random`.
n_resampling : int, optional, default=200
Number of randomized models.
n_grid : int, optional, default=100
Number of grid points. The path is linearly reinterpolated
on a grid between 0 and 1 before computing the scores.
sample_fraction : float, optional, default=0.75
The fraction of samples to be used in each randomized design.
Should be between 0 and 1. If 1, all samples are used.
eps : float, optional
Smallest value of alpha / alpha_max considered
n_jobs : integer, optional
Number of CPUs to use during the resampling. If '-1', use
all the CPUs
verbose : boolean or integer, optional
Sets the verbosity amount
Returns
-------
alphas_grid : array, shape ~ [n_grid]
The grid points between 0 and 1: alpha/alpha_max
scores_path : array, shape = [n_features, n_grid]
The scores for each feature along the path.
"""
X, y = check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'])
rng = check_random_state(random_state)
if not (0 < scaling < 1):
raise ValueError("Parameter 'scaling' should be between 0 and 1."
" Got %r instead." % scaling)
n_samples, n_features = X.shape
paths = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_lasso_stability_path)(
X, y, mask=rng.rand(n_samples) < sample_fraction,
weights=1. - scaling * rng.randint(0, 2, size=(n_features,)),
eps=eps)
for k in range(n_resampling))
all_alphas = sorted(list(set(itertools.chain(*[p[0] for p in paths]))))
# Take approximately n_grid values
stride = int(max(1, int(len(all_alphas) / float(n_grid))))
all_alphas = all_alphas[::stride]
if not all_alphas[-1] == 1:
all_alphas.append(1.)
all_alphas = np.array(all_alphas)
scores_path = np.zeros((n_features, len(all_alphas)))
for alphas, coefs in paths:
if alphas[0] != 0:
alphas = np.r_[0, alphas]
coefs = np.c_[np.ones((n_features, 1)), coefs]
if alphas[-1] != all_alphas[-1]:
alphas = np.r_[alphas, all_alphas[-1]]
coefs = np.c_[coefs, np.zeros((n_features, 1))]
scores_path += (interp1d(alphas, coefs,
kind='nearest', bounds_error=False,
fill_value=0, axis=-1)(all_alphas) != 0)
scores_path /= n_resampling
return all_alphas, scores_path
| bsd-3-clause |
hackrflov/BilibiliCrawler | analyst/video.py | 1 | 4384 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
File Name: video.py
Date: 08/18/2017
Author: hackrflov
Email: [email protected]
Python Version: 2.7
"""
import sys
from datetime import datetime as dt
import numpy as np
from pymongo import UpdateOne
from sklearn import linear_model, svm
import matplotlib.pyplot as plt
from bili_util import BiliUtil
class Video(BiliUtil):
clt_name = 'video'
def sort_by_aid(self, limit=10):
self.sort_by_key('aid', int(limit))
def sort_by_coin(self):
self.sort_by_key('coin')
def sort_by_view(self):
self.sort_by_key('view')
def sort_by_danmaku(self):
self.sort_by_key('danmaku')
def sort_by_pubdate(self):
self.sort_by_key('pubdate')
    def sort_by_favorite(self):
        self.sort_by_key('favorite')
def sort_by_reply(self):
self.sort_by_key('reply')
def count_by_typename(self):
self.count_by_key('typename')
def count_by_toptype(self):
self.count_by_key('toptype')
def count_by_aid_part(self):
docs = self.db.video.aggregate([
{ '$project' : { 'id_part' : { '$floor' : { '$divide' : [ '$aid', 1000000 ] } } } },
{ '$group' : { '_id' : '$id_part', 'count' : { '$sum' : 1 } } },
{ '$sort' : { '_id' : 1 } }
])
self.show(docs)
"""
    method: find users who published videos with the given tag
"""
def find_user_with_tag(self, tag):
docs = self.db.video.aggregate([
{ '$match' : { 'tag' : tag } },
#{ '$sample' : { 'size' : 100 } },
{ '$lookup' :
{ 'from' : 'user',
'localField' : 'mid',
'foreignField' : 'mid',
'as' : 'author'
}
},
{ '$limit' : 10 }
])
self.show(docs)
def find_by_tag(self, tag):
self.find_by_field('tags', [tag])
def join_with_user(self):
docs = self.db[self.clt_name].aggregate([
{ '$sample' : { 'size' : 10 } },
{ '$lookup' :
{ 'from' : 'user',
'localField' : 'mid',
'foreignField' : 'mid',
'as' : 'author'
}
},
{ '$limit' : 10 }
])
self.show(docs)
#========== Machine Learning ==========#
"""
method: Use linear regression to predict coin
input @view: view amount
input @fav: favorite amount
input @reply: reply amount
"""
def predict_coin(self, view, fav, reply):
docs = self.db.video.find({},['view','coin','favorite','reply']).limit(10000)
doc_list = [doc for doc in docs]
data = np.array([ [ doc['view'],doc['favorite'], doc['reply'] ] for doc in doc_list])
target = np.array([doc['coin'] for doc in doc_list])
reg = linear_model.LinearRegression()
reg.fit(data, target)
print reg.coef_
print reg.predict([[int(view), int(fav), int(reply)]])
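    # Example invocation via the CLI dispatch in main() (values are illustrative):
    #   python video.py predict_coin 100000 2000 500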
"""
method: Use linear regression to predict pub amount
"""
def predict_pub_amount(self):
docs = self.db.video.aggregate([
# { '$sample' : { 'size' : 10000 } },
{ '$match' : { 'pubdate' : { '$exists' : 1 } } },
{ '$project' : { 'date' : { '$dateToString' : { 'format' : '%Y-%m', 'date' : '$pubdate' } } } },
{ '$group' : { '_id' : '$date', 'count' : { '$sum' : 1 } } },
])
data = np.array([ doc for doc in docs])
data = sorted(data, key=lambda doc: doc['_id'].split('-'))
dx, dy = [], []
for doc in data:
(year, month) = doc['_id'].split('-')
date = dt(int(year), int(month), 1).strftime('%y-%m')
dx.append(date)
dy.append(doc['count'])
x = np.array(dx)
y = np.array(dy)
print x, y
N = len(x)
ind = np.arange(N)
width = 0.35
plt.bar(ind, y, width, align='center')
plt.xticks(ind, x, rotation=45)
plt.show()
if __name__ == '__main__':
video = Video()
if len(sys.argv) >= 2:
func_name = sys.argv[1]
if len(sys.argv) >= 3:
getattr(video, func_name)(*sys.argv[2:])
else:
getattr(video, func_name)()
else:
video.list()
| mit |
schets/scikit-learn | sklearn/preprocessing/label.py | 13 | 28598 | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import deprecated, column_or_1d
from ..utils.validation import check_array
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelEncoder was not fitted yet.")
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
self._check_fitted()
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
self._check_fitted()
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
utils.multiclass.type_of_target. Possible type are 'continuous',
'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-sequences',
'multilabel-indicator', and 'unknown'.
multilabel_ : boolean
True if the transformer was fitted on a multilabel rather than a
multiclass set of labels. The ``multilabel_`` attribute is deprecated
and will be removed in 0.18
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
indicator_matrix_ : str
        'sparse' when the input data to transform is a multilabel-indicator and
is sparse, None otherwise. The ``indicator_matrix_`` attribute is
deprecated as of version 0.16 and will be removed in 0.18
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
@property
@deprecated("Attribute ``indicator_matrix_`` is deprecated and will be "
"removed in 0.17. Use ``y_type_ == 'multilabel-indicator'`` "
"instead")
def indicator_matrix_(self):
return self.y_type_ == 'multilabel-indicator'
@property
@deprecated("Attribute ``multilabel_`` is deprecated and will be removed "
"in 0.17. Use ``y_type_.startswith('multilabel')`` "
"instead")
def multilabel_(self):
return self.y_type_.startswith('multilabel')
def _check_fitted(self):
if not hasattr(self, "classes_"):
raise ValueError("LabelBinarizer was not fitted yet.")
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
            Target values. The 2-d matrix should only contain 0 and 1,
            representing multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
            contain 0 and 1, representing multilabel classification. Sparse
            matrices can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
self._check_fitted()
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
y : numpy array or CSR matrix of shape [n_samples] Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this makes it possible to use the output
        of a linear model's decision_function method directly as the input
        of inverse_transform.
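        Examples
        --------
        A minimal illustrative sketch (binary case; the scores are arbitrary)
        >>> import numpy as np
        >>> from sklearn.preprocessing import LabelBinarizer
        >>> lb = LabelBinarizer().fit([0, 1, 1])
        >>> lb.inverse_transform(np.array([-0.7, 0.3, 1.2]), threshold=0)
        array([0, 1, 1])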
"""
self._check_fitted()
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1,
sparse_output=False, multilabel=None):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
if multilabel is not None:
warnings.warn("The multilabel parameter is deprecated as of version "
"0.15 and will be removed in 0.17. The parameter is no "
"longer necessary because the value is automatically "
"inferred.", DeprecationWarning)
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
        raise ValueError("classes {0} mismatch with the labels {1} "
                         "found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
elif y_type == "multilabel-sequences":
Y = MultiLabelBinarizer(classes=classes,
sparse_output=sparse_output).fit_transform(y)
if sp.issparse(Y):
Y.data[:] = pos_label
else:
Y[Y == 1] = pos_label
return Y
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
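    For instance (illustrative values), the class with the largest score in
    each row is returned
    >>> import numpy as np
    >>> _inverse_binarize_multiclass(np.array([[0.2, 0.7, 0.1]]), classes=[1, 2, 3])
    array([2])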
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
        raise ValueError("The number of classes is not equal to the number of "
                         "dimensions of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
elif output_type == "multilabel-sequences":
warnings.warn('Direct support for sequence of sequences multilabel '
'representation will be unavailable from version 0.17. '
'Use sklearn.preprocessing.MultiLabelBinarizer to '
'convert to a label indicator representation.',
DeprecationWarning)
mlb = MultiLabelBinarizer(classes=classes).fit([])
return mlb.inverse_transform(y)
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
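        Examples
        --------
        A minimal sketch with classes fixed in advance (illustrative values)
        >>> mlb = MultiLabelBinarizer(classes=[1, 2, 3]).fit([])
        >>> mlb.transform([(1, 3)])
        array([[1, 0, 1]])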
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
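        Examples
        --------
        A small illustrative round trip (the label values are arbitrary)
        >>> import numpy as np
        >>> mlb = MultiLabelBinarizer()
        >>> mlb.fit_transform([(1, 2), (3,)])
        array([[1, 1, 0],
               [0, 0, 1]])
        >>> mlb.inverse_transform(np.array([[1, 1, 0], [0, 0, 1]]))
        [(1, 2), (3,)]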
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
| bsd-3-clause |
herilalaina/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 52 | 4523 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
print(__doc__)
# #############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
# #############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA(svd_solver='full')
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
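# Note on compute_scores above: with no explicit ``scoring``, cross_val_score
# falls back to each estimator's own ``score`` method, which for PCA and
# FactorAnalysis is the average log-likelihood of the held-out data. That is
# what makes the likelihood-based model comparison in this example possible.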
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(svd_solver='full', n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa,
linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
herilalaina/scikit-learn | sklearn/metrics/tests/test_common.py | 9 | 43823 | from __future__ import division, print_function
from functools import partial
from itertools import product
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import _num_samples
from sklearn.utils.validation import check_random_state
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import brier_score_loss
from sklearn.metrics import cohen_kappa_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import zero_one_loss
# TODO Curve are currently not covered by invariance test
# from sklearn.metrics import precision_recall_curve
# from sklearn.metrics import roc_curve
from sklearn.metrics.base import _average_binary_score
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" with value True
# will return the mean of the metrics and with value False will return
# the sum of the metrics.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of data structures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance toward several input layout.
#
REGRESSION_METRICS = {
"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"median_absolute_error": median_absolute_error,
"explained_variance_score": explained_variance_score,
"r2_score": partial(r2_score, multioutput='variance_weighted'),
}
CLASSIFICATION_METRICS = {
"accuracy_score": accuracy_score,
"balanced_accuracy_score": balanced_accuracy_score,
"unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
"confusion_matrix": confusion_matrix,
"hamming_loss": hamming_loss,
"jaccard_similarity_score": jaccard_similarity_score,
"unnormalized_jaccard_similarity_score":
partial(jaccard_similarity_score, normalize=False),
"zero_one_loss": zero_one_loss,
"unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
# These are needed to test averaging
"precision_score": precision_score,
"recall_score": recall_score,
"f1_score": f1_score,
"f2_score": partial(fbeta_score, beta=2),
"f0.5_score": partial(fbeta_score, beta=0.5),
"matthews_corrcoef_score": matthews_corrcoef,
"weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
"weighted_f1_score": partial(f1_score, average="weighted"),
"weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
"weighted_precision_score": partial(precision_score, average="weighted"),
"weighted_recall_score": partial(recall_score, average="weighted"),
"micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
"micro_f1_score": partial(f1_score, average="micro"),
"micro_f2_score": partial(fbeta_score, average="micro", beta=2),
"micro_precision_score": partial(precision_score, average="micro"),
"micro_recall_score": partial(recall_score, average="micro"),
"macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
"macro_f1_score": partial(f1_score, average="macro"),
"macro_f2_score": partial(fbeta_score, average="macro", beta=2),
"macro_precision_score": partial(precision_score, average="macro"),
"macro_recall_score": partial(recall_score, average="macro"),
"samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
"samples_f1_score": partial(f1_score, average="samples"),
"samples_f2_score": partial(fbeta_score, average="samples", beta=2),
"samples_precision_score": partial(precision_score, average="samples"),
"samples_recall_score": partial(recall_score, average="samples"),
"cohen_kappa_score": cohen_kappa_score,
}
THRESHOLDED_METRICS = {
"coverage_error": coverage_error,
"label_ranking_loss": label_ranking_loss,
"log_loss": log_loss,
"unnormalized_log_loss": partial(log_loss, normalize=False),
"hinge_loss": hinge_loss,
"brier_score_loss": brier_score_loss,
"roc_auc_score": roc_auc_score,
"weighted_roc_auc": partial(roc_auc_score, average="weighted"),
"samples_roc_auc": partial(roc_auc_score, average="samples"),
"micro_roc_auc": partial(roc_auc_score, average="micro"),
"macro_roc_auc": partial(roc_auc_score, average="macro"),
"average_precision_score": average_precision_score,
"weighted_average_precision_score":
partial(average_precision_score, average="weighted"),
"samples_average_precision_score":
partial(average_precision_score, average="samples"),
"micro_average_precision_score":
partial(average_precision_score, average="micro"),
"macro_average_precision_score":
partial(average_precision_score, average="macro"),
"label_ranking_average_precision_score":
label_ranking_average_precision_score,
}
ALL_METRICS = dict()
ALL_METRICS.update(THRESHOLDED_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
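# For example (illustrative), a test can look up any metric by name and call
# it on toy arrays:
#     metric = ALL_METRICS["accuracy_score"]
#     metric([0, 1, 1], [0, 1, 0])  # -> 0.666...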
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input argument y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
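# For example, a hypothetical new classification metric ``my_score`` would be
# registered in CLASSIFICATION_METRICS above and, where applicable, appended
# to lists such as SYMMETRIC_METRICS or METRICS_WITH_AVERAGING below so that
# the generic invariance tests pick it up automatically.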
# Those metrics don't support binary inputs
METRIC_UNDEFINED_BINARY = [
"samples_f0.5_score",
"samples_f1_score",
"samples_f2_score",
"samples_precision_score",
"samples_recall_score",
"coverage_error",
"average_precision_score",
"weighted_average_precision_score",
"micro_average_precision_score",
"macro_average_precision_score",
"samples_average_precision_score",
"label_ranking_loss",
"label_ranking_average_precision_score",
]
# Those metrics don't support multiclass inputs
METRIC_UNDEFINED_MULTICLASS = [
"brier_score_loss",
"balanced_accuracy_score",
"roc_auc_score",
"micro_roc_auc",
"weighted_roc_auc",
"macro_roc_auc",
"samples_roc_auc",
# with default average='binary', multiclass is prohibited
"precision_score",
"recall_score",
"f1_score",
"f2_score",
"f0.5_score",
]
# Metric undefined with "binary" or "multiclass" input
METRIC_UNDEFINED_BINARY_MULTICLASS = set(METRIC_UNDEFINED_BINARY).union(
set(METRIC_UNDEFINED_MULTICLASS))
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = [
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score"
]
# Threshold-based metrics with an "average" argument
THRESHOLDED_METRICS_WITH_AVERAGING = [
"roc_auc_score", "average_precision_score",
]
# Metrics with a "pos_label" argument
METRICS_WITH_POS_LABEL = [
"roc_curve",
"brier_score_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
# pos_label support deprecated; to be removed in 0.18:
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
]
# Metrics with a "labels" argument
# TODO: Handle multi_class metrics that has a labels argument as well as a
# decision function argument. e.g hinge_loss
METRICS_WITH_LABELS = [
"confusion_matrix",
"hamming_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"cohen_kappa_score",
]
# Metrics with a "normalize" option
METRICS_WITH_NORMALIZE_OPTION = [
"accuracy_score",
"jaccard_similarity_score",
"zero_one_loss",
]
# Threshold-based metrics with "multilabel-indicator" format support
THRESHOLDED_MULTILABEL_METRICS = [
"log_loss",
"unnormalized_log_loss",
"roc_auc_score", "weighted_roc_auc", "samples_roc_auc",
"micro_roc_auc", "macro_roc_auc",
"average_precision_score", "weighted_average_precision_score",
"samples_average_precision_score", "micro_average_precision_score",
"macro_average_precision_score",
"coverage_error", "label_ranking_loss",
]
# Classification metrics with "multilabel-indicator" format
MULTILABELS_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
]
# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = [
"mean_absolute_error", "mean_squared_error", "r2_score",
"explained_variance_score"
]
# Symmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) == metric(y_pred, y_true).
SYMMETRIC_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"f1_score", "micro_f1_score", "macro_f1_score",
"weighted_recall_score",
# P = R = F = accuracy in multiclass case
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error",
"median_absolute_error",
"cohen_kappa_score",
]
# Asymmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = [
"balanced_accuracy_score",
"explained_variance_score",
"r2_score",
"confusion_matrix",
"precision_score", "recall_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score",
"macro_f0.5_score", "macro_f2_score", "macro_precision_score",
"macro_recall_score", "log_loss", "hinge_loss"
]
# No Sample weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = [
"confusion_matrix", # Left this one here because the tests in this file do
# not work for confusion_matrix, as its output is a
# matrix instead of a number. Testing of
# confusion_matrix with sample_weight is in
# test_classification.py
"median_absolute_error",
]
@ignore_warnings
def test_symmetry():
# Test the symmetry of score and loss functions
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
# We shouldn't forget any metrics
assert_equal(set(SYMMETRIC_METRICS).union(
NOT_SYMMETRIC_METRICS, THRESHOLDED_METRICS,
METRIC_UNDEFINED_BINARY_MULTICLASS),
set(ALL_METRICS))
assert_equal(
set(SYMMETRIC_METRICS).intersection(set(NOT_SYMMETRIC_METRICS)),
set([]))
# Symmetric metric
for name in SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_pred, y_true),
err_msg="%s is not symmetric" % name)
# Not symmetric metrics
for name in NOT_SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_true(np.any(metric(y_true, y_pred) != metric(y_pred, y_true)),
msg="%s seems to be symmetric" % name)
@ignore_warnings
def test_sample_order_invariance():
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_sample_order_invariance_multilabel_and_multioutput():
random_state = check_random_state(0)
# Generate some data
y_true = random_state.randint(0, 2, size=(20, 25))
y_pred = random_state.randint(0, 2, size=(20, 25))
y_score = random_state.normal(size=y_true.shape)
y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true,
y_pred,
y_score,
random_state=0)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in THRESHOLDED_MULTILABEL_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
@ignore_warnings
def test_format_invariance_with_1d_vectors():
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_list = list(y1)
y2_list = list(y2)
y1_1d, y2_1d = np.array(y1), np.array(y2)
assert_equal(y1_1d.ndim, 1)
assert_equal(y2_1d.ndim, 1)
y1_column = np.reshape(y1_1d, (-1, 1))
y2_column = np.reshape(y2_1d, (-1, 1))
y1_row = np.reshape(y1_1d, (1, -1))
y2_row = np.reshape(y2_1d, (1, -1))
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
measure = metric(y1, y2)
assert_almost_equal(metric(y1_list, y2_list), measure,
err_msg="%s is not representation invariant "
"with list" % name)
assert_almost_equal(metric(y1_1d, y2_1d), measure,
err_msg="%s is not representation invariant "
"with np-array-1d" % name)
assert_almost_equal(metric(y1_column, y2_column), measure,
err_msg="%s is not representation invariant "
"with np-array-column" % name)
# Mix format support
assert_almost_equal(metric(y1_1d, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_list, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_1d, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_list, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
# These mix representations aren't allowed
assert_raises(ValueError, metric, y1_1d, y2_row)
assert_raises(ValueError, metric, y1_row, y2_1d)
assert_raises(ValueError, metric, y1_list, y2_row)
assert_raises(ValueError, metric, y1_row, y2_list)
assert_raises(ValueError, metric, y1_column, y2_row)
assert_raises(ValueError, metric, y1_row, y2_column)
# NB: We do not test for y1_row, y2_row as these may be
# interpreted as multilabel or multioutput data.
if (name not in (MULTIOUTPUT_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTILABELS_METRICS)):
assert_raises(ValueError, metric, y1_row, y2_row)
@ignore_warnings
def test_invariance_string_vs_numbers_labels():
    # Ensure that classification metrics give the same results with string
    # labels as with numeric labels
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_str = np.array(["eggs", "spam"])[y1]
y2_str = np.array(["eggs", "spam"])[y2]
pos_label_str = "spam"
labels_str = ["eggs", "spam"]
for name, metric in CLASSIFICATION_METRICS.items():
if name in METRIC_UNDEFINED_BINARY_MULTICLASS:
continue
measure_with_number = metric(y1, y2)
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number invariance "
"test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
if name in METRICS_WITH_LABELS:
metric_str = partial(metric_str, labels=labels_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string vs number "
"invariance test".format(name))
for name, metric in THRESHOLDED_METRICS.items():
if name not in METRIC_UNDEFINED_BINARY:
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_number = metric(y1, y2)
measure_with_str = metric_str(y1_str, y2)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric(y1_str.astype('O'), y2)
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
else:
            # TODO these metrics don't support string labels yet
assert_raises(ValueError, metric, y1_str, y2)
assert_raises(ValueError, metric, y1_str.astype('O'), y2)
def test_inf_nan_input():
    invalids = [([0, 1], [np.inf, np.inf]),
                ([0, 1], [np.nan, np.nan]),
                ([0, 1], [np.nan, np.inf])]
METRICS = dict()
METRICS.update(THRESHOLDED_METRICS)
METRICS.update(REGRESSION_METRICS)
for metric in METRICS.values():
for y_true, y_score in invalids:
assert_raise_message(ValueError,
"contains NaN, infinity",
metric, y_true, y_score)
# Classification metrics all raise a mixed input exception
for metric in CLASSIFICATION_METRICS.values():
for y_true, y_score in invalids:
assert_raise_message(ValueError,
"Classification metrics can't handle a mix "
"of binary and continuous targets",
metric, y_true, y_score)
@ignore_warnings
def check_single_sample(name):
# Non-regression test: scores should work with a single sample.
# This is important for leave-one-out cross validation.
# Score functions tested are those that formerly called np.squeeze,
# which turns an array of size 1 into a 0-d array (!).
metric = ALL_METRICS[name]
# assert that no exception is thrown
for i, j in product([0, 1], repeat=2):
metric([i], [j])
@ignore_warnings
def check_single_sample_multioutput(name):
metric = ALL_METRICS[name]
for i, j, k, l in product([0, 1], repeat=4):
metric(np.array([[i, j]]), np.array([[k, l]]))
def test_single_sample():
for name in ALL_METRICS:
if (name in METRIC_UNDEFINED_BINARY_MULTICLASS or
name in THRESHOLDED_METRICS):
# Those metrics are not always defined with one sample
# or in multiclass classification
continue
yield check_single_sample, name
for name in MULTIOUTPUT_METRICS + MULTILABELS_METRICS:
yield check_single_sample_multioutput, name
def test_multioutput_number_of_output_differ():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0], [1, 0], [0, 0]])
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_raises(ValueError, metric, y_true, y_pred)
def test_multioutput_regression_invariance_to_dimension_shuffling():
# test invariance to dimension shuffling
random_state = check_random_state(0)
y_true = random_state.uniform(0, 2, size=(20, 5))
y_pred = random_state.uniform(0, 2, size=(20, 5))
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
error = metric(y_true, y_pred)
for _ in range(3):
perm = random_state.permutation(y_true.shape[1])
assert_almost_equal(metric(y_true[:, perm], y_pred[:, perm]),
error,
err_msg="%s is not dimension shuffling "
"invariant" % name)
@ignore_warnings
def test_multilabel_representation_invariance():
# Generate some data
n_classes = 4
n_samples = 50
_, y1 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=0, n_samples=n_samples,
allow_unlabeled=True)
_, y2 = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=1, n_samples=n_samples,
allow_unlabeled=True)
# To make sure at least one empty label is present
y1 = np.vstack([y1, [[0] * n_classes]])
y2 = np.vstack([y2, [[0] * n_classes]])
y1_sparse_indicator = sp.coo_matrix(y1)
y2_sparse_indicator = sp.coo_matrix(y2)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
# XXX cruel hack to work with partial functions
if isinstance(metric, partial):
metric.__module__ = 'tmp'
metric.__name__ = name
measure = metric(y1, y2)
# Check representation invariance
assert_almost_equal(metric(y1_sparse_indicator,
y2_sparse_indicator),
measure,
err_msg="%s failed representation invariance "
"between dense and sparse indicator "
"formats." % name)
def test_raise_value_error_multilabel_sequences():
# make sure the multilabel-sequence format raises ValueError
multilabel_sequences = [
[[0, 1]],
[[1], [2], [0, 1]],
[(), (2), (0, 1)],
[[]],
[()],
np.array([[], [1, 2]], dtype='object')]
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
for seq in multilabel_sequences:
assert_raises(ValueError, metric, seq, seq)
def test_normalize_option_binary_classification(n_samples=20):
# Test in the binary case
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multiclass_classification():
# Test in the multiclass case
random_state = check_random_state(0)
y_true = random_state.randint(0, 4, size=(20, ))
y_pred = random_state.randint(0, 4, size=(20, ))
n_samples = y_true.shape[0]
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multilabel_classification():
# Test in the multilabel case
n_classes = 4
n_samples = 100
    # for both random_state 0 and 1, y_true and y_pred have at least one
    # unlabelled entry
_, y_true = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=0,
allow_unlabeled=True,
n_samples=n_samples)
_, y_pred = make_multilabel_classification(n_features=1,
n_classes=n_classes,
random_state=1,
allow_unlabeled=True,
n_samples=n_samples)
# To make sure at least one empty label is present
y_true += [0]*n_classes
y_pred += [0]*n_classes
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure,
err_msg="Failed with %s" % name)
@ignore_warnings
def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize,
is_multilabel):
n_samples, n_classes = y_true_binarize.shape
# No averaging
label_measure = metric(y_true, y_pred, average=None)
assert_array_almost_equal(label_measure,
[metric(y_true_binarize[:, i],
y_pred_binarize[:, i])
for i in range(n_classes)])
# Micro measure
micro_measure = metric(y_true, y_pred, average="micro")
assert_almost_equal(micro_measure, metric(y_true_binarize.ravel(),
y_pred_binarize.ravel()))
# Macro measure
macro_measure = metric(y_true, y_pred, average="macro")
assert_almost_equal(macro_measure, np.mean(label_measure))
# Weighted measure
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, np.average(label_measure,
weights=weights))
else:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, 0)
# Sample measure
if is_multilabel:
sample_measure = metric(y_true, y_pred, average="samples")
assert_almost_equal(sample_measure,
np.mean([metric(y_true_binarize[i],
y_pred_binarize[i])
for i in range(n_samples)]))
assert_raises(ValueError, metric, y_true, y_pred, average="unknown")
assert_raises(ValueError, metric, y_true, y_pred, average="garbage")
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize,
y_score):
is_multilabel = type_of_target(y_true).startswith("multilabel")
metric = ALL_METRICS[name]
if name in METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel)
elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_score, y_true_binarize,
y_score, is_multilabel)
else:
raise ValueError("Metric is not recorded as having an average option")
def test_averaging_multiclass(n_samples=50, n_classes=3):
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples, ))
y_pred = random_state.randint(0, n_classes, size=(n_samples, ))
y_score = random_state.uniform(size=(n_samples, n_classes))
lb = LabelBinarizer().fit(y_true)
y_true_binarize = lb.transform(y_true)
y_pred_binarize = lb.transform(y_pred)
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize,
y_pred, y_pred_binarize, y_score)
def test_averaging_multilabel(n_classes=5, n_samples=40):
_, y = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=5, n_samples=n_samples,
allow_unlabeled=False)
y_true = y[:20]
y_pred = y[20:]
y_score = check_random_state(0).normal(size=(20, n_classes))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING + THRESHOLDED_METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize,
y_pred, y_pred_binarize, y_score)
def test_averaging_multilabel_all_zeroes():
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_score = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize,
y_pred, y_pred_binarize, y_score)
# Test _average_binary_score for weight.sum() == 0
binary_metric = (lambda y_true, y_score, average="macro":
_average_binary_score(
precision_score, y_true, y_score, average))
_check_averaging(binary_metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel=True)
def test_averaging_multilabel_all_ones():
y_true = np.ones((20, 3))
y_pred = np.ones((20, 3))
y_score = np.ones((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize,
y_pred, y_pred_binarize, y_score)
@ignore_warnings
def check_sample_weight_invariance(name, metric, y1, y2):
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y1))
    # check that unit weights give the same score as no weights
unweighted_score = metric(y1, y2, sample_weight=None)
assert_almost_equal(
unweighted_score,
metric(y1, y2, sample_weight=np.ones(shape=len(y1))),
err_msg="For %s sample_weight=None is not equivalent to "
"sample_weight=ones" % name)
# check that the weighted and unweighted scores are unequal
weighted_score = metric(y1, y2, sample_weight=sample_weight)
assert_not_equal(
unweighted_score, weighted_score,
msg="Unweighted and weighted scores are unexpectedly "
"equal (%f) for %s" % (weighted_score, name))
# check that sample_weight can be a list
weighted_score_list = metric(y1, y2,
sample_weight=sample_weight.tolist())
assert_almost_equal(
weighted_score, weighted_score_list,
err_msg=("Weighted scores for array and list "
"sample_weight input are not equal (%f != %f) for %s") % (
weighted_score, weighted_score_list, name))
    # check that integer weights give the same result as repeating samples
repeat_weighted_score = metric(
np.repeat(y1, sample_weight, axis=0),
np.repeat(y2, sample_weight, axis=0), sample_weight=None)
assert_almost_equal(
weighted_score, repeat_weighted_score,
err_msg="Weighting %s is not equal to repeating samples" % name)
# check that ignoring a fraction of the samples is equivalent to setting
# the corresponding weights to zero
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y1[1::2]
y2_subset = y2[1::2]
weighted_score_subset = metric(y1_subset, y2_subset,
sample_weight=sample_weight_subset)
weighted_score_zeroed = metric(y1, y2,
sample_weight=sample_weight_zeroed)
assert_almost_equal(
weighted_score_subset, weighted_score_zeroed,
err_msg=("Zeroing weights does not give the same result as "
"removing the corresponding samples (%f != %f) for %s" %
(weighted_score_zeroed, weighted_score_subset, name)))
if not name.startswith('unnormalized'):
# check that the score is invariant under scaling of the weights by a
# common factor
for scaling in [2, 0.3]:
assert_almost_equal(
weighted_score,
metric(y1, y2, sample_weight=sample_weight * scaling),
err_msg="%s sample_weight is not invariant "
"under scaling" % name)
    # Check that if the numbers of samples in y_true and sample_weight are
    # not equal, a meaningful error is raised.
error_message = ("Found input variables with inconsistent numbers of "
"samples: [{}, {}, {}]".format(
_num_samples(y1), _num_samples(y2),
_num_samples(sample_weight) * 2))
assert_raise_message(ValueError, error_message, metric, y1, y2,
sample_weight=np.hstack([sample_weight,
sample_weight]))
def test_sample_weight_invariance(n_samples=50):
random_state = check_random_state(0)
# regression
y_true = random_state.random_sample(size=(n_samples,))
y_pred = random_state.random_sample(size=(n_samples,))
for name in ALL_METRICS:
if name not in REGRESSION_METRICS:
continue
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# binary
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples,))
for name in ALL_METRICS:
if name in REGRESSION_METRICS:
continue
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_BINARY):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield check_sample_weight_invariance, name, metric, y_true, y_score
else:
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# multiclass
random_state = check_random_state(0)
y_true = random_state.randint(0, 5, size=(n_samples, ))
y_pred = random_state.randint(0, 5, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples, 5))
for name in ALL_METRICS:
if name in REGRESSION_METRICS:
continue
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_BINARY_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield check_sample_weight_invariance, name, metric, y_true, y_score
else:
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# multilabel indicator
_, ya = make_multilabel_classification(n_features=1, n_classes=20,
random_state=0, n_samples=100,
allow_unlabeled=False)
_, yb = make_multilabel_classification(n_features=1, n_classes=20,
random_state=1, n_samples=100,
allow_unlabeled=False)
y_true = np.vstack([ya, yb])
y_pred = np.vstack([ya, ya])
y_score = random_state.randint(1, 4, size=y_true.shape)
for name in (MULTILABELS_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTIOUTPUT_METRICS):
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield (check_sample_weight_invariance, name, metric,
y_true, y_score)
else:
yield (check_sample_weight_invariance, name, metric,
y_true, y_pred)
@ignore_warnings
def test_no_averaging_labels():
# test labels argument when not using averaging
# in multi-class and multi-label cases
y_true_multilabel = np.array([[1, 1, 0, 0], [1, 1, 0, 0]])
y_pred_multilabel = np.array([[0, 0, 1, 1], [0, 1, 1, 0]])
y_true_multiclass = np.array([0, 1, 2])
y_pred_multiclass = np.array([0, 2, 3])
labels = np.array([3, 0, 1, 2])
_, inverse_labels = np.unique(labels, return_inverse=True)
for name in METRICS_WITH_AVERAGING:
for y_true, y_pred in [[y_true_multiclass, y_pred_multiclass],
[y_true_multilabel, y_pred_multilabel]]:
if name not in MULTILABELS_METRICS and y_pred.ndim > 1:
continue
metric = ALL_METRICS[name]
score_labels = metric(y_true, y_pred, labels=labels, average=None)
score = metric(y_true, y_pred, average=None)
assert_array_equal(score_labels, score[inverse_labels])
| bsd-3-clause |
herilalaina/scikit-learn | benchmarks/bench_plot_lasso_path.py | 82 | 4005 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but has a fat, infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features / 10,
'effective_rank': min(n_samples, n_features) / 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
        # dummy point plot to stick the legend to since surface plots do not
        # support legends (yet?)
# ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
# ax.legend()
i += 1
plt.show()
| bsd-3-clause |
FrancescAlted/PyTables | tables/description.py | 3 | 38129 | """Classes for describing columns for ``Table`` objects."""
import copy
import warnings
import numpy as np
from . import atom
from .path import check_name_validity
__docformat__ = 'reStructuredText'
"""The format of documentation strings in this module."""
def same_position(oldmethod):
"""Decorate `oldmethod` to also compare the `_v_pos` attribute."""
def newmethod(self, other):
try:
other._v_pos
except AttributeError:
return False # not a column definition
return self._v_pos == other._v_pos and oldmethod(self, other)
newmethod.__name__ = oldmethod.__name__
newmethod.__doc__ = oldmethod.__doc__
return newmethod
class Col(atom.Atom, metaclass=type):
"""Defines a non-nested column.
Col instances are used as a means to declare the different properties of a
non-nested column in a table or nested column. Col classes are descendants
of their equivalent Atom classes (see :ref:`AtomClassDescr`), but their
instances have an additional _v_pos attribute that is used to decide the
position of the column inside its parent table or nested column (see the
IsDescription class in :ref:`IsDescriptionClassDescr` for more information
on column positions).
In the same fashion as Atom, you should use a particular Col descendant
class whenever you know the exact type you will need when writing your
code. Otherwise, you may use one of the Col.from_*() factory methods.
Each factory method inherited from the Atom class is available with the
same signature, plus an additional pos parameter (placed in last position)
which defaults to None and that may take an integer value. This parameter
might be used to specify the position of the column in the table.
Besides, there are the next additional factory methods, available only for
Col objects.
The following parameters are available for most Col-derived constructors.
Parameters
----------
itemsize : int
For types with a non-fixed size, this sets the size in bytes of
individual items in the column.
shape : tuple
Sets the shape of the column. An integer shape of N is equivalent to
the tuple (N,).
dflt
Sets the default value for the column.
pos : int
Sets the position of column in table. If unspecified, the position
will be randomly selected.
"""
_class_from_prefix = {} # filled as column classes are created
"""Maps column prefixes to column classes."""
@classmethod
def prefix(cls):
"""Return the column class prefix."""
cname = cls.__name__
return cname[:cname.rfind('Col')]
@classmethod
def from_atom(cls, atom, pos=None, _offset=None):
"""Create a Col definition from a PyTables atom.
An optional position may be specified as the pos argument.
"""
prefix = atom.prefix()
kwargs = atom._get_init_args()
colclass = cls._class_from_prefix[prefix]
return colclass(pos=pos, _offset=_offset, **kwargs)
@classmethod
def from_sctype(cls, sctype, shape=(), dflt=None, pos=None):
"""Create a `Col` definition from a NumPy scalar type `sctype`.
Optional shape, default value and position may be specified as
the `shape`, `dflt` and `pos` arguments, respectively.
Information in the `sctype` not represented in a `Col` is
ignored.
"""
newatom = atom.Atom.from_sctype(sctype, shape, dflt)
return cls.from_atom(newatom, pos=pos)
@classmethod
def from_dtype(cls, dtype, dflt=None, pos=None, _offset=None):
"""Create a `Col` definition from a NumPy `dtype`.
Optional default value and position may be specified as the
`dflt` and `pos` arguments, respectively. The `dtype` must have
a byte order which is irrelevant or compatible with that of the
system. Information in the `dtype` not represented in a `Col`
is ignored.
"""
newatom = atom.Atom.from_dtype(dtype, dflt)
return cls.from_atom(newatom, pos=pos, _offset=_offset)
@classmethod
def from_type(cls, type, shape=(), dflt=None, pos=None):
"""Create a `Col` definition from a PyTables `type`.
Optional shape, default value and position may be specified as
the `shape`, `dflt` and `pos` arguments, respectively.
"""
newatom = atom.Atom.from_type(type, shape, dflt)
return cls.from_atom(newatom, pos=pos)
@classmethod
def from_kind(cls, kind, itemsize=None, shape=(), dflt=None, pos=None):
"""Create a `Col` definition from a PyTables `kind`.
Optional item size, shape, default value and position may be
specified as the `itemsize`, `shape`, `dflt` and `pos`
arguments, respectively. Bear in mind that not all columns
support a default item size.
"""
newatom = atom.Atom.from_kind(kind, itemsize, shape, dflt)
return cls.from_atom(newatom, pos=pos)
@classmethod
def _subclass_from_prefix(cls, prefix):
"""Get a column subclass for the given `prefix`."""
cname = '%sCol' % prefix
class_from_prefix = cls._class_from_prefix
if cname in class_from_prefix:
return class_from_prefix[cname]
atombase = getattr(atom, '%sAtom' % prefix)
class NewCol(cls, atombase):
"""Defines a non-nested column of a particular type.
The constructor accepts the same arguments as the equivalent
`Atom` class, plus an additional ``pos`` argument for
position information, which is assigned to the `_v_pos`
attribute.
"""
def __init__(self, *args, **kwargs):
pos = kwargs.pop('pos', None)
offset = kwargs.pop('_offset', None)
class_from_prefix = self._class_from_prefix
atombase.__init__(self, *args, **kwargs)
# The constructor of an abstract atom may have changed
# the class of `self` to something different of `NewCol`
# and `atombase` (that's why the prefix map is saved).
if self.__class__ is not NewCol:
colclass = class_from_prefix[self.prefix()]
self.__class__ = colclass
self._v_pos = pos
self._v_offset = offset
__eq__ = same_position(atombase.__eq__)
_is_equal_to_atom = same_position(atombase._is_equal_to_atom)
# XXX: API incompatible change for PyTables 3 line
# Overriding __eq__ blocks inheritance of __hash__ in 3.x
# def __hash__(self):
# return hash((self._v_pos, self.atombase))
if prefix == 'Enum':
_is_equal_to_enumatom = same_position(
atombase._is_equal_to_enumatom)
NewCol.__name__ = cname
class_from_prefix[prefix] = NewCol
return NewCol
def __repr__(self):
# Reuse the atom representation.
atomrepr = super().__repr__()
lpar = atomrepr.index('(')
rpar = atomrepr.rindex(')')
atomargs = atomrepr[lpar + 1:rpar]
classname = self.__class__.__name__
return f'{classname}({atomargs}, pos={self._v_pos})'
def _get_init_args(self):
"""Get a dictionary of instance constructor arguments."""
kwargs = {arg: getattr(self, arg) for arg in ('shape', 'dflt')}
kwargs['pos'] = getattr(self, '_v_pos', None)
return kwargs
def _generate_col_classes():
"""Generate all column classes."""
# Abstract classes are not in the class map.
cprefixes = ['Int', 'UInt', 'Float', 'Time']
for (kind, kdata) in atom.atom_map.items():
if hasattr(kdata, 'kind'): # atom class: non-fixed item size
atomclass = kdata
cprefixes.append(atomclass.prefix())
else: # dictionary: fixed item size
for atomclass in kdata.values():
cprefixes.append(atomclass.prefix())
# Bottom-level complex classes are not in the type map, of course.
# We still want the user to get the compatibility warning, though.
cprefixes.extend(['Complex32', 'Complex64', 'Complex128'])
if hasattr(atom, 'Complex192Atom'):
cprefixes.append('Complex192')
if hasattr(atom, 'Complex256Atom'):
cprefixes.append('Complex256')
for cprefix in cprefixes:
newclass = Col._subclass_from_prefix(cprefix)
yield newclass
# Create all column classes.
# for _newclass in _generate_col_classes():
# exec('%s = _newclass' % _newclass.__name__)
# del _newclass
StringCol = Col._subclass_from_prefix('String')
BoolCol = Col._subclass_from_prefix('Bool')
EnumCol = Col._subclass_from_prefix('Enum')
IntCol = Col._subclass_from_prefix('Int')
Int8Col = Col._subclass_from_prefix('Int8')
Int16Col = Col._subclass_from_prefix('Int16')
Int32Col = Col._subclass_from_prefix('Int32')
Int64Col = Col._subclass_from_prefix('Int64')
UIntCol = Col._subclass_from_prefix('UInt')
UInt8Col = Col._subclass_from_prefix('UInt8')
UInt16Col = Col._subclass_from_prefix('UInt16')
UInt32Col = Col._subclass_from_prefix('UInt32')
UInt64Col = Col._subclass_from_prefix('UInt64')
FloatCol = Col._subclass_from_prefix('Float')
if hasattr(atom, 'Float16Atom'):
Float16Col = Col._subclass_from_prefix('Float16')
Float32Col = Col._subclass_from_prefix('Float32')
Float64Col = Col._subclass_from_prefix('Float64')
if hasattr(atom, 'Float96Atom'):
Float96Col = Col._subclass_from_prefix('Float96')
if hasattr(atom, 'Float128Atom'):
Float128Col = Col._subclass_from_prefix('Float128')
ComplexCol = Col._subclass_from_prefix('Complex')
Complex32Col = Col._subclass_from_prefix('Complex32')
Complex64Col = Col._subclass_from_prefix('Complex64')
Complex128Col = Col._subclass_from_prefix('Complex128')
if hasattr(atom, 'Complex192Atom'):
Complex192Col = Col._subclass_from_prefix('Complex192')
if hasattr(atom, 'Complex256Atom'):
Complex256Col = Col._subclass_from_prefix('Complex256')
TimeCol = Col._subclass_from_prefix('Time')
Time32Col = Col._subclass_from_prefix('Time32')
Time64Col = Col._subclass_from_prefix('Time64')
# Table description classes
# =========================
class Description:
"""This class represents descriptions of the structure of tables.
An instance of this class is automatically bound to Table (see
:ref:`TableClassDescr`) objects when they are created. It provides a
browseable representation of the structure of the table, made of non-nested
(Col - see :ref:`ColClassDescr`) and nested (Description) columns.
Column definitions under a description can be accessed as attributes of it
(*natural naming*). For instance, if table.description is a Description
    instance with a column named col1 under it, the latter can be accessed as
table.description.col1. If col1 is nested and contains a col2 column, this
can be accessed as table.description.col1.col2. Because of natural naming,
the names of members start with special prefixes, like in the Group class
(see :ref:`GroupClassDescr`).
.. rubric:: Description attributes
.. attribute:: _v_colobjects
A dictionary mapping the names of the columns hanging
directly from the associated table or nested column to their
respective descriptions (Col - see :ref:`ColClassDescr` or
Description - see :ref:`DescriptionClassDescr` instances).
.. versionchanged:: 3.0
The *_v_colObjects* attribute has been renamed into
*_v_colobjects*.
.. attribute:: _v_dflts
A dictionary mapping the names of non-nested columns
hanging directly from the associated table or nested column
to their respective default values.
.. attribute:: _v_dtype
The NumPy type which reflects the structure of this
table or nested column. You can use this as the
dtype argument of NumPy array factories.
.. attribute:: _v_dtypes
A dictionary mapping the names of non-nested columns
hanging directly from the associated table or nested column
to their respective NumPy types.
.. attribute:: _v_is_nested
Whether the associated table or nested column contains
further nested columns or not.
.. attribute:: _v_itemsize
The size in bytes of an item in this table or nested column.
.. attribute:: _v_name
The name of this description group. The name of the
root group is '/'.
.. attribute:: _v_names
A list of the names of the columns hanging directly
from the associated table or nested column. The order of the
names matches the order of their respective columns in the
containing table.
.. attribute:: _v_nested_descr
A nested list of pairs of (name, format) tuples for all the columns
under this table or nested column. You can use this as the dtype and
descr arguments of NumPy array factories.
.. versionchanged:: 3.0
The *_v_nestedDescr* attribute has been renamed into
*_v_nested_descr*.
.. attribute:: _v_nested_formats
A nested list of the NumPy string formats (and shapes) of all the
columns under this table or nested column. You can use this as the
formats argument of NumPy array factories.
.. versionchanged:: 3.0
The *_v_nestedFormats* attribute has been renamed into
*_v_nested_formats*.
.. attribute:: _v_nestedlvl
The level of the associated table or nested column in the nested
datatype.
.. attribute:: _v_nested_names
A nested list of the names of all the columns under this table or
nested column. You can use this as the names argument of NumPy array
factories.
.. versionchanged:: 3.0
The *_v_nestedNames* attribute has been renamed into
*_v_nested_names*.
.. attribute:: _v_pathname
Pathname of the table or nested column.
.. attribute:: _v_pathnames
A list of the pathnames of all the columns under this table or nested
column (in preorder). If it does not contain nested columns, this is
exactly the same as the :attr:`Description._v_names` attribute.
.. attribute:: _v_types
A dictionary mapping the names of non-nested columns hanging directly
from the associated table or nested column to their respective PyTables
types.
.. attribute:: _v_offsets
        A list of offsets for all the columns. If the list is empty, it means
        that there is no padding in the data structure. However, the support
for offsets is currently limited to flat tables; for nested tables, the
potential padding is always removed (exactly the same as in pre-3.5
versions), and this variable is set to empty.
.. versionadded:: 3.5
Previous to this version all the compound types were converted
internally to 'packed' types, i.e. with no padding between the
component types. Starting with 3.5, the holes in native HDF5
types (non-nested) are honored and replicated during dataset
and attribute copies.
"""
def __init__(self, classdict, nestedlvl=-1, validate=True, ptparams=None):
if not classdict:
raise ValueError("cannot create an empty data type")
# Do a shallow copy of classdict just in case this is going to
# be shared by other instances
newdict = self.__dict__
newdict["_v_name"] = "/" # The name for root descriptor
newdict["_v_names"] = []
newdict["_v_dtypes"] = {}
newdict["_v_types"] = {}
newdict["_v_dflts"] = {}
newdict["_v_colobjects"] = {}
newdict["_v_is_nested"] = False
nestedFormats = []
nestedDType = []
if not hasattr(newdict, "_v_nestedlvl"):
newdict["_v_nestedlvl"] = nestedlvl + 1
        cols_with_pos = []  # column (position, name) pairs
cols_no_pos = [] # just column names
cols_offsets = [] # the offsets of the columns
        valid_offsets = False  # by default there are no valid offsets
# Check for special variables and convert column descriptions
for (name, descr) in classdict.items():
if name.startswith('_v_'):
if name in newdict:
# print("Warning!")
# special methods &c: copy to newdict, warn about conflicts
warnings.warn("Can't set attr %r in description class %r"
% (name, self))
else:
# print("Special variable!-->", name, classdict[name])
newdict[name] = descr
continue # This variable is not needed anymore
columns = None
if (type(descr) == type(IsDescription) and
issubclass(descr, IsDescription)):
# print("Nested object (type I)-->", name)
columns = descr().columns
elif (type(descr.__class__) == type(IsDescription) and
issubclass(descr.__class__, IsDescription)):
# print("Nested object (type II)-->", name)
columns = descr.columns
elif isinstance(descr, dict):
# print("Nested object (type III)-->", name)
columns = descr
else:
# print("Nested object (type IV)-->", name)
descr = copy.copy(descr)
# The copies above and below ensure that the structures
# provided by the user will remain unchanged even if we
# tamper with the values of ``_v_pos`` here.
if columns is not None:
descr = Description(copy.copy(columns), self._v_nestedlvl,
ptparams=ptparams)
classdict[name] = descr
pos = getattr(descr, '_v_pos', None)
if pos is None:
cols_no_pos.append(name)
else:
cols_with_pos.append((pos, name))
offset = getattr(descr, '_v_offset', None)
if offset is not None:
cols_offsets.append(offset)
# Sort field names:
#
# 1. Fields with explicit positions, according to their
# positions (and their names if coincident).
# 2. Fields with no position, in alphabetical order.
cols_with_pos.sort()
cols_no_pos.sort()
keys = [name for (pos, name) in cols_with_pos] + cols_no_pos
pos = 0
nested = False
# Get properties for compound types
for k in keys:
if validate:
# Check for key name validity
check_name_validity(k)
# Class variables
object = classdict[k]
newdict[k] = object # To allow natural naming
if not isinstance(object, (Col, Description)):
raise TypeError('Passing an incorrect value to a table column.'
' Expected a Col (or subclass) instance and '
'got: "%s". Please make use of the Col(), or '
'descendant, constructor to properly '
'initialize columns.' % object)
object._v_pos = pos # Set the position of this object
object._v_parent = self # The parent description
pos += 1
newdict['_v_colobjects'][k] = object
newdict['_v_names'].append(k)
object.__dict__['_v_name'] = k
if not isinstance(k, str):
# numpy only accepts "str" for field names
# Python 3.x: bytes --> str (unicode)
kk = k.decode()
else:
kk = k
if isinstance(object, Col):
dtype = object.dtype
newdict['_v_dtypes'][k] = dtype
newdict['_v_types'][k] = object.type
newdict['_v_dflts'][k] = object.dflt
nestedFormats.append(object.recarrtype)
baserecarrtype = dtype.base.str[1:]
nestedDType.append((kk, baserecarrtype, dtype.shape))
else: # A description
nestedFormats.append(object._v_nested_formats)
nestedDType.append((kk, object._v_dtype))
nested = True
# Useful for debugging purposes
# import traceback
# if ptparams is None:
# print("*** print_stack:")
# traceback.print_stack()
# Check whether we are gonna use padding or not. Two possibilities:
# 1) Make padding True by default (except if ALLOW_PADDING is set
# to False)
# 2) Make padding False by default (except if ALLOW_PADDING is set
# to True)
# Currently we choose 1) because it favours honoring padding even on
# unhandled situations (should be very few).
# However, for development, option 2) is recommended as it catches
# most of the unhandled situations.
allow_padding = ptparams is None or ptparams['ALLOW_PADDING']
# allow_padding = ptparams is not None and ptparams['ALLOW_PADDING']
if (allow_padding and
len(cols_offsets) > 1 and
len(keys) == len(cols_with_pos) and
len(keys) == len(cols_offsets) and
not nested): # TODO: support offsets with nested types
# We have to sort the offsets too, as they must follow the column
            # order. As the offsets and the pos should be placed in the same
# order, a single sort is enough here.
cols_offsets.sort()
valid_offsets = True
else:
newdict['_v_offsets'] = []
# Assign the format list to _v_nested_formats
newdict['_v_nested_formats'] = nestedFormats
if self._v_nestedlvl == 0:
# Get recursively nested _v_nested_names and _v_nested_descr attrs
self._g_set_nested_names_descr()
# Get pathnames for nested groups
self._g_set_path_names()
        # Check whether the _v_byteorder attribute has been used and issue an error
if hasattr(self, "_v_byteorder"):
raise ValueError(
"Using a ``_v_byteorder`` in the description is obsolete. "
"Use the byteorder parameter in the constructor instead.")
# Compute the dtype with offsets or without
# print("offsets ->", cols_offsets, nestedDType, nested, valid_offsets)
if valid_offsets:
# TODO: support offsets within nested types
dtype_fields = {
'names': newdict['_v_names'], 'formats': nestedFormats,
'offsets': cols_offsets}
itemsize = newdict.get('_v_itemsize', None)
if itemsize is not None:
dtype_fields['itemsize'] = itemsize
dtype = np.dtype(dtype_fields)
else:
dtype = np.dtype(nestedDType)
newdict['_v_dtype'] = dtype
newdict['_v_itemsize'] = dtype.itemsize
newdict['_v_offsets'] = [dtype.fields[name][1] for name in dtype.names]
def _g_set_nested_names_descr(self):
"""Computes the nested names and descriptions for nested datatypes."""
names = self._v_names
fmts = self._v_nested_formats
self._v_nested_names = names[:] # Important to do a copy!
self._v_nested_descr = list(zip(names, fmts))
for i, name in enumerate(names):
new_object = self._v_colobjects[name]
if isinstance(new_object, Description):
new_object._g_set_nested_names_descr()
# replace the column nested name by a correct tuple
self._v_nested_names[i] = (name, new_object._v_nested_names)
self._v_nested_descr[i] = (name, new_object._v_nested_descr)
# set the _v_is_nested flag
self._v_is_nested = True
def _g_set_path_names(self):
"""Compute the pathnames for arbitrary nested descriptions.
This method sets the ``_v_pathname`` and ``_v_pathnames``
attributes of all the elements (both descriptions and columns)
in this nested description.
"""
def get_cols_in_order(description):
return [description._v_colobjects[colname]
for colname in description._v_names]
def join_paths(path1, path2):
if not path1:
return path2
return f'{path1}/{path2}'
# The top of the stack always has a nested description
# and a list of its child columns
# (be they nested ``Description`` or non-nested ``Col`` objects).
# In the end, the list contains only a list of column paths
# under this one.
#
# For instance, given this top of the stack::
#
# (<Description X>, [<Column A>, <Column B>])
#
# After computing the rest of the stack, the top is::
#
# (<Description X>, ['a', 'a/m', 'a/n', ... , 'b', ...])
stack = []
# We start by pushing the top-level description
# and its child columns.
self._v_pathname = ''
stack.append((self, get_cols_in_order(self)))
while stack:
desc, cols = stack.pop()
head = cols[0]
# What's the first child in the list?
if isinstance(head, Description):
# A nested description. We remove it from the list and
# push it with its child columns. This will be the next
# handled description.
head._v_pathname = join_paths(desc._v_pathname, head._v_name)
stack.append((desc, cols[1:])) # alter the top
stack.append((head, get_cols_in_order(head))) # new top
elif isinstance(head, Col):
# A non-nested column. We simply remove it from the
# list and append its name to it.
head._v_pathname = join_paths(desc._v_pathname, head._v_name)
cols.append(head._v_name) # alter the top
stack.append((desc, cols[1:])) # alter the top
else:
# Since paths and names are appended *to the end* of
# children lists, a string signals that no more children
# remain to be processed, so we are done with the
# description at the top of the stack.
assert isinstance(head, str)
# Assign the computed set of descendent column paths.
desc._v_pathnames = cols
if len(stack) > 0:
# Compute the paths with respect to the parent node
# (including the path of the current description)
# and append them to its list.
descName = desc._v_name
colPaths = [join_paths(descName, path) for path in cols]
colPaths.insert(0, descName)
parentCols = stack[-1][1]
parentCols.extend(colPaths)
# (Nothing is pushed, we are done with this description.)
def _f_walk(self, type='All'):
"""Iterate over nested columns.
If type is 'All' (the default), all column description objects (Col and
Description instances) are yielded in top-to-bottom order (preorder).
If type is 'Col' or 'Description', only column descriptions of that
type are yielded.
"""
if type not in ["All", "Col", "Description"]:
raise ValueError("""\
type can only take the parameters 'All', 'Col' or 'Description'.""")
stack = [self]
while stack:
object = stack.pop(0) # pop at the front so as to ensure the order
if type in ["All", "Description"]:
yield object # yield description
for name in object._v_names:
new_object = object._v_colobjects[name]
if isinstance(new_object, Description):
stack.append(new_object)
else:
if type in ["All", "Col"]:
yield new_object # yield column
def __repr__(self):
"""Gives a detailed Description column representation."""
rep = ['%s\"%s\": %r' %
(" " * self._v_nestedlvl, k, self._v_colobjects[k])
for k in self._v_names]
return '{\n %s}' % (',\n '.join(rep))
def __str__(self):
"""Gives a brief Description representation."""
return f'Description({self._v_nested_descr})'
class MetaIsDescription(type):
"""Helper metaclass to return the class variables as a dictionary."""
def __new__(mcs, classname, bases, classdict):
"""Return a new class with a "columns" attribute filled."""
newdict = {"columns": {}, }
if '__doc__' in classdict:
newdict['__doc__'] = classdict['__doc__']
for b in bases:
if "columns" in b.__dict__:
newdict["columns"].update(b.__dict__["columns"])
for k in classdict:
# if not (k.startswith('__') or k.startswith('_v_')):
# We let pass _v_ variables to configure class behaviour
if not (k.startswith('__')):
newdict["columns"][k] = classdict[k]
# Return a new class with the "columns" attribute filled
return type.__new__(mcs, classname, bases, newdict)
class IsDescription(metaclass=MetaIsDescription):
"""Description of the structure of a table or nested column.
This class is designed to be used as an easy, yet meaningful way to
describe the structure of new Table (see :ref:`TableClassDescr`) datasets
or nested columns through the definition of *derived classes*. In order to
define such a class, you must declare it as descendant of IsDescription,
with as many attributes as columns you want in your table. The name of each
attribute will become the name of a column, and its value will hold a
description of it.
Ordinary columns can be described using instances of the Col class (see
:ref:`ColClassDescr`). Nested columns can be described by using classes
derived from IsDescription, instances of it, or name-description
dictionaries. Derived classes can be declared in place (in which case the
column takes the name of the class) or referenced by name.
Nested columns can have a _v_pos special attribute which sets the
*relative* position of the column among sibling columns *also having
explicit positions*. The pos constructor argument of Col instances is used
for the same purpose. Columns with no explicit position will be placed
afterwards in alphanumeric order.
Once you have created a description object, you can pass it to the Table
constructor, where all the information it contains will be used to define
the table structure.
.. rubric:: IsDescription attributes
.. attribute:: _v_pos
Sets the position of a possible nested column description among its
sibling columns. This attribute can be specified *when declaring*
an IsDescription subclass to complement its *metadata*.
.. attribute:: columns
Maps the name of each column in the description to its own descriptive
object. This attribute is *automatically created* when an IsDescription
subclass is declared. Please note that declared columns can no longer
be accessed as normal class variables after its creation.
"""
def descr_from_dtype(dtype_, ptparams=None):
"""Get a description instance and byteorder from a (nested) NumPy dtype."""
fields = {}
fbyteorder = '|'
for name in dtype_.names:
dtype, offset = dtype_.fields[name][:2]
kind = dtype.base.kind
byteorder = dtype.base.byteorder
if byteorder in '><=':
if fbyteorder not in ['|', byteorder]:
raise NotImplementedError(
"structured arrays with mixed byteorders "
"are not supported yet, sorry")
fbyteorder = byteorder
# Non-nested column
if kind in 'biufSUc':
col = Col.from_dtype(dtype, pos=offset, _offset=offset)
# Nested column
elif kind == 'V' and dtype.shape in [(), (1,)]:
if dtype.shape != ():
warnings.warn(
"nested descriptions will be converted to scalar")
col, _ = descr_from_dtype(dtype.base, ptparams=ptparams)
col._v_pos = offset
col._v_offset = offset
else:
raise NotImplementedError(
"structured arrays with columns with type description ``%s`` "
"are not supported yet, sorry" % dtype)
fields[name] = col
return Description(fields, ptparams=ptparams), fbyteorder
def dtype_from_descr(descr, byteorder=None, ptparams=None):
"""Get a (nested) NumPy dtype from a description instance and byteorder.
The descr parameter can be a Description or IsDescription
instance, sub-class of IsDescription or a dictionary.
"""
if isinstance(descr, dict):
descr = Description(descr, ptparams=ptparams)
elif (type(descr) == type(IsDescription)
and issubclass(descr, IsDescription)):
descr = Description(descr().columns, ptparams=ptparams)
elif isinstance(descr, IsDescription):
descr = Description(descr.columns, ptparams=ptparams)
elif not isinstance(descr, Description):
raise ValueError('invalid description: %r' % descr)
dtype_ = descr._v_dtype
if byteorder and byteorder != '|':
dtype_ = dtype_.newbyteorder(byteorder)
return dtype_
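# Editor-added sketch, not part of the original module: converting a plain
# dictionary description into a structured NumPy dtype with dtype_from_descr.
# Wrapped in a helper so nothing runs at import time; the column names are
# hypothetical.
def _dtype_from_descr_example():
    desc = {'idnumber': Int32Col(pos=0), 'value': Float64Col(pos=1)}
    # On a little-endian machine this yields something like
    # dtype([('idnumber', '<i4'), ('value', '<f8')])
    return dtype_from_descr(desc, byteorder='=')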
if __name__ == "__main__":
"""Test code."""
class Info(IsDescription):
_v_pos = 2
Name = UInt32Col()
Value = Float64Col()
class Test(IsDescription):
"""A description that has several columns."""
x = Col.from_type("int32", 2, 0, pos=0)
y = Col.from_kind('float', dflt=1, shape=(2, 3))
z = UInt8Col(dflt=1)
color = StringCol(2, dflt=" ")
# color = UInt32Col(2)
Info = Info()
class info(IsDescription):
_v_pos = 1
name = UInt32Col()
value = Float64Col(pos=0)
y2 = Col.from_kind('float', dflt=1, shape=(2, 3), pos=1)
z2 = UInt8Col(dflt=1)
class info2(IsDescription):
y3 = Col.from_kind('float', dflt=1, shape=(2, 3))
z3 = UInt8Col(dflt=1)
name = UInt32Col()
value = Float64Col()
class info3(IsDescription):
name = UInt32Col()
value = Float64Col()
y4 = Col.from_kind('float', dflt=1, shape=(2, 3))
z4 = UInt8Col(dflt=1)
# class Info(IsDescription):
# _v_pos = 2
# Name = StringCol(itemsize=2)
# Value = ComplexCol(itemsize=16)
# class Test(IsDescription):
# """A description that has several columns"""
# x = Col.from_type("int32", 2, 0, pos=0)
# y = Col.from_kind('float', dflt=1, shape=(2,3))
# z = UInt8Col(dflt=1)
# color = StringCol(2, dflt=" ")
# Info = Info()
# class info(IsDescription):
# _v_pos = 1
# name = StringCol(itemsize=2)
# value = ComplexCol(itemsize=16, pos=0)
# y2 = Col.from_kind('float', dflt=1, shape=(2,3), pos=1)
# z2 = UInt8Col(dflt=1)
# class info2(IsDescription):
# y3 = Col.from_kind('float', dflt=1, shape=(2,3))
# z3 = UInt8Col(dflt=1)
# name = StringCol(itemsize=2)
# value = ComplexCol(itemsize=16)
# class info3(IsDescription):
# name = StringCol(itemsize=2)
# value = ComplexCol(itemsize=16)
# y4 = Col.from_kind('float', dflt=1, shape=(2,3))
# z4 = UInt8Col(dflt=1)
# example cases of class Test
klass = Test()
# klass = Info()
desc = Description(klass.columns)
print("Description representation (short) ==>", desc)
print("Description representation (long) ==>", repr(desc))
print("Column names ==>", desc._v_names)
print("Column x ==>", desc.x)
print("Column Info ==>", desc.Info)
print("Column Info.value ==>", desc.Info.Value)
print("Nested column names ==>", desc._v_nested_names)
print("Defaults ==>", desc._v_dflts)
print("Nested Formats ==>", desc._v_nested_formats)
print("Nested Descriptions ==>", desc._v_nested_descr)
print("Nested Descriptions (info) ==>", desc.info._v_nested_descr)
print("Total size ==>", desc._v_dtype.itemsize)
# check _f_walk
for object in desc._f_walk():
if isinstance(object, Description):
print("******begin object*************", end=' ')
print("name -->", object._v_name)
# print("name -->", object._v_dtype.name)
# print("object childs-->", object._v_names)
# print("object nested childs-->", object._v_nested_names)
print("totalsize-->", object._v_dtype.itemsize)
else:
# pass
print("leaf -->", object._v_name, object.dtype)
class testDescParent(IsDescription):
c = Int32Col()
class testDesc(testDescParent):
pass
assert 'c' in testDesc.columns
| bsd-3-clause |
herilalaina/scikit-learn | examples/plot_missing_values.py | 33 | 3059 | """
======================================================
Imputing missing values before building an estimator
======================================================
This example shows that imputing the missing values can give better
results than discarding the samples containing any missing value.
Imputing does not always improve the predictions, so please check via
cross-validation. Sometimes dropping rows or using marker values is
more effective.
Missing values can be replaced by the mean, the median or the most frequent
value using the ``strategy`` hyper-parameter.
The median is a more robust estimator for data with high magnitude variables
which could dominate results (otherwise known as a 'long tail').
Script output::
Score with the entire dataset = 0.56
Score without the samples containing missing values = 0.48
Score after imputation of the missing values = 0.55
In this case, imputing helps the classifier get close to the original score.
"""
import numpy as np
from sklearn.datasets import load_boston
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.model_selection import cross_val_score
rng = np.random.RandomState(0)
dataset = load_boston()
X_full, y_full = dataset.data, dataset.target
n_samples = X_full.shape[0]
n_features = X_full.shape[1]
# Estimate the score on the entire dataset, with no missing values
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_full, y_full).mean()
print("Score with the entire dataset = %.2f" % score)
# Add missing values in 75% of the lines
missing_rate = 0.75
n_missing_samples = int(np.floor(n_samples * missing_rate))
missing_samples = np.hstack((np.zeros(n_samples - n_missing_samples,
dtype=np.bool),
np.ones(n_missing_samples,
dtype=np.bool)))
rng.shuffle(missing_samples)
missing_features = rng.randint(0, n_features, n_missing_samples)
# Estimate the score without the lines containing missing values
X_filtered = X_full[~missing_samples, :]
y_filtered = y_full[~missing_samples]
estimator = RandomForestRegressor(random_state=0, n_estimators=100)
score = cross_val_score(estimator, X_filtered, y_filtered).mean()
print("Score without the samples containing missing values = %.2f" % score)
# Estimate the score after imputation of the missing values
X_missing = X_full.copy()
X_missing[np.where(missing_samples)[0], missing_features] = 0
y_missing = y_full.copy()
estimator = Pipeline([("imputer", Imputer(missing_values=0,
strategy="mean",
axis=0)),
("forest", RandomForestRegressor(random_state=0,
n_estimators=100))])
score = cross_val_score(estimator, X_missing, y_missing).mean()
print("Score after imputation of the missing values = %.2f" % score)
| bsd-3-clause |
zmr/namsel | recognize.py | 1 | 48961 | #! /usr/bin/python
# encoding: utf-8
'''Primary routines that manage OCR recognition'''
from PIL import Image
from bisect import bisect, bisect_left
import cPickle as pickle
from classify import load_cls
import codecs
from config_manager import Config
from cv2 import drawContours
import cv2 as cv
import datetime
from fast_utils import fadd_padding, ftrim
from feature_extraction import normalize_and_extract_features
from line_breaker import LineCut, LineCluster
import logging
import numpy as np
import os
from page_elements2 import PageElements as PE2
from random import choice
from root_based_finder import is_non_std, word_parts
from segment import Segmenter, combine_many_boxes
import shelve
import signal
import simplejson as json
from sklearn.externals import joblib
import sys
from termset import syllables
from tparser import parse_syllables
from utils import local_file
from viterbi_cython import viterbi_cython
# from viterbi_search import viterbi_search, word_bigram
import warnings
cls = load_cls('logistic-cls')
## Ignore warnings. This is mostly in response to incessant sklearn
## warnings about passing in 1d arrays
warnings.filterwarnings("ignore")
print 'ignoring all warnings'
###
rbfcls = load_cls('rbf-cls')
predict_log_proba = cls.predict_log_proba
predict_proba = cls.predict_proba
# Trained characters are labeled by number. Open the shelve that contains
# the mappings between the Unicode character and its number label.
allchars = shelve.open(local_file('allchars_dict2'))
char_to_dig = allchars['allchars']
dig_to_char = allchars['label_chars']
allchars.close()
## Uncomment the line below when enabling viterbi_hidden_tsek
gram3 = pickle.load(open(local_file('3gram_stack_dict.pkl'),'rb'))
word_parts = set(word_parts)
PCA_TRANS = False
trs_prob = np.load(open(local_file('stack_bigram_mat.npz')))
trs_prob = trs_prob[trs_prob.files[0]]
cdmap = pickle.load(open(local_file('extended_char_dig.pkl')))
# HMM data structures
trans_p = np.load(open(local_file('stack_bigram_logprob32.npz')))
trans_p = trans_p[trans_p.files[0]].transpose()
start_p = np.load(open(local_file('stack_start_logprob32.npz')))
start_p = start_p[start_p.files[0]]
start_p_nonlog = np.exp(start_p)
## Uncomment below for syllable bigram
syllable_bigram = pickle.load(open(local_file('syllable_bigram.pkl'), 'rb')) #THIS ONE
def get_trans_prob(stack1, stack2):
try:
return trs_prob[cdmap[stack1], cdmap[stack2]]
except KeyError:
print 'Warning: Transition matrix char-dig map has not been updated with new chars'
return .25
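# Editor-added sketch, not part of the original code: get_trans_prob simply
# indexes the stack-transition matrix loaded above, falling back to .25 (with a
# warning) for stacks missing from cdmap. Wrapped in a function so nothing runs
# at import time; the two stacks below are illustrative and assumed to be in cdmap.
def _get_trans_prob_example():
    return get_trans_prob(u'ཀ', u'ར')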
#############################################
### Post-processing functions ###
#############################################
def viterbi(states, start_p, trans_p, emit_prob):
'''A basic viterbi decoder implementation
states: a vector or list of states 0 to n
start_p: a matrix or vector of start probabilities
trans_p: a matrix of transition probabilities
    emit_prob: a T x n matrix of per-class output probabilities,
        where n is the number of states and T is the number
        of observations (time steps)
'''
V = [{}]
path = {}
for y in states:
V[0][y] = start_p[y] * emit_prob[0][y]
path[y] = [y]
# Run Viterbi for t > 0
for t in range(1,len(emit_prob)):
V.append({})
newpath = {}
for y in states:
(prob, state) = max([(V[t-1][y0] * trans_p[y0][y] * emit_prob[t][y], y0) for y0 in states])
V[t][y] = prob
newpath[y] = path[state] + [y]
path = newpath
(prob, state) = max([(V[len(emit_prob) - 1][y], y) for y in states])
return ''.join(dig_to_char[s] for s in path[state])
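# Editor-added sketch, not part of the original code: the decoder above expects
# start_p as a length-n vector, trans_p as an n x n matrix, and emit_prob as a
# T x n matrix of per-step class probabilities. Wrapped in a function so nothing
# runs at import time; it assumes the toy labels 0 and 1 exist in dig_to_char,
# so treat it as an illustration of the expected input shapes only.
def _viterbi_toy_example():
    toy_states = [0, 1]
    toy_start = np.array([0.6, 0.4])
    toy_trans = np.array([[0.7, 0.3],
                          [0.4, 0.6]])
    toy_emit = np.array([[0.9, 0.1],   # step 0 favors state 0
                         [0.2, 0.8]])  # step 1 favors state 1
    return viterbi(toy_states, toy_start, toy_trans, toy_emit)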
def viterbi_hidden_tsek(states, start_p, trans_p, emit_prob):
'''Given a series of recognized characters, infer
likely positions of missing punctuation
Parameters
--------
states: the possible classes that can be assigned to (integer codes of stacks)
start_p: pre-computed starting probabilities of Tibetan syllables
trans_p: pre-computed transition probabilities between Tibetan stacks
emit_prob: matrix of per-class probability for t steps
Returns:
List of possible string candidates with tsek inserted
'''
V = [{}]
path = {}
tsek_dig = char_to_dig[u'་']
# Initialize base cases (t == 0)
for y in states:
V[0][y] = start_p[y] * emit_prob[0][y]
path[y] = [y]
num_obs = len(emit_prob)
# Run Viterbi for t > 0
for t in range(1,num_obs*2-1):
V.append({})
newpath = {}
if t % 2 == 1:
prob_states = []
for y0 in states:
im_path = path.get(y0)
if not im_path:
continue
if len(im_path) > 1:
run_without_tsek = 0
for i in im_path[::-1]:
if i != tsek_dig:
run_without_tsek += 1
else:
break
pr3 = gram3.get(path[y0][-2], {}).get(path[y0][-1],{}).get(tsek_dig,.5)*(1+run_without_tsek*2)
else:
pr3 = .75
try:
prob_states.append((V[t-1][y0]*trans_p[y0][tsek_dig]*pr3, y0))
except:
print '-'*20
print trans_p[y0]
print V[t-1]
print '-'*20
raise
prob, state = max(prob_states)
V[t][tsek_dig] = prob
newpath[tsek_dig] = path[state] + [tsek_dig]
path.update(newpath)
else:
srted = np.argsort(emit_prob[t/2])
for y in srted[-50:]:
#### normal
# prob, state = max([(V[t-2][y0]*trans_p[y0][y]*emit_prob[t/2][y], y0) for y0 in states])
####
#### Experimental
prob_states = []
for y0 in states:
im_path = path.get(y0,[])[-4:] # immediate n-2 in path
t_m2 = V[t-2].get(y0)
if not im_path or not t_m2:
continue
prob_states.append((V[t-2][y0]*trans_p[y0][y]*emit_prob[t/2][y], y0))
if not prob_states:
continue
prob, state = max(prob_states)
tsek_prob, tsek_dig = (V[t-1][tsek_dig]*trans_p[tsek_dig][y]*emit_prob[t/2][y], tsek_dig)
if tsek_prob > prob:
prob = tsek_prob
state = tsek_dig
V[t][y] = prob
newpath[y] = path[state] + [y]
path = newpath
if not V[t].keys():
raise ValueError
(prob, state) = max([(V[t][y], y) for y in V[t].keys()])
(prob, state) = max([(V[len(V)-1][y], y) for y in V[len(V)-1].keys()])
str_perms = _get_tsek_permutations(''.join(dig_to_char[s] for s in path[state]))
return str_perms
def _get_tsek_permutations(tsr):
tsek_count = tsr.count(u'་')
syls = parse_syllables(tsr, omit_tsek=False)
all_candidates = []
if tsek_count > 8:
print 'too many permutations'
return [tsr]
elif tsek_count == 0:
print 'no tsek'
return [tsr]
else:
ops = [['0','1'] for i in range(tsek_count)]
allops = iter(_enumrate_full_paths(ops))
for op in allops:
nstr = []
op = list(op[::-1])
for i in syls:
if i == u'་' :
cur_op = op.pop()
if cur_op == '0':
continue
else:
nstr.append(i)
else:
nstr.append(i)
nstr = ''.join(nstr)
new_parse = parse_syllables(nstr)
for p in new_parse:
if is_non_std(p) and p not in syllables:
print nstr, 'rejected'
break
else:
print nstr, 'accepted'
all_candidates.append(nstr)
if len(all_candidates) == 0:
all_candidates = [tsr]
return all_candidates
def hmm_recognize(segmentation):
    '''Only used in the special case of doing tsek-insertion post-processing
    Parameters:
    __________
    segmentation: a segmentation object
    Returns
    _______
    A tuple (prob, string) containing the probability of the segmented
    and recognized string, and the recognized string itself
'''
nstates = trs_prob.shape[0]
states = range(start_p.shape[0])
obs = []
bxs = []
for num, line in enumerate(segmentation.vectors):
line_boxes = segmentation.new_boxes[num]
for obn, ob in enumerate(line):
if not isinstance(ob, unicode):
obs.append(ob.flatten())
bxs.append(line_boxes[obn])
else:
print ob,
print 'hmm omitting unicode part'
if bxs:
outbox = list(combine_many_boxes(bxs))
else:
print 'RETURNED NONE'
return (0, '')
emit_p = cls.predict_proba(obs)
results = []
syllable = []
for em in emit_p:
char = dig_to_char[np.argmax(em)]
if char in (u'་', u'།'):
if syllable:
prob, res = viterbi_hidden_tsek(states, start_p, trs_prob, syllable)
results.append(res)
results.append(char)
syllable = []
else:
syllable.append(em)
if syllable:
prob, hmm_out = viterbi_hidden_tsek(states, start_p, trs_prob, syllable)
results.append(hmm_out)
else:
prob = 0
hmm_out = ''
results = ''.join(results)
print results, '<---RESULT'
return (prob, results)
def _enumrate_full_paths(tree):
if len(tree) == 1:
return tree[0]
combs = []
frow = tree[-1]
srow = tree[-2]
for s in srow:
for f in frow:
combs.append(s+f)
tree.pop()
tree.pop()
tree.append(combs)
return _enumrate_full_paths(tree)
def bigram_prob(syl_list):
return np.prod([syllable_bigram.get(syl_list[i], {}).get(syl_list[i+1], 1e-5) \
for i in range(len(syl_list) -1 )])
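# Editor-added sketch, not part of the original code: bigram_prob multiplies the
# stored probability of each adjacent syllable pair, defaulting to 1e-5 for
# unseen pairs. The toy table below is hypothetical and only mirrors the lookup
# pattern; it does not reflect the contents of the real syllable_bigram pickle.
def _bigram_prob_toy_example():
    toy_bigram = {u'bkra': {u'shis': 0.2}, u'shis': {u'bde': 0.1}}
    syls = [u'bkra', u'shis', u'bde']
    # 0.2 * 0.1 = 0.02
    return np.prod([toy_bigram.get(syls[i], {}).get(syls[i + 1], 1e-5)
                    for i in range(len(syls) - 1)])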
def max_syllable_bigram(choices):
best_prob = 0.0
best_s = ''
for s in choices:
print s, 'is a choice'
if not isinstance(s, list):
s = parse_syllables(s)
prob = bigram_prob(s)
if prob > best_prob:
best_prob = prob
best_s = s
best_s = u'་'.join(best_s)
return best_prob, best_s
def hmm_recognize_bigram(segmentation):
states = range(start_p.shape[0])
obs = []
bxs = []
for num, line in enumerate(segmentation.vectors):
line_boxes = segmentation.new_boxes[num]
for obn, ob in enumerate(line):
if hasattr(ob, 'flatten'):
obs.append(ob.flatten())
bxs.append(line_boxes[obn])
else:
print ob,
print 'hmm omitting unicode part'
if not obs:
return (0, '')
emit_p = cls.predict_proba(obs)
results = []
syllable = []
for em in emit_p:
char = dig_to_char[np.argmax(em)]
if char in (u'་', u'།'):
if syllable:
res = viterbi_hidden_tsek(states, start_p_nonlog, trs_prob, syllable)
results.append(res)
results.append(char)
syllable = []
else:
syllable.append(em)
if syllable:
hmm_out = viterbi_hidden_tsek(states, start_p_nonlog, trs_prob, syllable)
results.append(hmm_out)
else:
prob = 0
hmm_out = ''
all_paths = _enumrate_full_paths(results)
prob, results = max_syllable_bigram(all_paths)
print results, 'RESULTS'
return (prob, results)
#############################################
### Recognizers
#############################################
def recognize_chars(segmentation, tsek_insert_method='baseline', ):
'''Recognize characters using segmented char data
Parameters:
--------------------
segmentation: an instance of PechaCharSegmenter or Segmenter
Returns:
--------------
    results: list of lists containing [x, y, width, height, unicode] for each
        recognized stack on each line of the page'''
results = []
tsek_mean = segmentation.final_box_info.tsek_mean
width_dists = {}
for l, vectors in enumerate(segmentation.vectors):
if not vectors:
print 'no vectors...'
continue
tmp_result = []
new_boxes = segmentation.new_boxes[l]
small_chars = segmentation.line_info.small_cc_lines_chars[l]
#FIXME: define emph lines for line cut
#### Line Cut has no emph_lines object so need to work around for now...
emph_markers = getattr(segmentation.line_info, 'emph_lines', [])
if emph_markers:
emph_markers = emph_markers[l]
img_arr = segmentation.line_info.shapes.img_arr
left_edges = [b[0] for b in new_boxes]
tsek_widths = []
for s in small_chars[::-1]: # consider small char from end of line going backward. backward useful for misplaced tsek often and maybe for TOC though should check
# for s in small_chars: # consider small char from end of line going backward. backward useful for misplaced tsek often and maybe for TOC though should check
cnt = segmentation.line_info.shapes.contours[s]
bx = segmentation.line_info.shapes.get_boxes()[s]
bx = list(bx)
x,y,w,h = bx
char_arr = np.ones((h,w), dtype=np.uint8)
offset = (-x, -y)
drawContours(char_arr, [cnt], -1,0, thickness = -1, offset=offset)
feature_vect = normalize_and_extract_features(char_arr)
prd = classify(feature_vect)
insertion_pos = bisect(left_edges, x)
left_items = 6
right_items = 5
if insertion_pos >= len(new_boxes):
# insertion is at or near end of line and needs more left
# neighbors to compensate for there being less chars to define the baseline
left_items = 12
elif insertion_pos <= len(new_boxes):
# same as above except at front of line
right_items = 12
if tsek_insert_method == 'baseline':
top = 1000000 # arbitrary high number
bottom = 0
#### Get min or max index to avoid reaching beyond edges of the line
lower = max(insertion_pos - left_items, 0)
upper = min(len(new_boxes)-1, insertion_pos+right_items)
####
left = new_boxes[lower][0]
right = new_boxes[upper][0] + new_boxes[upper][2]
if insertion_pos < len(new_boxes):
mid = new_boxes[insertion_pos][0] + new_boxes[insertion_pos][2]
else:
mid = right
for j in new_boxes[lower:upper]:
if j[1] < top:
top = j[1]
if j[1] + j[3] > bottom:
bottom = j[1] + j[3]
local_span = bottom - top
if prd == u'་' and local_span > 0:
left_sum = img_arr[top:bottom,left:mid].sum(axis=1)
right_sum = img_arr[top:bottom,mid:right].sum(axis=1)
local_baseline_left = top + left_sum.argmin()
if mid != right:
local_baseline_right = top + right_sum.argmin()
else:
local_baseline_right = local_baseline_left
if ((local_baseline_left >= bx[1] and local_baseline_left <= bx[1] + bx[3]) or
(local_baseline_right >= bx[1] and local_baseline_right <= bx[1] + bx[3])): #or
# (entire_local_baseline >= bx[1] and entire_local_baseline <= bx[1] + bx[3])):
### Account for fact that the placement of a tsek could be
# before or after its indicated insertion pos
### experimental.. only need with certain fonts e.g. "book 6"
## in samples
if insertion_pos <= len(new_boxes):
# cur_box_in_pos = new_boxes[insertion_pos]
prev_box = new_boxes[insertion_pos-1]
# left_cur = cur_box_in_pos[0]
left_prev = prev_box[0]
if 0 <= x - left_prev < w and 2*w < prev_box[2]:
insertion_pos -= 1
vectors.insert(insertion_pos, prd)
new_boxes.insert(insertion_pos, bx)
left_edges.insert(insertion_pos, bx[0])
tsek_widths.append(bx[2])
elif bx[1] >= top -.25*local_span and bx[1] + bx[3] <= bottom + local_span*.25:
vectors.insert(insertion_pos, prd)
new_boxes.insert(insertion_pos, bx)
left_edges.insert(insertion_pos, bx[0])
else:
vectors.insert(insertion_pos, prd)
new_boxes.insert(insertion_pos, bx)
left_edges.insert(insertion_pos, bx[0])
tsek_mean = np.mean(tsek_widths)
for em in emph_markers:
marker = dig_to_char[segmentation.line_info.shapes.cached_pred_prob[em][0]]
marker_prob = segmentation.line_info.shapes.cached_pred_prob[em][1]
bx = segmentation.line_info.shapes.get_boxes()[em]
bx = list(bx)
x,y,w,h = bx
insertion_pos = bisect(left_edges, x)
bx.append(marker_prob)
bx.append(marker)
vectors.insert(insertion_pos, marker)
new_boxes.insert(insertion_pos, bx)
left_edges.insert(insertion_pos, bx[0])
# tsek_std = np.std(tsek_widths)
if len(vectors) == 1: i = -1
for i, v in enumerate(vectors[:-1]):
if new_boxes[i+1][0] - (new_boxes[i][0] + new_boxes[i][2]) >= 2*tsek_mean:
if not isinstance(v, unicode):
prd = classify(v, pca_trans=PCA_TRANS, multi=False)
else:
prd = v
new_boxes[i].append(prd)
tmp_result.append(new_boxes[i])
tmp_result.append([-1,-1,-1,-1, u' '])
else:
if not isinstance(v, unicode):
prd = classify(v, pca_trans=PCA_TRANS, multi=False)
### Assume that a tsek shouldn't show up at this point
### a more reliable way to do this is to better
# if prd == u'་':
# prbs = cls.predict_proba(v)[0]
# ind_probs = zip(range(len(prbs)), prbs)
# ind_probs.sort(key=lambda x: x[1])
# prd = dig_to_char[ind_probs[-2][0]]
else:
prd = v
if not width_dists.get(prd):
width_dists[prd] = [new_boxes[i][2]]
else:
width_dists[prd].append(new_boxes[i][2])
new_boxes[i].append(prd)
tmp_result.append(new_boxes[i])
if not isinstance(vectors[-1], unicode):
prd = classify(vectors[-1], pca_trans=PCA_TRANS, multi=False)
else:
prd = vectors[-1]
new_boxes[-1].append(prd)
tmp_result.append(new_boxes[-1])
results.append(tmp_result)
return results
def recognize_chars_hmm(segmentation, tsek_insert_method='baseline', ):
'''Recognize characters using segmented char data
Parameters:
--------------------
segmentation: an instance of PechaCharSegmenter or Segmenter
Returns:
--------------
    results: list of lists containing [x, y, width, height, prob, unicode], specifying
    the coordinates of the bounding box of each stack, its probability, and its
    unicode characters -- on each line of the page
'''
n_states = trans_p.shape[0]
results = []
tsek_mean = segmentation.final_box_info.tsek_mean
cached_features = segmentation.line_info.shapes.cached_features
cached_pred_prob = segmentation.line_info.shapes.cached_pred_prob
# width_dists = {}
# times = []
for l, vectors in enumerate(segmentation.vectors):
if not vectors:
print 'no vectors...'
continue
tmp_result = []
new_boxes = segmentation.new_boxes[l]
small_chars = segmentation.line_info.small_cc_lines_chars[l]
#FIXME: define emph lines for line cut
#### Line Cut has no emph_lines object so need to work around for now...
emph_markers = getattr(segmentation.line_info, 'emph_lines', [])
if emph_markers:
emph_markers = emph_markers[l]
img_arr = segmentation.line_info.shapes.img_arr
left_edges = [b[0] for b in new_boxes]
tsek_widths = []
for s in small_chars[::-1]: # consider small char from end of line going backward. backward useful for misplaced tsek often and maybe for TOC though should check
bx = segmentation.line_info.shapes.get_boxes()[s]
bx = list(bx)
x,y,w,h = bx
try:
feature_vect = cached_features[s]
inx, probs = cached_pred_prob[s]
prob = probs[inx]
prd = dig_to_char[inx]
# else:
# vect = normalize_and_extract_features(letter)
except:
cnt = segmentation.line_info.shapes.contours[s]
char_arr = np.ones((h,w), dtype=np.uint8)
offset = (-x, -y)
drawContours(char_arr, [cnt], -1,0, thickness = -1, offset=offset)
feature_vect = normalize_and_extract_features(char_arr)
# prd = classify(feature_vect)
prd, prob = prd_prob(feature_vect)
# print prd, max(cls.predict_proba(feature_vect)[0])
insertion_pos = bisect(left_edges, x)
left_items = 6
right_items = 5
if insertion_pos >= len(new_boxes):
left_items = 12
elif insertion_pos <= len(new_boxes):
# same as above except at front of line
right_items = 12
if tsek_insert_method == 'baseline':
top = 1000000 # arbitrary high number
bottom = 0
#### Get min or max index to avoid reaching beyond edges of the line
lower = max(insertion_pos - left_items, 0)
upper = min(len(new_boxes)-1, insertion_pos+right_items)
####
left = new_boxes[lower][0]
right = new_boxes[upper][0] + new_boxes[upper][2]
if insertion_pos < len(new_boxes):
mid = new_boxes[insertion_pos][0] + new_boxes[insertion_pos][2]
else:
mid = right
for j in new_boxes[lower:upper]:
if j[1] < top:
top = j[1]
try:
if j[1] + j[3] > bottom:
bottom = j[1] + j[3]
except IndexError:
print new_boxes[lower:upper]
print j
raise
local_span = bottom - top
left_sum = img_arr[top:bottom,left:mid].sum(axis=1)
right_sum = img_arr[top:bottom,mid:right].sum(axis=1)
try:
local_baseline_left = top + left_sum.argmin()
except:
local_baseline_left = top
if mid != right:
local_baseline_right = top + right_sum.argmin()
else:
local_baseline_right = local_baseline_left
if prd == u'་' and local_span > 0:
if ((local_baseline_left >= bx[1] and local_baseline_left <= bx[1] + bx[3]) or
(local_baseline_right >= bx[1] and local_baseline_right <= bx[1] + bx[3])) or (insertion_pos == len(vectors)): #or
if insertion_pos <= len(new_boxes):
prev_box = new_boxes[insertion_pos-1]
left_prev = prev_box[0]
if 0 <= x - left_prev < w and 2*w < prev_box[2]:
insertion_pos -= 1
new_boxes.insert(insertion_pos, bx)
bx.append(prob)
bx.append(prd)
vectors.insert(insertion_pos, bx)
left_edges.insert(insertion_pos, bx[0])
tsek_widths.append(bx[2])
elif ((bx[1] >= top -.25*local_span and bx[1] + bx[3] <=
bottom + local_span*.25) or
(insertion_pos == len(vectors))) and bx[1] - local_baseline_left < 2*tsek_mean:
vectors.insert(insertion_pos, prd)
new_boxes.insert(insertion_pos, bx)
new_boxes[insertion_pos].append(prob)
new_boxes[insertion_pos].append(prd)
left_edges.insert(insertion_pos, bx[0])
else:
print 'small contour reject at', l, s, 'local height span', local_span, 'box height', bx[3]
else:
vectors.insert(insertion_pos, prd)
new_boxes.insert(insertion_pos, bx)
new_boxes[insertion_pos].append(prob)
new_boxes[insertion_pos].append(prd)
left_edges.insert(insertion_pos, bx[0])
for em in emph_markers:
mkinx = segmentation.line_info.shapes.cached_pred_prob[em][0]
marker = dig_to_char[mkinx]
marker_prob = segmentation.line_info.shapes.cached_pred_prob[em][1][mkinx]
bx = segmentation.line_info.shapes.get_boxes()[em]
bx = list(bx)
x,y,w,h = bx
insertion_pos = bisect(left_edges, x)
vectors.insert(insertion_pos, marker)
bx.append(marker_prob)
bx.append(marker)
new_boxes.insert(insertion_pos, bx)
left_edges.insert(insertion_pos, bx[0])
if len(vectors) == 1: i = -1
skip_next_n = 0
###HMM PHASE
allstrs = []
curstr = []
allinx = []
curinx = []
for j, v in enumerate(vectors):
islist = isinstance(v, list)
if isinstance(v, unicode) or islist:
allstrs.append(curstr)
allinx.append(curinx)
curstr = []
curinx = []
else:
curstr.append(v)
curinx.append(j)
if curstr:
allstrs.append(curstr)
allinx.append(curinx)
for f, group in enumerate(allstrs):
if not group: continue
try:
probs = predict_log_proba(group)
except:
print v,
# raise
LPROB = len(probs)
if LPROB == 1:
inx = probs[0].argmax()
prb = probs[0][inx]
prds = [inx]
else:
probs = probs.astype(np.float32)
prb, prds = viterbi_cython(LPROB, n_states, start_p, trans_p, probs)
prb = np.exp(prb)
inx = allinx[f]
for vv, c in enumerate(range(len(prds))):
ind = inx[c]
cprob = probs[c].max()
#######replace low prob stacks using svm rbf classifier
####### warning: this may undo decisions made by hmm classifier
# if np.exp(cprob) <= .98:
# # print prds, type(prds)
# print 'replacing', dig_to_char[prds[c]], 'with',
# prds[c] = rbfcls.predict(group[vv])[0]
# # print prds, type(prds)
# # print prds[c]
# print dig_to_char[prds[c]]
# print
#######################
new_boxes[ind].append(np.exp(cprob))
try:
new_boxes[ind].append(dig_to_char[prds[c]])
except KeyError:
new_boxes[ind].append('PROB')
for ind, b in enumerate(new_boxes):
tmp_result.append(new_boxes[ind])
if not len(new_boxes[ind]) == 6:
print l, ind, new_boxes[ind], '<-----'
if ind + 1 < len(new_boxes) and new_boxes[ind+1][0] - (new_boxes[ind][0] + new_boxes[ind][2]) >= 1.5*tsek_mean:
tmp_result.append([-1,-1,-1,-1, 1.0, u' '])
results.append(tmp_result)
return results
def recognize_chars_probout(segmentation, tsek_insert_method='baseline', ):
'''Recognize characters using segmented char data
Parameters:
--------------------
segmentation: an instance of PechaCharSegmenter or Segmenter
Returns:
--------------
results: list of lists containing [x, y, width, height, prob, unicode], specifying the
coordinates of each stack's bounding box, its probability, and its unicode
characters -- one list per line of the page'''
results = []
tsek_mean = segmentation.final_box_info.tsek_mean
cached_features = segmentation.line_info.shapes.cached_features
cached_pred_prob = segmentation.line_info.shapes.cached_pred_prob
for l, vectors in enumerate(segmentation.vectors):
if not vectors:
print 'no vectors...'
continue
tmp_result = []
new_boxes = segmentation.new_boxes[l]
scale_w = segmentation.final_box_info.transitions[l]
small_chars = segmentation.line_info.small_cc_lines_chars[l]
#FIXME: define emph lines for line cut
#### Line Cut has no emph_lines object so need to work around for now...
emph_markers = getattr(segmentation.line_info, 'emph_lines', [])
if emph_markers:
emph_markers = emph_markers[l]
img_arr = segmentation.line_info.shapes.img_arr
left_edges = [b[0] for b in new_boxes]
tsek_widths = []
for s in small_chars[::-1]: # consider small chars from the end of the line going backward; backward is often useful for misplaced tsek and maybe for TOC, though this should be checked
bx = segmentation.line_info.shapes.get_boxes()[s]
bx = list(bx)
x,y,w,h = bx
try:
feature_vect = cached_features[s]
inx, probs = cached_pred_prob[s]
prob = probs[inx]
prd = dig_to_char[inx]
except:
cnt = segmentation.line_info.shapes.contours[s]
char_arr = np.ones((h,w), dtype=np.uint8)
offset = (-x, -y)
drawContours(char_arr, [cnt], -1,0, thickness = -1, offset=offset)
feature_vect = normalize_and_extract_features(char_arr)
prd, prob = prd_prob(feature_vect)
insertion_pos = bisect(left_edges, x)
left_items = 6
right_items = 5
if insertion_pos >= len(new_boxes):
# insertion is at or near the end of the line and needs more left
# neighbors to compensate for there being fewer chars to define the baseline
left_items = 12
elif insertion_pos <= len(new_boxes):
# same as above except at front of line
right_items = 12
# right_items = 5 # bias slightly toward the left.
if tsek_insert_method == 'baseline':
top = 1000000 # arbitrary high number
bottom = 0
#### Get min or max index to avoid reaching beyond edges of the line
lower = max(insertion_pos - left_items, 0)
upper = min(len(new_boxes)-1, insertion_pos+right_items)
left = new_boxes[lower][0]
right = new_boxes[upper][0] + new_boxes[upper][2]
if insertion_pos < len(new_boxes):
mid = new_boxes[insertion_pos][0] + new_boxes[insertion_pos][2]
else:
mid = right
for j in new_boxes[lower:upper]:
if j[1] < top:
top = j[1]
if j[1] + j[3] > bottom:
bottom = j[1] + j[3]
local_span = bottom - top
top, bottom, left, right, mid = [int(np.round(ff)) for ff in [top, bottom, left, right, mid]]
if prd == u'་' and local_span > 0:
left_sum = img_arr[top:bottom,left:mid].sum(axis=1)
right_sum = img_arr[top:bottom,mid:right].sum(axis=1)
local_baseline_left = top + left_sum.argmin()
if mid != right:
local_baseline_right = top + right_sum.argmin()
else:
local_baseline_right = local_baseline_left
if ((local_baseline_left >= bx[1] and local_baseline_left <= bx[1] + bx[3]) or
(local_baseline_right >= bx[1] and local_baseline_right <= bx[1] + bx[3])) or (insertion_pos == len(vectors)): #or
# (entire_local_baseline >= bx[1] and entire_local_baseline <= bx[1] + bx[3])):
### Account for the fact that the placement of a tsek could be
# before or after its indicated insertion pos
### experimental.. only needed with certain fonts e.g. "book 6"
## in samples
if insertion_pos <= len(new_boxes):
prev_box = new_boxes[insertion_pos-1]
left_prev = prev_box[0]
if 0 <= x - left_prev < w and 2*w < prev_box[2]:
insertion_pos -= 1
vectors.insert(insertion_pos, prd)
new_boxes.insert(insertion_pos, bx)
new_boxes[insertion_pos].append(prob)
new_boxes[insertion_pos].append(prd)
left_edges.insert(insertion_pos, bx[0])
tsek_widths.append(bx[2])
elif (bx[1] >= top -.25*local_span and bx[1] + bx[3] <= bottom + local_span*.25) or (insertion_pos == len(vectors)):
vectors.insert(insertion_pos, prd)
new_boxes.insert(insertion_pos, bx)
new_boxes[insertion_pos].append(prob)
new_boxes[insertion_pos].append(prd)
left_edges.insert(insertion_pos, bx[0])
else:
vectors.insert(insertion_pos, prd)
new_boxes.insert(insertion_pos, bx)
new_boxes[insertion_pos].append(prob)
new_boxes[insertion_pos].append(prd)
left_edges.insert(insertion_pos, bx[0])
for em in emph_markers:
bx = segmentation.line_info.shapes.get_boxes()[em]
mkinx = segmentation.line_info.shapes.cached_pred_prob[em][0]
marker = dig_to_char[mkinx]
marker_prob = segmentation.line_info.shapes.cached_pred_prob[em][1][mkinx]
bx = list(bx)
x,y,w,h = bx
bx.append(marker_prob)
bx.append(marker)
insertion_pos = bisect(left_edges, x)
vectors.insert(insertion_pos, marker)
new_boxes.insert(insertion_pos, bx)
left_edges.insert(insertion_pos, bx[0])
if len(vectors) == 1: i = -1
skip_next_n = 0
for i, v in enumerate(vectors[:-1]):
if skip_next_n:
skip_next_n -= 1
continue
if new_boxes[i+1][0] - (new_boxes[i][0] + new_boxes[i][2]) >= 2*tsek_mean:
if not len(new_boxes[i]) == 6 and not isinstance(v, unicode):
prd, prob = prd_prob(v)
else:
if len(new_boxes[i]) == 6:
prob, prd = new_boxes[i][4:]
else:
## v is unicode stack, likely from segmentation step
prd = v
prob = .95 # NEED ACTUAL PROB
new_boxes[i].append(prob)
new_boxes[i].append(prd)
tmp_result.append(new_boxes[i])
tmp_result.append([-1,-1,-1,-1, 1.0, u' '])
else:
if hasattr(v, 'dtype'):
try:
prd, prob = prd_prob(v)
except:
print v
new_boxes[i].append(prob)
new_boxes[i].append(prd)
else:
if len(new_boxes[i]) == 6:
prob, prd = new_boxes[i][4:]
else:
prd = v
if len(new_boxes[i]) < 6:
try:
new_boxes[i].append(prob)
except:
new_boxes[i].append(1)
new_boxes[i].append(prd)
tmp_result.append(new_boxes[i])
if hasattr(vectors[-1], 'dtype'):
prd, prob = prd_prob(vectors[-1])
new_boxes[-1].append(prob)
new_boxes[-1].append(prd)
tmp_result.append(new_boxes[-1])
results.append(tmp_result)
return results
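# A minimal helper sketch (illustrative only, not part of the original module):
# it shows how the per-line [x, y, width, height, prob, unicode] entries returned
# above can be flattened back into text. `results` is assumed to come from
# recognize_chars_probout or recognize_chars_hmm, where spaces are encoded as
# [-1, -1, -1, -1, 1.0, u' '] entries.
def boxes_to_text(results):
    '''Join recognized stacks into one unicode string per line.'''
    return [u''.join(entry[-1] for entry in line) for line in results]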
def viterbi_post_process(img_arr, results):
'''Go through all results and attempt to correct invalid syllables'''
final = [[] for i in range(len(results))]
for i, line in enumerate(results):
syllable = []
for j, char in enumerate(line):
if char[-1] in u'་། ' or not word_parts.intersection(char[-1]) or j == len(line)-1:
if syllable:
syl_str = ''.join(s[-1] for s in syllable)
if is_non_std(syl_str) and syl_str not in syllables:
print syl_str, 'HAS PROBLEMS. TRYING TO FIX'
bx = combine_many_boxes([ch[0:4] for ch in syllable])
bx = list(bx)
arr = img_arr[bx[1]:bx[1]+bx[3], bx[0]:bx[0]+bx[2]]
arr = fadd_padding(arr, 3)
try:
prob, hmm_res = main(arr, Config(line_break_method='line_cut', page_type='book', postprocess=False, viterbi_postprocess=True, clear_hr=False), page_info={'flname':''})
except TypeError:
print 'HMM run exited with an error.'
prob = 0
hmm_res = ''
# corrections[syl_str].append(hmm_res)
logging.info(u'VPP Correction: %s\t%s' % (syl_str, hmm_res))
if prob == 0 and hmm_res == '':
print 'hit problem. using unmodified output'
for s in syllable:
final[i].append(s)
else:
bx.append(prob)
bx.append(hmm_res)
final[i].append(bx)
else:
for s in syllable:
final[i].append(s)
final[i].append(char)
syllable = []
else:
syllable.append(char)
if syllable:
for s in syllable:
final[i].append(s)
return final
def main(page_array, conf=Config(viterbi_postprocess=False, line_break_method = None, page_type = None), retries=0,
text=False, page_info={}):
'''Main procedure for processing a page from start to finish
Parameters:
--------------------
page_array: a 2 dimensional numpy array containing binary pixel data of
the image
page_info: dictionary, optional
A dictionary containing metadata about the page to be recognized.
Define strings for the keywords "flname" and "volume" if saving
a serialized copy of the OCR results.
retries: used internally when the system retries a failed attempt
text: boolean flag. If True, return text rather than char-position data
Returns:
--------------
text: str
Recognized text for the entire page.
If text=False, returns character position and label data as a python dictionary
'''
print page_info.get('flname','')
confpath = conf.path
conf = conf.conf
line_break_method = conf['line_break_method']
page_type = conf['page_type']
### Set the line_break method automatically if it hasn't been
### specified beforehand
if not line_break_method and not page_type:
if page_array.shape[1] > 2*page_array.shape[0]:
print 'setting page type as pecha'
line_break_method = 'line_cluster'
page_type = 'pecha'
else:
print 'setting page type as book'
line_break_method = 'line_cut'
page_type = 'book'
conf['page_type'] = page_type
conf['line_break_method'] = line_break_method
detect_o = conf.get('detect_o', False)
print 'clear hr', conf.get('clear_hr', False)
results = []
out = u''
try:
### Get information about the pages
shapes = PE2(page_array, cls, page_type=page_type,
low_ink=conf['low_ink'],
flpath=page_info.get('flname',''),
detect_o=detect_o,
clear_hr = conf.get('clear_hr', False))
shapes.conf = conf
### Separate the lines on a page
if page_type == 'pecha':
k_groups = shapes.num_lines
shapes.viterbi_post = conf['viterbi_postprocess']
if line_break_method == 'line_cut':
line_info = LineCut(shapes)
if not line_info: # immediately skip to re-run with LineCluster
sys.exit()
elif line_break_method == 'line_cluster':
line_info = LineCluster(shapes, k=k_groups)
### Perform segmentation of characters
segmentation = Segmenter(line_info)
###Perform recognition
if not conf['viterbi_postprocess']:
if conf['recognizer'] == 'probout':
results = recognize_chars_probout(segmentation)
elif conf['recognizer'] == 'hmm':
results = recognize_chars_hmm(segmentation, trans_p, start_p)
elif conf['recognizer'] == 'kama':
results = recognize_chars_probout(segmentation)
results = recognize_chars_kama(results, segmentation)
if conf['postprocess']:
results = viterbi_post_process(segmentation.line_info.shapes.img_arr, results)
else: # Should only be called from *within* a non-viterbi run...
prob, results = hmm_recognize_bigram(segmentation)
return prob, results
### Construct an output string
output = []
for n, line in enumerate(results):
for m,k in enumerate(line):
# if isinstance(k[-1], int):
# print n,m,k
# page_array[k[1]:k[1]+k[3], k[0]:k[0]+k[2]] = 0
# Image.fromarray(page_array*255).show()
output.append(k[-1])
output.append(u'\n')
out = ''.join(output)
print out
if text:
results = out
return results
except:
### Retry and assume the error was caused by use of the
### wrong line_break_method...
import traceback;traceback.print_exc()
if not results and not conf['viterbi_postprocess']:
print 'WARNING', '*'*40
print page_info['flname'], 'failed to return a result.'
print 'WARNING', '*'*40
print
if line_break_method == 'line_cut' and retries < 1:
print 'retrying with line_cluster instead of line_cut'
try:
return main(page_array, conf=Config(path=confpath, line_break_method='line_cluster', page_type='pecha'), page_info=page_info, retries = 1, text=text)
except:
logging.info('Exited after failure of second run.')
return []
if not conf['viterbi_postprocess']:
if not results:
logging.info('***** No OCR output for %s *****' % page_info['flname'])
return results
def run_main(fl, conf=None, text=False):
'''Helper function to do recognition'''
if not conf:
# conf = Config(low_ink=False, segmenter='stochastic', recognizer='hmm',
# break_width=2.0, page_type='pecha', line_break_method='line_cluster',
# line_cluster_pos='center', postprocess=False, detect_o=False,
# clear_hr = False)
#
conf = Config(segmenter='stochastic', recognizer='hmm', break_width=2.5,
line_break_method='line_cut', postprocess=False,
low_ink=False, stop_line_cut=False, clear_hr=True,
detect_o=False)
return main(np.asarray(Image.open(fl).convert('L'))/255, conf=conf,
page_info={'flname':os.path.basename(fl), 'volume': VOL},
text=text)
if __name__ == '__main__':
fls = ['/Users/zach/random-tibetan-tiff.tif']
lbmethod = 'line_cluster'
page_type = 'pecha'
VOL = 'single_volumes'
def run_main(fl):
try:
return main(np.asarray(Image.open(fl).convert('L'))/255,
conf=Config(break_width=2.5, recognizer='hmm',
segmenter='stochastic', page_type='pecha',
line_break_method='line_cluster'),
page_info={'flname':fl, 'volume': VOL})
except:
return []
import datetime
start = datetime.datetime.now()
print 'starting'
outfile = codecs.open('/home/zr/latest-ocr-outfile.txt', 'w', 'utf-8')
for fl in fls:
#### line cut
# ret = main((np.asarray(Image.open(fl).convert('L'))/255),
# conf=Config(break_width=2., recognizer='probout',
# segmenter='stochastic', line_break_method='line_cut',
# postprocess=False, stop_line_cut=False, low_ink=False, clear_hr=True),
# page_info={'flname':fl, 'volume': VOL}, text=True)
#### line cluster
ret = main((np.asarray(Image.open(fl).convert('L'))/255),
conf=Config(segmenter='stochastic', recognizer='hmm',
break_width=2.0, page_type='pecha',
line_break_method='line_cluster',
line_cluster_pos='center', postprocess=False,
detect_o=False, low_ink=False, clear_hr=True),
page_info={'flname':fl, 'volume': VOL}, text=True)
outfile.write(ret)
outfile.write('\n\n')
print datetime.datetime.now() - start, 'time taken'
| mit |
Lab41/pelops | pelops/datasets/dgcars.py | 3 | 1715 | import collections
import json
import os.path
import pelops.datasets.chip as chip
import pelops.utils as utils
class DGCarsDataset(chip.ChipDataset):
filenames = collections.namedtuple(
"filenames",
[
"all_list",
"train_list",
"test_list",
]
)
filepaths = filenames(
"allFiles",
"training",
"testing",
)
def __init__(self, dataset_path, set_type=None):
super().__init__(dataset_path, set_type)
self.__set_filepaths() # set self.__filepaths
self.__set_chips()
def __set_filepaths(self):
self.__filepaths = self.filenames(
os.path.join(self.dataset_path, DGCarsDataset.filepaths.all_list),
os.path.join(self.dataset_path, DGCarsDataset.filepaths.train_list),
os.path.join(self.dataset_path, DGCarsDataset.filepaths.test_list),
)
def __set_chips(self):
# identify all the chips, default query to all
name_filepath = {
utils.SetType.ALL: self.__filepaths.all_list,
utils.SetType.TEST: self.__filepaths.test_list,
utils.SetType.TRAIN: self.__filepaths.train_list,
}.get(self.set_type, self.__filepaths.all_list)
# create chip objects based on the names listed in the files
for dg_chip in utils.read_json(name_filepath):
filepath = os.path.normpath(os.path.join(self.dataset_path, dg_chip["filename"]))
car_id = None
cam_id = None
time = None
misc = dg_chip
current_chip = chip.Chip(filepath, car_id, cam_id, time, misc)
self.chips[filepath] = current_chip
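if __name__ == "__main__":  # pragma: no cover
    # Illustrative usage sketch: the dataset path below is an assumption and the
    # set type may be any pelops.utils.SetType value.
    example = DGCarsDataset("/data/dgcars", utils.SetType.TRAIN)
    print(len(example.chips), "chips loaded")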
| apache-2.0 |
pytroll/pyresample | pyresample/resampler.py | 1 | 14848 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019-2021 Pyresample developers
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Base resampler class made for subclassing."""
from __future__ import annotations
import logging
import os
from functools import lru_cache, partial
from numbers import Number
from typing import Union
import numpy as np
from .slicer import _enumerate_chunk_slices, create_slicer
try:
import dask
import dask.array as da
from dask.highlevelgraph import HighLevelGraph
except ImportError:
da = None
try:
import xarray as xr
except ImportError:
xr = None
from pyresample.geometry import (
AreaDefinition,
CoordinateDefinition,
IncompatibleAreas,
SwathDefinition,
)
from .future.resamplers.resampler import hash_dict
logger = logging.getLogger(__name__)
class BaseResampler:
"""Base abstract resampler class."""
def __init__(self,
source_geo_def: Union[SwathDefinition, AreaDefinition],
target_geo_def: Union[CoordinateDefinition, AreaDefinition],
):
"""Initialize resampler with geolocation information.
Args:
source_geo_def:
Geolocation definition for the data to be resampled
target_geo_def:
Geolocation definition for the area to resample data to.
"""
self.source_geo_def = source_geo_def
self.target_geo_def = target_geo_def
def get_hash(self, source_geo_def=None, target_geo_def=None, **kwargs):
"""Get hash for the current resample with the given *kwargs*."""
if source_geo_def is None:
source_geo_def = self.source_geo_def
if target_geo_def is None:
target_geo_def = self.target_geo_def
the_hash = source_geo_def.update_hash()
target_geo_def.update_hash(the_hash)
hash_dict(kwargs, the_hash)
return the_hash.hexdigest()
def precompute(self, **kwargs):
"""Do the precomputation.
This is an optional step if the subclass wants to implement more
complex features like caching or can share some calculations
between multiple datasets to be processed.
"""
return None
def compute(self, data, **kwargs):
"""Do the actual resampling.
This must be implemented by subclasses.
"""
raise NotImplementedError
def resample(self, data, cache_dir=None, mask_area=None, **kwargs):
"""Resample `data` by calling `precompute` and `compute` methods.
Only certain resampling classes may use `cache_dir` and the `mask`
provided when `mask_area` is True. The return value of calling the
`precompute` method is passed as the `cache_id` keyword argument
of the `compute` method, but may not be used directly for caching. It
is up to the individual resampler subclasses to determine how this
is used.
Args:
data (xarray.DataArray): Data to be resampled
cache_dir (str): directory to cache precomputed results
(default None, optional)
mask_area (bool): Mask geolocation data where data values are
invalid. This should be used when data values
may affect what neighbors are considered valid.
Returns (xarray.DataArray): Data resampled to the target area
"""
# default is to mask areas for SwathDefinitions
if mask_area is None and isinstance(
self.source_geo_def, SwathDefinition):
mask_area = True
if mask_area:
if isinstance(self.source_geo_def, SwathDefinition):
geo_dims = self.source_geo_def.lons.dims
else:
geo_dims = ('y', 'x')
flat_dims = [dim for dim in data.dims if dim not in geo_dims]
if np.issubdtype(data.dtype, np.integer):
kwargs['mask'] = data == data.attrs.get('_FillValue', np.iinfo(data.dtype.type).max)
else:
kwargs['mask'] = data.isnull()
kwargs['mask'] = kwargs['mask'].all(dim=flat_dims)
cache_id = self.precompute(cache_dir=cache_dir, **kwargs)
return self.compute(data, cache_id=cache_id, **kwargs)
def _create_cache_filename(self, cache_dir=None, prefix='',
fmt='.zarr', **kwargs):
"""Create filename for the cached resampling parameters."""
cache_dir = cache_dir or '.'
hash_str = self.get_hash(**kwargs)
return os.path.join(cache_dir, prefix + hash_str + fmt)
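# A minimal sketch of how BaseResampler is meant to be subclassed (illustrative
# only; the class below is not part of pyresample's API).
class _ExampleIdentityResampler(BaseResampler):
    """Sketch of a resampler subclass: ``compute`` must be overridden; this one
    returns the data unchanged just to show the ``precompute``/``compute``/
    ``resample`` flow."""

    def compute(self, data, cache_id=None, **kwargs):
        # A real resampler would map `data` from self.source_geo_def onto
        # self.target_geo_def here, possibly reusing `cache_id` from `precompute`.
        return data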
def resample_blocks(func, src_area, src_arrays, dst_area,
dst_arrays=(), chunk_size=None, dtype=None, name=None, fill_value=None, **kwargs):
"""Resample dask arrays blockwise.
Resample_blocks applies a function blockwise to transform data from a source
area domain to a destination area domain.
Args:
func: A callable to apply on the input data. This function is passed a block of src_arrays,
dst_arrays in that order, followed by the kwargs, which include the fill_value. If the callable accepts a
`block_info` keyword argument, block information is passed to it. Block information provides the source
area, the destination area, and the positions of the source and destination blocks relative to `src_area`
and `dst_area`, respectively.
src_area: a source geo definition.
dst_area: a destination geo definition. If the same as the source definition, a ValueError is raised.
src_arrays: data to use. When split into smaller bits to pass to func, they are split across the x and y
dimensions, but not across the other dimensions, so all the dimensions of the smaller arrays will be using
only one chunk!
dst_arrays: arrays to use that are already in dst_area space. If the array has more than 2 dimensions,
the last two are expected to be y, x.
chunk_size: the chunk size(s) to use in the dst_area space. This has to be provided since it is not guaranteed
that we can get this information from the other arguments. Moreover, this needs to be an iterable of k
elements if the resulting array of func is to have a different number of dimensions (k) than the input
array.
dtype: the dtype the resulting array is going to have. Has to be provided.
kwargs: any other keyword arguments that will be passed on to func.
Principle of operations:
Resample_blocks works by iterating over chunks on the dst_area domain. For each chunk, the corresponding slice
of the src_area domain is computed and the input src_arrays are cut accordingly to pass to func. To know more
about how the slicing is performed, refer to the :class:`Slicer` class and its subclasses.
Examples:
To generate indices from the gradient resampler, you can apply the corresponding function with no input. Note
how we provide the chunk sizes knowing that the result array with have 2 elements along a third dimension.
>>> indices_xy = resample_blocks(gradient_resampler_indices, source_geo_def, [], target_geo_def,
... chunk_size=(2, "auto", "auto"), dtype=float)
From these indices, to resample an array using bilinear interpolation:
>>> resampled = resample_blocks(block_bilinear_interpolator, source_geo_def, [src_array], target_geo_def,
... dst_arrays=[indices_xy],
... chunk_size=("auto", "auto"), dtype=src_array.dtype)
"""
if dst_area == src_area:
raise ValueError("Source and destination areas are identical."
" Should you be running `map_blocks` instead of `resample_blocks`?")
name = _create_dask_name(name, func,
src_area, src_arrays,
dst_area, dst_arrays,
fill_value, dtype, chunk_size, kwargs)
dask_graph = dict()
dependencies = []
fill_value = _make_fill_value(fill_value, dtype)
dst_chunks, output_shape = _normalize_chunks_for_area(dst_area, chunk_size, dtype)
for dst_block_info, dst_area_chunk in _enumerate_dst_area_chunks(dst_area, dst_chunks):
position = dst_block_info["chunk-location"]
dst_block_info["shape"] = output_shape
try:
cropped_src_arrays, cropped_src_area, src_block_info = crop_data_around_area(src_area, src_arrays,
dst_area_chunk)
_check_resolution_mismatch(cropped_src_area, dtype)
except IncompatibleAreas: # no relevant data matching
task = (np.full, dst_block_info["chunk-shape"], fill_value)
src_dependencies = []
else:
task, src_dependencies = _create_task(func,
cropped_src_arrays, src_block_info,
dst_arrays, dst_block_info,
position,
fill_value, kwargs)
dask_graph[(name, *position)] = task
dependencies.extend(src_dependencies)
dependencies.extend(dst_arrays)
dask_graph = HighLevelGraph.from_collections(name, dask_graph, dependencies=dependencies)
return da.Array(dask_graph, name, chunks=dst_chunks, dtype=dtype, shape=output_shape)
def _create_dask_name(name, func, src_area, src_arrays, dst_area, dst_arrays, fill_value, dtype, chunks, kwargs):
if name is not None:
name = f"{name}"
else:
from dask.base import tokenize
from dask.utils import funcname
token = tokenize(func, hash(src_area), *src_arrays, hash(dst_area), *dst_arrays,
fill_value, dtype, chunks, **kwargs)
name = f"{funcname(func)}-{token}"
return name
def _make_fill_value(fill_value, dtype):
if fill_value is None:
if np.issubdtype(dtype, np.integer):
fill_value = np.iinfo(dtype).min
else:
fill_value = np.nan
return fill_value
def _check_resolution_mismatch(src_area_crop, dtype):
res_chunks, _ = _normalize_chunks_for_area(src_area_crop, dask.config.get('array.chunk-size', '128MiB'),
dtype)
if len(res_chunks[0]) * len(res_chunks[1]) >= 4:
logger.warning("The input area chunks are large. "
"This usually means that the input area is of much higher resolution than the output "
"area. You can reduce the chunks passed, and ponder whether you are using the right "
"resampler for the job.")
def _create_task(func, smaller_src_arrays, src_block_info, dst_arrays, dst_block_info, position, fill_value,
kwargs):
"""Create a task for resample_blocks."""
from dask.utils import has_keyword
dependencies = []
args = []
for smaller_data in smaller_src_arrays:
args.append((smaller_data.name, *([0] * smaller_data.ndim)))
dependencies.append(smaller_data)
for dst_array in dst_arrays:
dst_position = [0] * (dst_array.ndim - 2) + list(position[-2:])
args.append((dst_array.name, *dst_position))
func_kwargs = kwargs.copy()
func_kwargs['fill_value'] = fill_value
if has_keyword(func, "block_info"):
func_kwargs["block_info"] = {0: src_block_info,
None: dst_block_info}
pfunc = partial(func, **func_kwargs)
task = (pfunc, *args)
return task, dependencies
def crop_data_around_area(source_geo_def, src_arrays, target_geo_def):
"""Crop the data around the provided area."""
small_source_geo_def, x_slice, y_slice = crop_source_area(source_geo_def, target_geo_def)
smaller_src_arrays = []
for data in src_arrays:
smaller_src_arrays.append(data[..., y_slice, x_slice].rechunk([-1] * data.ndim))
block_info = {"shape": source_geo_def.shape,
"array-location": (y_slice, x_slice),
"area": small_source_geo_def}
return smaller_src_arrays, small_source_geo_def, block_info
@lru_cache
def crop_source_area(source_geo_def, target_geo_def):
"""Crop a source area around the provided target area."""
slicer = create_slicer(source_geo_def, target_geo_def)
x_slice, y_slice = slicer.get_slices()
small_source_geo_def = source_geo_def[y_slice, x_slice]
if isinstance(small_source_geo_def, SwathDefinition):
small_source_geo_def.lons.data = small_source_geo_def.lons.data.rechunk((-1, -1))
small_source_geo_def.lats.data = small_source_geo_def.lats.data.rechunk((-1, -1))
return small_source_geo_def, x_slice, y_slice
def _enumerate_dst_area_chunks(dst_area, dst_chunks):
"""Enumerate the chunks in function of the dst_area."""
for position, slices in _enumerate_chunk_slices(dst_chunks):
chunk_shape = tuple(chunk[pos] for pos, chunk in zip(position, dst_chunks))
target_geo_def = dst_area[slices[-2:]]
block_info = {"num-chunks": [len(chunk) for chunk in dst_chunks],
"chunk-location": position,
"array-location": slices,
"chunk-shape": chunk_shape,
"area": target_geo_def,
}
yield block_info, target_geo_def
def _normalize_chunks_for_area(area, chunk_size, dtype):
rest_shape = []
if not isinstance(chunk_size, (Number, str)) and len(chunk_size) > len(area.shape):
rest_chunks = chunk_size[:-len(area.shape)]
for elt in rest_chunks:
try:
rest_shape.append(sum(elt))
except TypeError:
rest_shape.append(elt)
output_shape = tuple(rest_shape) + area.shape
dst_chunks = da.core.normalize_chunks(chunk_size, output_shape, dtype=dtype)
return dst_chunks, output_shape
| lgpl-3.0 |
keras-team/keras-io | examples/nlp/text_classification_with_switch_transformer.py | 1 | 12277 | """
Title: Text classification with Switch Transformer
Author: [Khalid Salama](https://www.linkedin.com/in/khalid-salama-24403144/)
Date created: 2020/05/10
Last modified: 2021/02/15
Description: Implement a Switch Transformer for text classification.
"""
"""
## Introduction
This example demonstrates the implementation of the
[Switch Transformer](https://arxiv.org/abs/2101.03961) model for text
classification.
The Switch Transformer replaces the feedforward network (FFN) layer in the standard
Transformer with a Mixture of Experts (MoE) routing layer, where each expert operates
independently on the tokens in the sequence. This allows increasing the model size without
increasing the computation needed to process each example.
Note that, for training the Switch Transformer efficiently, data and model parallelism
need to be applied, so that expert modules can run simultaneously, each on its own accelerator.
While the implementation described in the paper uses the
[TensorFlow Mesh](https://github.com/tensorflow/mesh) framework for distributed training,
this example presents a simple, non-distributed implementation of the Switch Transformer
model for demonstration purposes.
"""
"""
## Setup
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
"""
## Download and prepare dataset
"""
vocab_size = 20000 # Only consider the top 20k words
num_tokens_per_example = 200 # Only consider the first 200 words of each movie review
(x_train, y_train), (x_val, y_val) = keras.datasets.imdb.load_data(num_words=vocab_size)
print(len(x_train), "Training sequences")
print(len(x_val), "Validation sequences")
x_train = keras.preprocessing.sequence.pad_sequences(
x_train, maxlen=num_tokens_per_example
)
x_val = keras.preprocessing.sequence.pad_sequences(x_val, maxlen=num_tokens_per_example)
"""
## Define hyperparameters
"""
embed_dim = 32 # Embedding size for each token.
num_heads = 2 # Number of attention heads
ff_dim = 32 # Hidden layer size in feedforward network.
num_experts = 10 # Number of experts used in the Switch Transformer.
batch_size = 50 # Batch size.
learning_rate = 0.001 # Learning rate.
dropout_rate = 0.25 # Dropout rate.
num_epochs = 3 # Number of epochs.
num_tokens_per_batch = (
batch_size * num_tokens_per_example
) # Total number of tokens per batch.
print(f"Number of tokens per batch: {num_tokens_per_batch}")
"""
## Implement token & position embedding layer
It consists of two separate embedding layers, one for tokens and one for token index (positions).
"""
class TokenAndPositionEmbedding(layers.Layer):
def __init__(self, maxlen, vocab_size, embed_dim):
super(TokenAndPositionEmbedding, self).__init__()
self.token_emb = layers.Embedding(input_dim=vocab_size, output_dim=embed_dim)
self.pos_emb = layers.Embedding(input_dim=maxlen, output_dim=embed_dim)
def call(self, x):
maxlen = tf.shape(x)[-1]
positions = tf.range(start=0, limit=maxlen, delta=1)
positions = self.pos_emb(positions)
x = self.token_emb(x)
return x + positions
"""
## Implement the feedforward network
This is used as the Mixture of Experts in the Switch Transformer.
"""
def create_feedforward_network(ff_dim, name=None):
return keras.Sequential(
[layers.Dense(ff_dim, activation="relu"), layers.Dense(ff_dim)], name=name
)
"""
## Implement the load-balanced loss
This is an auxiliary loss to encourage a balanced load across experts.
"""
def load_balanced_loss(router_probs, expert_mask):
# router_probs [tokens_per_batch, num_experts] is the probability assigned for
# each expert per token. expert_mask [tokens_per_batch, num_experts] contains
# the expert with the highest router probability in one−hot format.
num_experts = tf.shape(expert_mask)[-1]
# Get the fraction of tokens routed to each expert.
# density is a vector of length num experts that sums to 1.
density = tf.reduce_mean(expert_mask, axis=0)
# Get fraction of probability mass assigned to each expert from the router
# across all tokens. density_proxy is a vector of length num experts that sums to 1.
density_proxy = tf.reduce_mean(router_probs, axis=0)
# Want both vectors to have uniform allocation (1/num experts) across all
# num_expert elements. The two vectors will be pushed towards uniform allocation
# when the dot product is minimized.
loss = tf.reduce_mean(density_proxy * density) * tf.cast(
(num_experts**2), tf.dtypes.float32
)
return loss
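"""
A quick sanity check (a small illustration, assuming eager execution): with perfectly
balanced routing over `N` experts, both `density` and `density_proxy` equal `1/N`,
so the loss evaluates to `mean(1/N**2) * N**2 = 1.0`.
"""
uniform_router_probs = tf.fill([8, 4], 0.25)  # 8 tokens, 4 experts, uniform router output
uniform_expert_mask = tf.one_hot([0, 1, 2, 3, 0, 1, 2, 3], depth=4)  # perfectly balanced assignment
print(load_balanced_loss(uniform_router_probs, uniform_expert_mask))  # ~1.0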
"""
### Implement the router as a layer
"""
class Router(layers.Layer):
def __init__(self, num_experts, expert_capacity):
self.num_experts = num_experts
self.route = layers.Dense(units=num_experts)
self.expert_capacity = expert_capacity
super(Router, self).__init__()
def call(self, inputs, training=False):
# inputs shape: [tokens_per_batch, embed_dim]
# router_logits shape: [tokens_per_batch, num_experts]
router_logits = self.route(inputs)
if training:
# Add noise for exploration across experts.
router_logits += tf.random.uniform(
shape=router_logits.shape, minval=0.9, maxval=1.1
)
# Probabilities for each token of what expert it should be sent to.
router_probs = keras.activations.softmax(router_logits, axis=-1)
# Get the top−1 expert for each token. expert_gate is the top−1 probability
# from the router for each token. expert_index is what expert each token
# is going to be routed to.
expert_gate, expert_index = tf.math.top_k(router_probs, k=1)
# expert_mask shape: [tokens_per_batch, num_experts]
expert_mask = tf.one_hot(expert_index, depth=self.num_experts)
# Compute load balancing loss.
aux_loss = load_balanced_loss(router_probs, expert_mask)
self.add_loss(aux_loss)
# Experts have a fixed capacity; ensure we do not exceed it. Construct
# the batch indices per expert, with each token's position in its expert, making
# sure that no more than expert_capacity examples can be routed to each expert.
position_in_expert = tf.cast(
tf.math.cumsum(expert_mask, axis=0) * expert_mask, tf.dtypes.int32
)
# Keep only tokens that fit within expert capacity.
expert_mask *= tf.cast(
tf.math.less(
tf.cast(position_in_expert, tf.dtypes.int32), self.expert_capacity
),
tf.dtypes.float32,
)
expert_mask_flat = tf.reduce_sum(expert_mask, axis=-1)
# Mask out the experts that have overflowed the expert capacity.
expert_gate *= expert_mask_flat
# Combine expert outputs and scaling with router probability.
# combine_tensor shape: [tokens_per_batch, num_experts, expert_capacity]
combined_tensor = tf.expand_dims(
expert_gate
* expert_mask_flat
* tf.squeeze(tf.one_hot(expert_index, depth=self.num_experts), 1),
-1,
) * tf.squeeze(tf.one_hot(position_in_expert, depth=self.expert_capacity), 1)
# Create binary dispatch_tensor [tokens_per_batch, num_experts, expert_capacity]
# that is 1 if the token gets routed to the corresponding expert.
dispatch_tensor = tf.cast(combined_tensor, tf.dtypes.float32)
return dispatch_tensor, combined_tensor
"""
### Implement a Switch layer
"""
class Switch(layers.Layer):
def __init__(self, num_experts, embed_dim, num_tokens_per_batch, capacity_factor=1):
self.num_experts = num_experts
self.embed_dim = embed_dim
self.experts = [
create_feedforward_network(embed_dim) for _ in range(num_experts)
]
self.expert_capacity = num_tokens_per_batch // self.num_experts
self.router = Router(self.num_experts, self.expert_capacity)
super(Switch, self).__init__()
def call(self, inputs):
batch_size = tf.shape(inputs)[0]
num_tokens_per_example = tf.shape(inputs)[1]
# inputs shape: [num_tokens_per_batch, embed_dim]
inputs = tf.reshape(inputs, [num_tokens_per_batch, self.embed_dim])
# dispatch_tensor shape: [tokens_per_batch, num_experts, expert_capacity]
# combine_tensor shape: [tokens_per_batch, num_experts, expert_capacity]
dispatch_tensor, combine_tensor = self.router(inputs)
# expert_inputs shape: [num_experts, expert_capacity, embed_dim]
expert_inputs = tf.einsum("ab,acd->cdb", inputs, dispatch_tensor)
expert_inputs = tf.reshape(
expert_inputs, [self.num_experts, self.expert_capacity, self.embed_dim]
)
# Dispatch to experts
expert_input_list = tf.unstack(expert_inputs, axis=0)
expert_output_list = [
self.experts[idx](expert_input)
for idx, expert_input in enumerate(expert_input_list)
]
# expert_outputs shape: [expert_capacity, num_experts, embed_dim]
expert_outputs = tf.stack(expert_output_list, axis=1)
# expert_outputs_combined shape: [tokens_per_batch, embed_dim]
expert_outputs_combined = tf.einsum(
"abc,xba->xc", expert_outputs, combine_tensor
)
# output shape: [batch_size, num_tokens_per_example, embed_dim]
outputs = tf.reshape(
expert_outputs_combined,
[batch_size, num_tokens_per_example, self.embed_dim],
)
return outputs
"""
## Implement a Transformer block layer
"""
class TransformerBlock(layers.Layer):
def __init__(self, embed_dim, num_heads, ffn, dropout_rate=0.1):
super(TransformerBlock, self).__init__()
self.att = layers.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
# The ffn can be either a standard feedforward network or a switch
# layer with a Mixture of Experts.
self.ffn = ffn
self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = layers.Dropout(dropout_rate)
self.dropout2 = layers.Dropout(dropout_rate)
def call(self, inputs, training):
attn_output = self.att(inputs, inputs)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(inputs + attn_output)
ffn_output = self.ffn(out1)
ffn_output = self.dropout2(ffn_output, training=training)
return self.layernorm2(out1 + ffn_output)
"""
## Implement the classifier
The `TransformerBlock` layer outputs one vector for each time step of our input sequence.
Here, we take the mean across all time steps and use a feedforward network on top
of it to classify text.
"""
def create_classifier():
switch = Switch(num_experts, embed_dim, num_tokens_per_batch)
transformer_block = TransformerBlock(ff_dim, num_heads, switch)
inputs = layers.Input(shape=(num_tokens_per_example,))
embedding_layer = TokenAndPositionEmbedding(
num_tokens_per_example, vocab_size, embed_dim
)
x = embedding_layer(inputs)
x = transformer_block(x)
x = layers.GlobalAveragePooling1D()(x)
x = layers.Dropout(dropout_rate)(x)
x = layers.Dense(ff_dim, activation="relu")(x)
x = layers.Dropout(dropout_rate)(x)
outputs = layers.Dense(2, activation="softmax")(x)
classifier = keras.Model(inputs=inputs, outputs=outputs)
return classifier
"""
## Train and evaluate the model
"""
def run_experiment(classifier):
classifier.compile(
optimizer=keras.optimizers.Adam(learning_rate),
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
history = classifier.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=num_epochs,
validation_data=(x_val, y_val),
)
return history
classifier = create_classifier()
run_experiment(classifier)
"""
## Conclusion
Compared to the standard Transformer architecture, the Switch Transformer can have a much
larger number of parameters, leading to increased model
capacity, while maintaining a reasonable computational cost.
"""
| apache-2.0 |
h2oai/h2o | py/testdir_multi_jvm/test_rf_mnist_fvec.py | 9 | 6331 | import unittest
import random, sys, time, re
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_glm, h2o_util, h2o_rf, h2o_jobs
DO_POLL = False
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
# assume we're at 0xdata with its hdfs namenode
h2o.init(1, java_heap_GB=28)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_rf_mnist_fvec(self):
importFolderPath = "mnist"
csvFilelist = [
# ("mnist_testing.csv.gz", "mnist_testing.csv.gz", 600),
# ("a.csv", "b.csv", 60),
# ("mnist_testing.csv.gz", "mnist_testing.csv.gz", 600),
("mnist_training.csv.gz", "mnist_testing.csv.gz", 600),
]
trial = 0
for (trainCsvFilename, testCsvFilename, timeoutSecs) in csvFilelist:
trialStart = time.time()
# PARSE test****************************************
testKey2 = testCsvFilename + "_" + str(trial) + ".hex"
start = time.time()
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=importFolderPath + "/" + testCsvFilename,
hex_key=testKey2, timeoutSecs=timeoutSecs)
elapsed = time.time() - start
print "parse end on ", testCsvFilename, 'took', elapsed, 'seconds',\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
print "parse result:", parseResult['destination_key']
print "We won't use this pruning of x on test data. See if it prunes the same as the training"
y = 0 # first column is pixel value
print "y:"
# x = h2o_glm.goodXFromColumnInfo(y, key=parseResult['destination_key'], timeoutSecs=300)
# PARSE train****************************************
trainKey2 = trainCsvFilename + "_" + str(trial) + ".hex"
start = time.time()
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=importFolderPath + "/" + trainCsvFilename, schema='local',
hex_key=trainKey2, timeoutSecs=timeoutSecs)
elapsed = time.time() - start
print "parse end on ", trainCsvFilename, 'took', elapsed, 'seconds',\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
print "parse result:", parseResult['destination_key']
# RF+RFView (train)****************************************
print "This is the 'ignore=' we'll use"
ignore_x = h2o_glm.goodXFromColumnInfo(y, key=parseResult['destination_key'], timeoutSecs=300, returnIgnoreX=True)
params = {
'response': 'C' + str(y+1),
'cols': None,
'ignored_cols_by_name': ignore_x,
'classification': 1,
'validation': None,
'ntrees': 2,
'max_depth': 20,
'min_rows': None,
'nbins': 1000,
'mtries': None,
'sample_rate': 0.66,
'seed': None,
}
rfViewInitial = []
for jobDispatch in range(1):
# adjust timeoutSecs with the number of trees
# seems ec2 can be really slow
params['destination_key'] = 'RFModel_' + str('jobDispatch')
kwargs = params.copy()
timeoutSecs = 1200
start = time.time()
rfResult = h2o_cmd.runRF(parseResult=parseResult, timeoutSecs=timeoutSecs, noPoll=not DO_POLL, rfView=DO_POLL, **kwargs)
elapsed = time.time() - start
# print h2o.dump_json(rfResult)
print "rf job dispatch end on ", trainCsvFilename, 'took', time.time() - start, 'seconds'
print "\njobDispatch #", jobDispatch
# FIX! are these already in there?
rfView = {}
rfView['data_key'] = trainKey2
rfView['model_key'] = kwargs['destination_key']
rfView['ntrees'] = kwargs['ntrees']
rfViewInitial.append(rfView)
if not DO_POLL:
h2o_jobs.pollStatsWhileBusy(timeoutSecs=1200, pollTimeoutSecs=120, retryDelaySecs=5)
# FIX! need to add the rfview and predict stuff
# we saved the initial response?
# if we do another poll they should be done now, and better to get it that
# way rather than the inspect (to match what simpleCheckGLM is expected
print "rfViewInitial", rfViewInitial
for rfView in rfViewInitial:
print "Checking completed job:", rfView
print "rfView", h2o.dump_json(rfView)
data_key = rfView['data_key']
model_key = rfView['model_key']
ntrees = rfView['ntrees']
rfView = h2o_cmd.runRFView(None, model_key=model_key, timeoutSecs=60, noPoll=not DO_POLL, doSimpleCheck=False)
(classification_error, classErrorPctList, totalScores) = h2o_rf.simpleCheckRFView(rfv=rfView)
self.assertAlmostEqual(classification_error, 20, delta=2, msg="Classification error %s differs too much" % classification_error)
if not DO_POLL:
h2o_jobs.pollStatsWhileBusy(timeoutSecs=300, pollTimeoutSecs=120, retryDelaySecs=5)
# rfView = h2o_cmd.runRFView(None, data_key, model_key, timeoutSecs=60, noPoll=True, doSimpleCheck=False)
# print "rfView:", h2o.dump_json(rfView)
# "N":1,
# "errs":[0.25,0.1682814508676529],
# "testKey":"syn_binary_10000x10.hex",
# "cm":[[3621,1399],[1515,3465]]}}
rf_model = rfView['drf_model']
cms = rf_model['cms']
errs = rf_model['errs']
# FIX! should update this expected classification error
## (classification_error, classErrorPctList, totalScores) = h2o_rf.simpleCheckRFView(rfv=rfView, ntree=ntrees)
## self.assertAlmostEqual(classification_error, 0.03, delta=0.5, msg="Classification error %s differs too much" % classification_error)
predict = h2o.nodes[0].generate_predictions(model_key=model_key, data_key=data_key)
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
rhiever/sklearn-benchmarks | model_code/random_search/LinearSVC.py | 1 | 1161 | import sys
import pandas as pd
import numpy as np
from sklearn.preprocessing import RobustScaler
from sklearn.svm import LinearSVC
from evaluate_model import evaluate_model
dataset = sys.argv[1]
num_param_combinations = int(sys.argv[2])
random_seed = int(sys.argv[3])
np.random.seed(random_seed)
pipeline_components = [RobustScaler, LinearSVC]
pipeline_parameters = {}
C_values = np.random.uniform(low=1e-10, high=10., size=num_param_combinations)
loss_values = np.random.choice(['hinge', 'squared_hinge'], size=num_param_combinations)
penalty_values = np.random.choice(['l1', 'l2'], size=num_param_combinations)
dual_values = np.random.choice([True, False], size=num_param_combinations)
fit_intercept_values = np.random.choice([True, False], size=num_param_combinations)
all_param_combinations = zip(C_values, loss_values, penalty_values, dual_values, fit_intercept_values)
pipeline_parameters[LinearSVC] = \
[{'C': C, 'loss': loss, 'penalty': penalty, 'fit_intercept': fit_intercept, 'dual': dual, 'random_state': 324089}
for (C, loss, penalty, dual, fit_intercept) in all_param_combinations]
evaluate_model(dataset, pipeline_components, pipeline_parameters)
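# Illustrative invocation (the dataset path is an assumption; it is forwarded
# unchanged to evaluate_model):
#     python LinearSVC.py <path/to/dataset> <num_param_combinations> <random_seed>
# e.g. python LinearSVC.py datasets/spambase.csv.gz 100 42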
| mit |
herilalaina/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 15 | 21336 | import pickle
import tempfile
import shutil
import os
import numbers
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics import cluster as cluster_module
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import accuracy_score
from sklearn.metrics.scorer import _check_multimetric_scoring
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn.externals import joblib
REGRESSION_SCORERS = ['explained_variance', 'r2',
'neg_mean_absolute_error', 'neg_mean_squared_error',
'neg_mean_squared_log_error',
'neg_median_absolute_error', 'mean_absolute_error',
'mean_squared_error', 'median_absolute_error']
CLF_SCORERS = ['accuracy', 'balanced_accuracy',
'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'neg_log_loss', 'log_loss', 'brier_score_loss']
# All supervised cluster scorers (They behave like classification metric)
CLUSTER_SCORERS = ["adjusted_rand_score",
"homogeneity_score",
"completeness_score",
"v_measure_score",
"mutual_info_score",
"adjusted_mutual_info_score",
"normalized_mutual_info_score",
"fowlkes_mallows_score"]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
def _make_estimators(X_train, y_train, y_ml_train):
# Make estimators that make sense to test various scoring methods
sensible_regr = DecisionTreeRegressor(random_state=0)
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
return dict(
[(name, sensible_regr) for name in REGRESSION_SCORERS] +
[(name, sensible_clf) for name in CLF_SCORERS] +
[(name, sensible_clf) for name in CLUSTER_SCORERS] +
[(name, sensible_ml_clf) for name in MULTILABEL_ONLY_SCORERS]
)
X_mm, y_mm, y_ml_mm = None, None, None
ESTIMATORS = None
TEMP_FOLDER = None
def setup_module():
# Create some memory mapped data
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
TEMP_FOLDER = tempfile.mkdtemp(prefix='sklearn_test_score_objects_')
X, y = make_classification(n_samples=30, n_features=5, random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
filename = os.path.join(TEMP_FOLDER, 'test_data.pkl')
joblib.dump((X, y, y_ml), filename)
X_mm, y_mm, y_ml_mm = joblib.load(filename, mmap_mode='r')
ESTIMATORS = _make_estimators(X_mm, y_mm, y_ml_mm)
def teardown_module():
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
# GC closes the mmap file descriptors
X_mm, y_mm, y_ml_mm, ESTIMATORS = None, None, None, None
shutil.rmtree(TEMP_FOLDER)
class EstimatorWithoutFit(object):
"""Dummy estimator to test scoring validators"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test scoring validators"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test scoring validators"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test scoring validators"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_all_scorers_repr():
# Test that all scorers have a working repr
for name, scorer in SCORERS.items():
repr(scorer)
def check_scoring_validator_for_single_metric_usecases(scoring_validator):
# Test all branches of single metric usecases
estimator = EstimatorWithoutFit()
pattern = (r"estimator should be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, scoring_validator, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = scoring_validator(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, scoring_validator, estimator)
scorer = scoring_validator(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = scoring_validator(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
# Test the allow_none parameter for check_scoring alone
if scoring_validator is check_scoring:
estimator = EstimatorWithFit()
scorer = scoring_validator(estimator, allow_none=True)
assert_true(scorer is None)
def check_multimetric_scoring_single_metric_wrapper(*args, **kwargs):
# This wraps the _check_multimetric_scoring to take in single metric
# scoring parameter so we can run the tests that we will run for
# check_scoring, for check_multimetric_scoring too for single-metric
# usecases
scorers, is_multi = _check_multimetric_scoring(*args, **kwargs)
# For all single metric use cases, it should register as not multimetric
assert_false(is_multi)
if args[0] is not None:
assert_true(scorers is not None)
names, scorers = zip(*scorers.items())
assert_equal(len(scorers), 1)
assert_equal(names[0], 'score')
scorers = scorers[0]
return scorers
def test_check_scoring_and_check_multimetric_scoring():
check_scoring_validator_for_single_metric_usecases(check_scoring)
# To make sure the check_scoring is correctly applied to the constituent
# scorers
check_scoring_validator_for_single_metric_usecases(
check_multimetric_scoring_single_metric_wrapper)
# For multiple metric use cases
# Make sure it works for the valid cases
for scoring in (('accuracy',), ['precision'],
{'acc': 'accuracy', 'precision': 'precision'},
('accuracy', 'precision'), ['precision', 'accuracy'],
{'accuracy': make_scorer(accuracy_score),
'precision': make_scorer(precision_score)}):
estimator = LinearSVC(random_state=0)
estimator.fit([[1], [2], [3]], [1, 1, 0])
scorers, is_multi = _check_multimetric_scoring(estimator, scoring)
assert_true(is_multi)
assert_true(isinstance(scorers, dict))
assert_equal(sorted(scorers.keys()), sorted(list(scoring)))
assert_true(all([isinstance(scorer, _PredictScorer)
for scorer in list(scorers.values())]))
if 'acc' in scoring:
assert_almost_equal(scorers['acc'](
estimator, [[1], [2], [3]], [1, 0, 0]), 2. / 3.)
if 'accuracy' in scoring:
assert_almost_equal(scorers['accuracy'](
estimator, [[1], [2], [3]], [1, 0, 0]), 2. / 3.)
if 'precision' in scoring:
assert_almost_equal(scorers['precision'](
estimator, [[1], [2], [3]], [1, 0, 0]), 0.5)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
# Make sure it raises errors when scoring parameter is not valid.
# More weird corner cases are tested at test_validation.py
error_message_regexp = ".*must be unique strings.*"
for scoring in ((make_scorer(precision_score), # Tuple of callables
make_scorer(accuracy_score)), [5],
(make_scorer(precision_score),), (), ('f1', 'f1')):
assert_raises_regexp(ValueError, error_message_regexp,
_check_multimetric_scoring, estimator,
scoring=scoring)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('neg_log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, np.vstack([p[:, -1] for p in y_proba]).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
    score2 = roc_auc_score(y_test, np.vstack([p for p in y_proba]).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_supervised_cluster_scorers():
# Test clustering scorers against gold standard labeling.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
for name in CLUSTER_SCORERS:
score1 = get_scorer(name)(km, X_test, y_test)
score2 = getattr(cluster_module, name)(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
estimator = _make_estimators(X_train, y_train, y_ml_train)
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
@ignore_warnings # UndefinedMetricWarning for P / R scores
def check_scorer_memmap(scorer_name):
scorer, estimator = SCORERS[scorer_name], ESTIMATORS[scorer_name]
if scorer_name in MULTILABEL_ONLY_SCORERS:
score = scorer(estimator, X_mm, y_ml_mm)
else:
score = scorer(estimator, X_mm, y_mm)
assert isinstance(score, numbers.Number), scorer_name
def test_scorer_memmap_input():
# Non-regression test for #6147: some score functions would
# return singleton memmap when computed on memmap data instead of scalar
# float values.
for name in SCORERS.keys():
yield check_scorer_memmap, name
def test_deprecated_names():
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
for name in ('mean_absolute_error', 'mean_squared_error',
'median_absolute_error', 'log_loss'):
warning_msg = "Scoring method %s was renamed to" % name
for scorer in (get_scorer(name), SCORERS[name]):
assert_warns_message(DeprecationWarning,
warning_msg,
scorer, clf, X, y)
assert_warns_message(DeprecationWarning,
warning_msg,
cross_val_score, clf, X, y, scoring=name)
def test_scoring_is_not_metric():
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
LogisticRegression(), f1_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
LogisticRegression(), roc_auc_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
Ridge(), r2_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
KMeans(), cluster_module.adjusted_rand_score)
| bsd-3-clause |
fbuitron/FBMusic_ML_be | INTERACTIVE/MachineLearning/Classification.py | 1 | 4629 | import numpy as np
import pandas as pd
from sklearn import neighbors, tree
from sklearn import cross_validation
from . import Preprocessing
# import Preprocessing as Preprocessing
def excKNN(k, train_data, train_labels, test_data, test_labels):
errorCount = 0.0
knnclf = neighbors.KNeighborsClassifier(k, weights='distance')
knnclf.fit(train_data, train_labels)
numTestVecs = test_data.shape[0]
for i in range(numTestVecs):
classifierResult = knnclf.predict(test_data[i,:].reshape(1,-1))
# print("the classifier came back with: ", classifierResult ,", the real answer is: " , bank_test_labels_arr[i])
if (classifierResult != test_labels[i]):
errorCount += 1.0
    print("the total error rate is: %f" % (errorCount / float(numTestVecs)))
def startClassifyKNN():
dataSet = Preprocessing.getDataFromCSV()
train_data, train_labels, train_ids, test_data, test_labels, test_ids = sep_train_test(dataSet)
train_norm = Preprocessing.completePreprocessing(train_data,train_data)
test_norm = Preprocessing.completePreprocessing(train_data, test_data)
knnclf = neighbors.KNeighborsClassifier(5, weights='distance')
knnclf.fit(train_norm, train_labels)
trainScore = knnclf.score(train_norm, train_labels)
testScore = knnclf.score(test_norm, test_labels)
print(trainScore)
print(testScore)
def startClassifyTree():
dataSet = Preprocessing.getDataFromCSV()
train_data, train_labels, train_ids, test_data, test_labels, test_ids = sep_train_test(dataSet)
treeclf = tree.DecisionTreeClassifier(criterion='entropy')
treeclf = treeclf.fit(np.array(train_data), np.array(train_labels))
print(treeclf.score(train_data, train_labels))
print(treeclf.score(test_data, test_labels))
def test(cv=30):
dataSet = Preprocessing.getDataFromCSV()
data_ = dataSet.iloc[:,1:-1]
labels_ = dataSet.iloc[:,-1]
ids_ = dataSet.iloc[:,0]
data_norm = Preprocessing.completePreprocessing(data_,data_)
knnclf = neighbors.KNeighborsClassifier(5, algorithm='brute')
scores_KNN = cross_validation.cross_val_score(knnclf, data_norm, labels_, cv=cv)
print("Overall Accuracy: %0.2f (+/- %0.2f)" % (scores_KNN.mean(), scores_KNN.std() * 2))
treeclf = tree.DecisionTreeClassifier(criterion='entropy')
scores_ = cross_validation.cross_val_score(treeclf, np.array(data_), np.array(labels_), cv=cv)
print("Overall Accuracy: %0.2f (+/- %0.2f)" % (scores_.mean(), scores_.std() * 2))
def sep_train_test(dataSet, percent_test_cases=0.20):
random_data_set = dataSet.reindex(np.random.permutation(dataSet.index))
number_test_cases = int(random_data_set.shape[0]*percent_test_cases)
test_data = random_data_set.iloc[:number_test_cases,1:-1]
test_labels = random_data_set.iloc[:number_test_cases,-1]
test_ids = random_data_set.iloc[:number_test_cases,0]
train_data = random_data_set.iloc[number_test_cases:,1:-1]
train_labels = random_data_set.iloc[number_test_cases:,-1]
train_ids = random_data_set.iloc[number_test_cases:,0]
return train_data, train_labels, train_ids, test_data, test_labels, test_ids
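# A minimal, hedged usage sketch for sep_train_test(); it assumes a DataFrame
# whose first column is an id and last column the genre label, with the audio
# features in between (the layout sep_train_test slices on):
#
#   dataSet = Preprocessing.getDataFromCSV()
#   train_data, train_labels, train_ids, \
#       test_data, test_labels, test_ids = sep_train_test(dataSet, 0.25)
#   print(train_data.shape, test_data.shape)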
categories = ['blues','classical','country','hiphop','jazz','metal','pop','punk','reggae','rnb','rock']
def transform_labels_to_numbers(labels_):
i=0
categ = np.zeros(shape=labels_.shape)
for cat in categories:
catTrues = labels_ == cat
categ[np.array(catTrues == True)] = i
i+=1
return categ
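# Hedged example of transform_labels_to_numbers(): given labels drawn from the
# `categories` list above, it returns the numeric index of each genre:
#
#   labels_ = pd.Series(['blues', 'rock', 'jazz'])
#   transform_labels_to_numbers(labels_)    # -> array([ 0., 10.,  4.])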
def classify_KNN(k, cv=30):
dataSet = Preprocessing.getDataFromCSV()
data_ = dataSet.iloc[:,1:-1]
labels_ = dataSet.iloc[:,-1]
ids_ = dataSet.iloc[:,0]
    print("Reached this point")
data_norm = Preprocessing.completePreprocessing(data_,data_)
    print("About to run cross-validation, data shape: ", data_norm.shape)
knnclf = neighbors.KNeighborsClassifier(k, algorithm='auto')
scores_KNN = cross_validation.cross_val_score(knnclf, data_norm, labels_, cv=cv)
return "%0.2f (+/- %0.2f)" % (scores_KNN.mean(), scores_KNN.std() * 2)
def classify_Tree(cv=30):
dataSet = Preprocessing.getDataFromCSV()
data_ = dataSet.iloc[:,1:-1]
labels_ = dataSet.iloc[:,-1]
ids_ = dataSet.iloc[:,0]
treeclf = tree.DecisionTreeClassifier(criterion='entropy')
scores_ = cross_validation.cross_val_score(treeclf, np.array(data_), np.array(labels_), cv=cv)
return "%0.2f (+/- %0.2f)" % (scores_.mean(), scores_.std() * 2)
def classify_song_tree(song):
dataSet = Preprocessing.getDataFromCSV()
data_ = dataSet.iloc[:,1:-1]
labels_ = dataSet.iloc[:,-1]
ids_ = dataSet.iloc[:,0]
treeclf = tree.DecisionTreeClassifier(criterion='entropy')
treeclf.fit(data_,labels_)
category = treeclf.predict(song)
return category[0]
# classify_KNN(3)
# startClassifyKNN()
# startClassifyTree()
# test()
# print(classify_KNN(20))
| apache-2.0 |
boland1992/seissuite_iran | build/lib/ambient/spectrum/heat_pickle.py | 8 | 25534 | # -*- coding: utf-8 -*-
"""
Created on Fri July 6 11:04:03 2015
@author: boland
"""
import os
import datetime
import numpy as np
import multiprocessing as mp
import matplotlib.pyplot as plt
import shapefile
from scipy import signal
from obspy import read
from scipy.signal import argrelextrema
from info_dataless import locs_from_dataless
from scipy import interpolate
from matplotlib.colors import LogNorm
import pickle
import fiona
from shapely import geometry
from shapely.geometry import asPolygon, Polygon
from math import sqrt, radians, cos, sin, asin
from descartes.patch import PolygonPatch
from scipy.spatial import ConvexHull
from scipy.cluster.vq import kmeans
from shapely.affinity import scale
from matplotlib.path import Path
import itertools
from scipy.interpolate import griddata
import random
from sklearn.cluster import DBSCAN
# pyproj supplies the WGS84 geodesic used in Geodesic.fast_geodesic() below
import pyproj
wgs84 = pyproj.Geod(ellps='WGS84')
#------------------------------------------------------------------------------
# CLASSES
#------------------------------------------------------------------------------
class InShape:
"""
Class defined in order to define a shapefile boundary AND quickly check
if a given set of coordinates is contained within it. This class uses
the shapely module.
"""
def __init__(self, input_shape, coords=0.):
#initialise boundary shapefile location string input
self.boundary = input_shape
#initialise coords shape input
self.dots = coords
#initialise boundary polygon
self.polygon = 0.
#initialise output coordinates that are contained within the polygon
self.output = 0.
def shape_poly(self):
with fiona.open(self.boundary) as fiona_collection:
# In this case, we'll assume the shapefile only has one later
shapefile_record = fiona_collection.next()
# Use Shapely to create the polygon
self.polygon = geometry.asShape( shapefile_record['geometry'] )
return self.polygon
def point_check(self, coord):
"""
Function that takes a single (2,1) shape input, converts the points
into a shapely.geometry.Point object and then checks if the coord
is contained within the shapefile.
"""
self.polygon = self.shape_poly()
point = geometry.Point(coord[0], coord[1])
if self.polygon.contains(point):
return coord
def shape_bounds(self):
"""
Function that returns the bounding box coordinates xmin,xmax,ymin,ymax
"""
self.polygon = self.shape_poly()
return self.polygon.bounds
def shape_buffer(self, shape=None, size=1., res=1):
"""
Function that returns a new polygon of the larger buffered points.
Can import polygon into function if desired. Default is
self.shape_poly()
"""
if shape is None:
self.polygon = self.shape_poly()
return asPolygon(self.polygon.buffer(size, resolution=res)\
.exterior)
def extract_poly_coords(self, poly):
if poly.type == 'Polygon':
exterior_coords = poly.exterior.coords[:]
elif poly.type == 'MultiPolygon':
exterior_coords = []
for part in poly:
epc = np.asarray(self.extract_poly_coords(part)) # Recursive call
exterior_coords.append(epc)
else:
raise ValueError('Unhandled geometry type: ' + repr(poly.type))
return np.vstack(exterior_coords)
def external_coords(self, shape=None, buff=None, size=1., res=1):
"""
Function that returns the external coords of a buffered shapely
polygon. Note that shape variable input
MUST be a shapely Polygon object.
"""
if shape is not None and buff is not None:
poly = self.shape_buffer(shape=shape, size=size, res=res)
elif shape is not None:
poly = shape
else:
poly = self.shape_poly()
exterior_coords = self.extract_poly_coords(poly)
return exterior_coords
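# A brief, hedged usage sketch for InShape (the shapefile path is illustrative
# only):
#
#   SHAPE = InShape('/path/to/boundary.shp')
#   poly = SHAPE.shape_poly()              # shapely polygon of the boundary
#   SHAPE.point_check((144.9, -37.8))      # returns the coord if inside, else None
#   SHAPE.shape_bounds()                   # (xmin, ymin, xmax, ymax) of the boundary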
class InPoly:
"""
Class defined in order to define a shapefile boundary AND quickly check
if a given set of coordinates is contained within it. The class uses
the matplotlib Path class.
"""
def __init__(self, input_shape, coords=0.):
#initialise boundary shapefile location string input
self.boundary = input_shape
#initialise coords shape input
self.dots = coords
#initialise boundary polygon
self.polygon = 0.
#initialise boundary polygon nodes
self.nodes = 0.
#initialise output coordinates that are contained within the polygon
self.output = 0.
def poly_nodes(self):
"""
Function that returns the nodes of a shapefile as a (n,2) array.
"""
sf = shapefile.Reader(self.boundary)
poly = sf.shapes()[0]
#find polygon nodes lat lons
self.nodes = np.asarray(poly.points)
return self.nodes
def points_from_path(self, poly):
"""
Function that returns nodes from matplotlib Path object.
"""
return poly.vertices
def shapefile_poly(self):
"""
Function that imports a shapefile location path and returns
a matplotlib Path object representing this shape.
"""
self.nodes = self.poly_nodes()
#convert to a matplotlib path class!
self.polygon = Path(self.nodes)
return self.polygon
def node_poly(self, nodes):
"""
Function creates a matplotlib Path object from input nodes.
"""
#convert to a matplotlib path class!
polygon = Path(nodes)
return polygon
def points_in_shapefile_poly(self):
"""
Function that takes a single (2,1) coordinate input, and uses the
contains() function in class matplotlib Path to check if point is
in the polygon.
"""
self.polygon = self.shapefile_poly()
points_in = self.polygon.contains_points(self.dots)
self.output = self.dots[points_in == True]
return np.asarray(self.output)
def points_in(self, points, poly=None, IN=True, indices=False):
"""
Function that takes a many (2,N) points, and uses the
contains() function in class matplotlib Path to check if point is
in the polygon. If IN=True then the function will return points inside
the matplotlib Path object, else if IN=False then the function will
return the points outside the matplotlib Path object.
"""
if poly is None:
poly = self.shapefile_poly()
points_test = poly.contains_points(points)
if indices:
return points_test
else:
output = points[points_test == IN]
return np.asarray(output)
def bounds_poly(self, nodes=None):
"""
Function that returns boundaries of a shapefile polygon.
"""
if nodes is None:
nodes = self.poly_nodes()
xmin, xmax = np.min(nodes[:,0]), np.max(nodes[:,0])
ymin, ymax = np.min(nodes[:,1]), np.max(nodes[:,1])
return xmin, xmax, ymin, ymax
def poly_from_shape(self, shape=None, size=1., res=1):
"""
Function that returns a matplotlib Path object from
buffered shape points. if shape != None then the shape input
MUST be of type shapely polygon.
"""
SHAPE = InShape(self.boundary)
if shape is None:
# Generates shape object from shape_file input
shape = SHAPE
return self.node_poly(shape.external_coords(size=size, res=res))
else:
return self.node_poly(SHAPE.external_coords(shape=shape))
def rand_poly(self, poly=None, N=1e4, IN=True):
"""
Function that takes an input matplotlib Path object (or the default)
and generates N random points within the bounding box around it.
Then M unknown points are returned that ARE contained within the
Path object. This is done for speed. If IN=True then the function
will return points inside the matplotlib Path object, else if
IN=False then the function will return the points outside the
matplotlib Path object.
"""
if poly is None:
#poly = self.shapefile_poly()
xmin, xmax, ymin, ymax = self.bounds_poly()
else:
nodes = self.points_from_path(poly)
xmin, xmax, ymin, ymax = self.bounds_poly(nodes=nodes)
X = abs(xmax - xmin) * np.random.rand(N,1) + xmin
Y = abs(ymax - ymin) * np.random.rand(N,1) + ymin
many_points = np.column_stack((X,Y))
many_points = self.points_in(many_points, poly=poly, IN=IN)
return many_points
def rand_shape(self, shape=None, N=1e4, IN=True):
"""
Function that takes an input shapely Polygon object (or the default)
and generates N random points within the bounding box around it.
Then M unknown points are returned that ARE contained within the
Polygon object. This is done for speed. If IN=True then the function
will return points inside the matplotlib Path object, else
if IN=False then the function will return the points outside
the matplotlib Path object.
"""
        SHAPE = InShape(self.boundary)
        if shape is None:
            # Default to the shapely polygon generated from the shape_file input
            shape = SHAPE.shape_poly()
        # external_coords() expects a shapely polygon, so the matplotlib Path is
        # built from its boundary coordinates here
        poly = self.node_poly(SHAPE.external_coords(shape=shape))
points = self.rand_poly(poly=poly, N=N, IN=IN)
return points
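# A brief, hedged usage sketch for InPoly (the shapefile path and coords are
# illustrative only):
#
#   INPOLY = InPoly('/path/to/boundary.shp', coords=np.random.rand(100, 2))
#   inside = INPOLY.points_in_shapefile_poly()   # subset of coords inside the shape
#   randoms = INPOLY.rand_poly(N=int(1e4))       # random bbox points kept if inside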
class Geodesic:
"""
Class defined in order to create to process points, distances and
other related geodesic calculations and functions
"""
def __init__(self, period_range=[1, 40], km_point=20., max_dist=2e3):
# initialise period_range as [1,40] default for ambient noise
self.per_range = period_range
self.km = km_point
self.max_dist = max_dist
def remove_distance(self, period_range, max_dist=None):
"""
Function that returns a given possible resolvable ambient noise
structure distance range, given the maximum period range
availabe to the study. The distance returned is in km.
Maximum distance default can be reassigned based on the cut-off found
by your time-lag plots for your study!
"""
if max_dist is None:
max_dist = self.max_dist
if type(period_range) == list:
min_dist = min(period_range) * 9
return [min_dist, max_dist]
elif type(period_range) == int or float:
return [period_range*9, max_dist]
def haversine(self, lon1, lat1, lon2, lat2, R=6371):
"""
Calculate the great circle distance between two points
on the earth (specified in decimal degrees). R is radius of
spherical earth. Default is 6371km.
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon, dlat = lon2 - lon1, lat2 - lat1
a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2
c = 2 * asin(sqrt(a))
km = R * c
return km
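    # A brief, hedged illustration of haversine(): the great-circle distance
    # from Melbourne (144.96E, 37.81S) to Sydney (151.21E, 33.87S) is roughly
    # 714 km on a 6371 km spherical earth:
    #
    #   GEODESIC = Geodesic()
    #   GEODESIC.haversine(144.96, -37.81, 151.21, -33.87)   # ~714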
def fast_geodesic(self, lon1, lat1, lon2, lat2, npts):
"""
Returns a list of *npts* points along the geodesic between
(and including) *coord1* and *coord2*, in an array of
shape (*npts*, 2).
@rtype: L{ndarray}
"""
if npts < 2:
raise Exception('nb of points must be at least 2')
path = wgs84.npts(lon1=lon1, lat1=lat1,
lon2=lon2, lat2=lat2,
npts=npts-2)
return np.array([[lon1,lat1]] + path + [[lon2,lat2]])
def paths_calc(self, path_info, km_points=None, per_lims=None):
"""
Function that returns an array of coordinates equidistant along
a great cricle path between two lat-lon coordinates if these points
lay within a certain distance range ... otherwise the points return
only a set of zeros the same size as the array. Default is 1.0km
distance per point.
"""
if per_lims is None:
# if no new default for period limits is defined, then set the
# limit to the default.
per_lims = self.per_range
if km_points is None:
km_points = self.km
lon1, lat1, lon2, lat2 = path_info[0], \
path_info[1], path_info[2], path_info[3]
# interpoint distance <= 1 km, and nb of points >= 100
dist = self.haversine(lon1, lat1, lon2, lat2)
npts = max(int((np.ceil(dist) + 1) / km_points), 100)
path = self.fast_geodesic(lon1, lat1, lon2, lat2, npts)
dist_range = self.remove_distance(per_lims)
if min(dist_range) < dist < max(dist_range):
#remove the closest points along this line that fall below the distance
#find the index of the first point that is above this distance away!
pts_km = npts / float((np.ceil(dist) + 1)) #this gives pts/km
#remove all points below this index in the paths list
            dist_index = int(pts_km * min(dist_range))  # integer index for slicing
path = path[dist_index:]
return path
else:
return np.zeros_like(path)
def fast_paths(self, coord_list):
"""
Function that takes many point coordinate combinations and quickly
passes them through the paths_calc function. coord_list MUST be
of the shape (4, N) whereby each coordinate combination is in a
(4,1) row [lon1,lat1,lon2,lat2].
"""
return map(self.paths_calc, coord_list)
def combine_paths(self, paths):
"""
Function that takes many paths (should be array of same length as
number of stations). This is automatically generated by parallelising
the fast_paths function above.
The output array should only contain unique, no repeating paths
and should be of the shape (2,N) where N is a large number of coords.
"""
#create a flattened numpy array of size 2xN from the paths created!
paths = list(itertools.chain(*paths))
paths = np.asarray(list(itertools.chain\
(*paths)))
#keep all but the repeated coordinates by keeping only unique whole rows!
b = np.ascontiguousarray(paths).view(np.dtype\
((np.void, paths.dtype.itemsize * \
paths.shape[1])))
_, idx = np.unique(b, return_index=True)
paths = np.unique(b).view(paths.dtype)\
.reshape(-1, paths.shape[1])
return paths
def remove_zeros(self, paths):
"""
Function that processes the flattened path output from combine_paths
and removes the zero paths created by paths_calc. Remove zeroes
from paths to ensure all paths that were NOT in the distance threshold
are removed from the path density calculation!
"""
path_lons, path_lats = paths[:,0], paths[:,1]
FIND_ZERO1 = np.where(paths[:,0]==0)[0]
FIND_ZERO2 = np.where(paths[:,1]==0)[0]
if len(FIND_ZERO1) != 0 and len(FIND_ZERO2) != 0:
path_lons = np.delete(path_lons, FIND_ZERO1)
path_lats = np.delete(path_lats, FIND_ZERO2)
return np.column_stack((path_lons, path_lats))
#------------------------------------------------------------------------------
# IMPORT PATHS TO MSEED FILES
#------------------------------------------------------------------------------
def spectrum(tr):
wave = tr.data #this is how to extract a data array from a mseed file
fs = tr.stats.sampling_rate
#hour = str(hour).zfill(2) #create correct format for eqstring
f, Pxx_spec = signal.welch(wave, fs, 'flattop', nperseg=1024, scaling='spectrum')
#plt.semilogy(f, np.sqrt(Pxx_spec))
if len(f) >= 256:
column = np.column_stack((f[:255], np.abs(np.sqrt(Pxx_spec)[:255])))
return column
else:
return 0.
# x = np.linspace(0, 10, 1000)
# f_interp = interp1d(np.sqrt(Pxx_spec),f, kind='cubic')
#x.reverse()
#y.reverse()
# print f_interp(x)
#f,np.sqrt(Pxx_spec),'o',
# plt.figure()
# plt.plot(x,f_interp(x),'-' )
# plt.show()
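# Hedged sketch of how spectrum() is meant to be applied to a single obspy
# trace (the mseed path is illustrative only):
#
#   st = read('/path/to/STATION.2014-01-01.mseed')
#   column = spectrum(st[0])
#   if not isinstance(column, float):
#       freqs, amps = column[:, 0], column[:, 1]   # 255 frequency/amplitude pairs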
def paths_sort(path):
"""
Function defined for customised sorting of the abs_paths list
and will be used in conjunction with the sorted() built in python
function in order to produce file paths in chronological order.
"""
base_name = os.path.basename(path)
stat_name = base_name.split('.')[0]
date = base_name.split('.')[1]
try:
date = datetime.datetime.strptime(date, '%Y-%m-%d')
return date, stat_name
    except Exception:
        # ignore file names that do not match the expected naming convention
        pass
def paths(folder_path, extension):
"""
Function that returns a list of desired absolute paths called abs_paths
of files that contains a given extension e.g. .txt should be entered as
folder_path, txt. This function will run recursively through and find
any and all files within this folder with that extension!
"""
abs_paths = []
for root, dirs, files in os.walk(folder_path):
for f in files:
fullpath = os.path.join(root, f)
if os.path.splitext(fullpath)[1] == '.{}'.format(extension):
abs_paths.append(fullpath)
abs_paths = sorted(abs_paths, key=paths_sort)
return abs_paths
GEODESIC = Geodesic()
# import background shapefile location
shape_path = "/home/boland/Dropbox/University/UniMelb\
/AGOS/PROGRAMS/ANT/Versions/26.04.2015/shapefiles/aus.shp"
INPOLY = InPoly(shape_path)
# generate shape object
# Generate InShape class
SHAPE = InShape(shape_path)
# Create shapely polygon from imported shapefile
UNIQUE_SHAPE = SHAPE.shape_poly()
# set plotting limits for shapefile boundaries
lonmin, latmin, lonmax, latmax = SHAPE.shape_bounds()
print lonmin, latmin, lonmax, latmax
#lonmin, lonmax, latmin, latmax = SHAPE.plot_lims()
dataless_path = 'ALL_AUSTRALIA.870093.dataless'
stat_locs = locs_from_dataless(dataless_path)
#folder_path = '/storage/ANT/INPUT/DATA/AU-2014'
folder_path = '/storage/ANT/INPUT/DATA/AU-2014'
extension = 'mseed'
paths_list = paths(folder_path, extension)
t0_total = datetime.datetime.now()
figs_counter = 0
pickle_file0 = '/storage/ANT/spectral_density/station_pds_maxima/\
AUSTRALIA 2014/noiseinfo_comb.pickle'
pickle_file0 = '/storage/ANT/spectral_density/station_pds_maxima/AUSTRALIA 2014/first_peak_dict_australia_2014.pickle'
pickle_file0 = '/storage/ANT/spectral_density/noise_info0.pickle'
comb_noise = '/storage/ANT/spectral_density/station_pds_maxima/total_noise_combination.pickle'
f = open(name=comb_noise, mode='rb')
noise_info0 = pickle.load(f)
f.close()
# sort the noise
noise_info0 = np.asarray(noise_info0)
#noise_info0 = noise_info0[np.argsort(noise_info0[:, 1])]
# Combine AU with S info
print len(noise_info0)
# find outliers
def reject_outliers(data, m=0.5):
return data[abs(data - np.mean(data)) < m * np.std(data)]
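# Hedged illustration of reject_outliers(): only values within m standard
# deviations of the mean are kept, so with the default m=0.5 isolated spikes
# are dropped:
#
#   reject_outliers(np.array([1., 1., 1., 1., 1., 1., 10.]))
#   # -> array([ 1.,  1.,  1.,  1.,  1.,  1.])   (the 10. spike is rejected)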
outliers = reject_outliers(noise_info0[:,2])
# remove outliers
noise_info0 = np.asarray([info for info in noise_info0 \
if info[2] in outliers])
# filter coordinates that are too close together.
min_dist = 1. #degrees
coords = np.column_stack((noise_info0[:,0], noise_info0[:,1]))
# next remove points outside of the given poly if applicable
coord_indices = INPOLY.points_in(coords, indices=True)
noise_info0 = noise_info0[coord_indices == True]
print noise_info0
coords = np.column_stack((noise_info0[:,0], noise_info0[:,1]))
coord_combs = np.asarray(list(itertools.combinations(coords,2)))
print len(coord_combs)
def coord_combinations(coord_combs):
lon1, lat1 = coord_combs[0][0], coord_combs[0][1]
lon2, lat2 = coord_combs[1][0], coord_combs[1][1]
return [coord_combs, GEODESIC.haversine(lon1, lat1, lon2, lat2)]
t0 = datetime.datetime.now()
pool = mp.Pool()
comb_dists = pool.map(coord_combinations, coord_combs)
pool.close()
pool.join()
t1 = datetime.datetime.now()
print t1-t0
comb_dists = np.asarray(comb_dists)
# sort by distance
comb_dists = comb_dists[np.argsort(comb_dists[:, 1])]
# find where the distances are less than the min_dist
find_min = np.where(comb_dists[:,1]>min_dist)[0]
# remove points where the distances are less than the min_dist
comb_dists = np.delete(comb_dists, find_min, axis=0)
remaining_coords = comb_dists[:,0]
# get unique coordinates from remaining coords
#paths = list(itertools.chain(*paths))
remaining_coords = np.asarray(list(itertools.chain\
(*remaining_coords)))
#keep all but the repeated coordinates by keeping only unique whole rows!
b = np.ascontiguousarray(remaining_coords).view(np.dtype\
((np.void, remaining_coords.dtype.itemsize * \
remaining_coords.shape[1])))
_, idx = np.unique(b, return_index=True)
remaining_coords = np.unique(b).view(remaining_coords.dtype)\
.reshape(-1, remaining_coords.shape[1])
#scan for all points that are within a degree radius of one another!
db = DBSCAN(eps=min_dist).fit(coords)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
# Black removed and is used for noise instead.
unique_labels = set(labels)
clusters = []
cluster_keep = []
for k in unique_labels:
if k != -1:
class_member_mask = (labels == k)
cluster = coords[class_member_mask & core_samples_mask]
#xy = coords[class_member_mask & ~core_samples_mask]
# Select only 1 random point from each cluster to keep. Remove all others!
clusters.append(cluster)
cluster_keep.append(cluster[random.randint(0,len(cluster)-1)])
cluster_keep = np.asarray(cluster_keep)
# flatten clusters array
clusters = np.asarray(list(itertools.chain(*clusters)))
# remove all points in clusters from the overall coords array
coords = np.asarray([coord for coord in coords if coord not in clusters])
# place single representative point from cluster back into overall coord list
coords = np.append(coords, cluster_keep, axis=0)
print len(noise_info0)
# remove cluster coordinates from noise_info0
noise_info0 = np.asarray([info for info in noise_info0 \
if info[0] in coords[:,0]])
fig = plt.figure(figsize=(15,10), dpi=1000)
plt.title('Average Seismic Noise First Peak Maximum PDS\n Australian Network | 2014')
plt.xlabel('Longitude (degrees)')
plt.ylabel('Latitude (degrees)')
print "number of station points: ", len(noise_info0)
patch = PolygonPatch(UNIQUE_SHAPE, facecolor='white',\
edgecolor='k', zorder=1)
ax = fig.add_subplot(111)
ax.add_patch(patch)
x, y = noise_info0[:,0], noise_info0[:,1]
points = np.column_stack((x,y))
xmin, xmax = np.min(x), np.max(x)
ymin, ymax = np.min(y), np.max(y)
values = noise_info0[:,2]
#now we create a grid of values, interpolated from our random sample above
y = np.linspace(ymin, ymax, 200)
x = np.linspace(xmin, xmax, 200)
gridx, gridy = np.meshgrid(x, y)
heat_field = griddata(points, values, (gridx, gridy),
method='cubic',fill_value=0)
#heat_field = np.where(heat_field < 0, 1, heat_field)
heat_field = np.ma.masked_where(heat_field==0,heat_field)
print gridx
plt.pcolor(gridx, gridy, heat_field,
cmap='rainbow',alpha=0.5, norm=LogNorm(vmin=100, vmax=3e4),
zorder=2)
plt.scatter(noise_info0[:,0], noise_info0[:,1], c=noise_info0[:,2],
norm=LogNorm(vmin=100, vmax=3e4), s=35, cmap='rainbow', zorder=3)
#cmin, cmax = np.min(noise_info0[:,2]), np.max(noise_info0[:,2])
#sc = plt.scatter(noise_info0[:,0], noise_info0[:,1], c=noise_info0[:,2],
# norm=LogNorm(vmin=100, vmax=3e4), s=50, cmap=cm, zorder=2)
col = plt.colorbar()
col.ax.set_ylabel('Maximum Power Density Spectrum (V RMS)')
ax.set_xlim(lonmin-0.05*abs(lonmax-lonmin), \
lonmax+0.05*abs(lonmax-lonmin))
ax.set_ylim(latmin-0.05*abs(latmax-latmin), \
latmax+0.05*abs(latmax-latmin))
fig.savefig('station_pds_maxima/noise_map_all.svg',
format='SVG')
| gpl-3.0 |
h2oai/h2o | py/testdir_single_jvm/test_GLM2_poisson_timeout_fail.py | 9 | 1668 | import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_import as h2i
print "Repeated test of case that timeouts in EC2"
def define_params():
paramDict = {
'response': 54,
'family': 'poisson',
'beta_epsilon': 0.001,
'max_iter': 15,
'alpha': 0.5,
'n_folds': 9,
'lambda': 1e-4
}
return paramDict
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init()
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_poisson_timeout_fail(self):
start = time.time()
csvPathname = 'standard/covtype.data'
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='put')
print "upload/parse end on ", csvPathname, 'took', time.time() - start, 'seconds'
kwargs = define_params()
for trial in range(3):
# make timeout bigger with xvals
timeoutSecs = 60 + (kwargs['n_folds']*20)
# or double the 4 seconds per iteration (max_iter+1 worst case?)
timeoutSecs = max(timeoutSecs, (8 * (kwargs['max_iter']+1)))
start = time.time()
glm = h2o_cmd.runGLM(timeoutSecs=timeoutSecs, parseResult=parseResult, **kwargs)
print "glm end on ", csvPathname, 'took', time.time() - start, 'seconds'
h2o_glm.simpleCheckGLM(self, glm, None, **kwargs)
print "Trial #", trial, "completed\n"
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
pravsripad/mne-python | tutorials/epochs/30_epochs_metadata.py | 2 | 6906 | # -*- coding: utf-8 -*-
"""
.. _tut-epochs-metadata:
===========================
Working with Epoch metadata
===========================
This tutorial shows how to add metadata to `~mne.Epochs` objects, and
how to use :ref:`Pandas query strings <pandas:indexing.query>` to select and
plot epochs based on metadata properties.
For this tutorial we'll use a different dataset than usual: the
:ref:`kiloword-dataset`, which contains EEG data averaged across 75 subjects
who were performing a lexical decision (word/non-word) task. The data is in
`~mne.Epochs` format, with each epoch representing the response to a
different stimulus (word). As usual we'll start by importing the modules we
need and loading the data:
"""
# %%
import numpy as np
import pandas as pd
import mne
kiloword_data_folder = mne.datasets.kiloword.data_path()
kiloword_data_file = kiloword_data_folder / 'kword_metadata-epo.fif'
epochs = mne.read_epochs(kiloword_data_file)
# %%
# Viewing ``Epochs`` metadata
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# .. sidebar:: Restrictions on metadata DataFrames
#
# Metadata dataframes are less flexible than typical
# :class:`Pandas DataFrames <pandas.DataFrame>`. For example, the allowed
# data types are restricted to strings, floats, integers, or booleans;
# and the row labels are always integers corresponding to epoch numbers.
# Other capabilities of :class:`DataFrames <pandas.DataFrame>` such as
# :class:`hierarchical indexing <pandas.MultiIndex>` are possible while the
# `~mne.Epochs` object is in memory, but will not survive saving and
# reloading the `~mne.Epochs` object to/from disk.
#
# The metadata attached to `~mne.Epochs` objects is stored as a
# :class:`pandas.DataFrame` containing one row for each epoch. The columns of
# this :class:`~pandas.DataFrame` can contain just about any information you
# want to store about each epoch; in this case, the metadata encodes
# information about the stimulus seen on each trial, including properties of
# the visual word form itself (e.g., ``NumberOfLetters``, ``VisualComplexity``)
# as well as properties of what the word means (e.g., its ``Concreteness``) and
# its prominence in the English lexicon (e.g., ``WordFrequency``). Here are all
# the variables; note that in a Jupyter notebook, viewing a
# :class:`pandas.DataFrame` gets rendered as an HTML table instead of the
# normal Python output block:
epochs.metadata
# %%
# Viewing the metadata values for a given epoch and metadata variable is done
# using any of the :ref:`Pandas indexing <pandas:/reference/indexing.rst>`
# methods such as :obj:`~pandas.DataFrame.loc`,
# :obj:`~pandas.DataFrame.iloc`, :obj:`~pandas.DataFrame.at`,
# and :obj:`~pandas.DataFrame.iat`. Because the
# index of the dataframe is the integer epoch number, the name- and index-based
# selection methods will work similarly for selecting rows, except that
# name-based selection (with :obj:`~pandas.DataFrame.loc`) is inclusive of the
# endpoint:
print('Name-based selection with .loc')
print(epochs.metadata.loc[2:4])
print('\nIndex-based selection with .iloc')
print(epochs.metadata.iloc[2:4])
# %%
# Modifying the metadata
# ^^^^^^^^^^^^^^^^^^^^^^
#
# Like any :class:`pandas.DataFrame`, you can modify the data or add columns as
# needed. Here we convert the ``NumberOfLetters`` column from :class:`float` to
# :class:`integer <int>` data type, and add a :class:`boolean <bool>` column
# that arbitrarily divides the variable ``VisualComplexity`` into high and low
# groups.
epochs.metadata['NumberOfLetters'] = \
epochs.metadata['NumberOfLetters'].map(int)
epochs.metadata['HighComplexity'] = epochs.metadata['VisualComplexity'] > 65
epochs.metadata.head()
# %%
# Selecting epochs using metadata queries
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# All `~mne.Epochs` objects can be subselected by event name, index, or
# :term:`slice` (see :ref:`tut-section-subselect-epochs`). But
# `~mne.Epochs` objects with metadata can also be queried using
# :ref:`Pandas query strings <pandas:indexing.query>` by passing the query
# string just as you would normally pass an event name. For example:
print(epochs['WORD.str.startswith("dis")'])
# %%
# This capability uses the :meth:`pandas.DataFrame.query` method under the
# hood, so you can check out the documentation of that method to learn how to
# format query strings. Here's another example:
print(epochs['Concreteness > 6 and WordFrequency < 1'])
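# %%
# Because these queries are evaluated against the metadata
# :class:`~pandas.DataFrame`, columns we added ourselves (such as the boolean
# ``HighComplexity`` column created above) should be usable in the same way:

print(epochs['HighComplexity'])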
# %%
# Note also that traditional epochs subselection by condition name still works;
# MNE-Python will try the traditional method first before falling back on rich
# metadata querying.
epochs['solenoid'].plot_psd()
# %%
# One use of the Pandas query string approach is to select specific words for
# plotting:
words = ['typhoon', 'bungalow', 'colossus', 'drudgery', 'linguist', 'solenoid']
epochs['WORD in {}'.format(words)].plot(n_channels=29)
# %%
# Notice that in this dataset, each "condition" (A.K.A., each word) occurs only
# once, whereas with the :ref:`sample-dataset` dataset each condition (e.g.,
# "auditory/left", "visual/right", etc) occurred dozens of times. This makes
# the Pandas querying methods especially useful when you want to aggregate
# epochs that have different condition names but that share similar stimulus
# properties. For example, here we group epochs based on the number of letters
# in the stimulus word, and compare the average signal at electrode ``Pz`` for
# each group:
evokeds = dict()
query = 'NumberOfLetters == {}'
for n_letters in epochs.metadata['NumberOfLetters'].unique():
evokeds[str(n_letters)] = epochs[query.format(n_letters)].average()
# sphinx_gallery_thumbnail_number = 3
mne.viz.plot_compare_evokeds(evokeds, cmap=('word length', 'viridis'),
picks='Pz')
# %%
# Metadata can also be useful for sorting the epochs in an image plot. For
# example, here we order the epochs based on word frequency to see if there's a
# pattern to the latency or intensity of the response:
sort_order = np.argsort(epochs.metadata['WordFrequency'])
epochs.plot_image(order=sort_order, picks='Pz')
# %%
# Although there's no obvious relationship in this case, such analyses may be
# useful for metadata variables that more directly index the time course of
# stimulus processing (such as reaction time).
#
#
# Adding metadata to an ``Epochs`` object
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# You can add a metadata :class:`~pandas.DataFrame` to any
# `~mne.Epochs` object (or replace existing metadata) simply by
# assigning to the :attr:`~mne.Epochs.metadata` attribute:
new_metadata = pd.DataFrame(data=['foo'] * len(epochs), columns=['bar'],
index=range(len(epochs)))
epochs.metadata = new_metadata
epochs.metadata.head()
# %%
# You can remove metadata from an `~mne.Epochs` object by setting its
# metadata to ``None``:
epochs.metadata = None
| bsd-3-clause |
herilalaina/scikit-learn | sklearn/feature_selection/tests/test_mutual_info.py | 27 | 6913 | from __future__ import division
import numpy as np
from numpy.testing import run_module_suite
from scipy.sparse import csr_matrix
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_equal, assert_almost_equal,
assert_false, assert_raises, assert_equal,
assert_greater)
from sklearn.feature_selection.mutual_info_ import (
mutual_info_regression, mutual_info_classif, _compute_mi)
def test_compute_mi_dd():
# In discrete case computations are straightforward and can be done
# by hand on given vectors.
x = np.array([0, 1, 1, 0, 0])
y = np.array([1, 0, 0, 0, 1])
H_x = H_y = -(3/5) * np.log(3/5) - (2/5) * np.log(2/5)
H_xy = -1/5 * np.log(1/5) - 2/5 * np.log(2/5) - 2/5 * np.log(2/5)
I_xy = H_x + H_y - H_xy
assert_almost_equal(_compute_mi(x, y, True, True), I_xy)
def test_compute_mi_cc():
# For two continuous variables a good approach is to test on bivariate
# normal distribution, where mutual information is known.
# Mean of the distribution, irrelevant for mutual information.
mean = np.zeros(2)
# Setup covariance matrix with correlation coeff. equal 0.5.
sigma_1 = 1
sigma_2 = 10
corr = 0.5
cov = np.array([
[sigma_1**2, corr * sigma_1 * sigma_2],
[corr * sigma_1 * sigma_2, sigma_2**2]
])
# True theoretical mutual information.
I_theory = (np.log(sigma_1) + np.log(sigma_2) -
0.5 * np.log(np.linalg.det(cov)))
rng = check_random_state(0)
Z = rng.multivariate_normal(mean, cov, size=1000)
x, y = Z[:, 0], Z[:, 1]
# Theory and computed values won't be very close, assert that the
# first figures after decimal point match.
for n_neighbors in [3, 5, 7]:
I_computed = _compute_mi(x, y, False, False, n_neighbors)
assert_almost_equal(I_computed, I_theory, 1)
def test_compute_mi_cd():
# To test define a joint distribution as follows:
# p(x, y) = p(x) p(y | x)
# X ~ Bernoulli(p)
# (Y | x = 0) ~ Uniform(-1, 1)
# (Y | x = 1) ~ Uniform(0, 2)
# Use the following formula for mutual information:
# I(X; Y) = H(Y) - H(Y | X)
# Two entropies can be computed by hand:
# H(Y) = -(1-p)/2 * ln((1-p)/2) - p/2*log(p/2) - 1/2*log(1/2)
# H(Y | X) = ln(2)
# Now we need to implement sampling from out distribution, which is
# done easily using conditional distribution logic.
n_samples = 1000
rng = check_random_state(0)
for p in [0.3, 0.5, 0.7]:
x = rng.uniform(size=n_samples) > p
y = np.empty(n_samples)
mask = x == 0
y[mask] = rng.uniform(-1, 1, size=np.sum(mask))
y[~mask] = rng.uniform(0, 2, size=np.sum(~mask))
I_theory = -0.5 * ((1 - p) * np.log(0.5 * (1 - p)) +
p * np.log(0.5 * p) + np.log(0.5)) - np.log(2)
# Assert the same tolerance.
for n_neighbors in [3, 5, 7]:
I_computed = _compute_mi(x, y, True, False, n_neighbors)
assert_almost_equal(I_computed, I_theory, 1)
def test_compute_mi_cd_unique_label():
# Test that adding unique label doesn't change MI.
n_samples = 100
x = np.random.uniform(size=n_samples) > 0.5
y = np.empty(n_samples)
mask = x == 0
y[mask] = np.random.uniform(-1, 1, size=np.sum(mask))
y[~mask] = np.random.uniform(0, 2, size=np.sum(~mask))
mi_1 = _compute_mi(x, y, True, False)
x = np.hstack((x, 2))
y = np.hstack((y, 10))
mi_2 = _compute_mi(x, y, True, False)
assert_equal(mi_1, mi_2)
# We are going test that feature ordering by MI matches our expectations.
def test_mutual_info_classif_discrete():
X = np.array([[0, 0, 0],
[1, 1, 0],
[2, 0, 1],
[2, 0, 1],
[2, 0, 1]])
y = np.array([0, 1, 2, 2, 1])
# Here X[:, 0] is the most informative feature, and X[:, 1] is weakly
# informative.
mi = mutual_info_classif(X, y, discrete_features=True)
assert_array_equal(np.argsort(-mi), np.array([0, 2, 1]))
def test_mutual_info_regression():
# We generate sample from multivariate normal distribution, using
# transformation from initially uncorrelated variables. The zero
# variables after transformation is selected as the target vector,
# it has the strongest correlation with the variable 2, and
# the weakest correlation with the variable 1.
T = np.array([
[1, 0.5, 2, 1],
[0, 1, 0.1, 0.0],
[0, 0.1, 1, 0.1],
[0, 0.1, 0.1, 1]
])
cov = T.dot(T.T)
mean = np.zeros(4)
rng = check_random_state(0)
Z = rng.multivariate_normal(mean, cov, size=1000)
X = Z[:, 1:]
y = Z[:, 0]
mi = mutual_info_regression(X, y, random_state=0)
assert_array_equal(np.argsort(-mi), np.array([1, 2, 0]))
def test_mutual_info_classif_mixed():
# Here the target is discrete and there are two continuous and one
# discrete feature. The idea of this test is clear from the code.
rng = check_random_state(0)
X = rng.rand(1000, 3)
X[:, 1] += X[:, 0]
y = ((0.5 * X[:, 0] + X[:, 2]) > 0.5).astype(int)
X[:, 2] = X[:, 2] > 0.5
mi = mutual_info_classif(X, y, discrete_features=[2], n_neighbors=3,
random_state=0)
assert_array_equal(np.argsort(-mi), [2, 0, 1])
for n_neighbors in [5, 7, 9]:
mi_nn = mutual_info_classif(X, y, discrete_features=[2],
n_neighbors=n_neighbors, random_state=0)
# Check that the continuous values have an higher MI with greater
# n_neighbors
assert_greater(mi_nn[0], mi[0])
assert_greater(mi_nn[1], mi[1])
# The n_neighbors should not have any effect on the discrete value
# The MI should be the same
assert_equal(mi_nn[2], mi[2])
def test_mutual_info_options():
X = np.array([[0, 0, 0],
[1, 1, 0],
[2, 0, 1],
[2, 0, 1],
[2, 0, 1]], dtype=float)
y = np.array([0, 1, 2, 2, 1], dtype=float)
X_csr = csr_matrix(X)
for mutual_info in (mutual_info_regression, mutual_info_classif):
assert_raises(ValueError, mutual_info_regression, X_csr, y,
discrete_features=False)
mi_1 = mutual_info(X, y, discrete_features='auto', random_state=0)
mi_2 = mutual_info(X, y, discrete_features=False, random_state=0)
mi_3 = mutual_info(X_csr, y, discrete_features='auto',
random_state=0)
mi_4 = mutual_info(X_csr, y, discrete_features=True,
random_state=0)
assert_array_equal(mi_1, mi_2)
assert_array_equal(mi_3, mi_4)
assert_false(np.allclose(mi_1, mi_3))
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause |
xzturn/tensorflow | tensorflow/python/distribute/distribute_lib_test.py | 2 | 21712 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test DistributionStrategy, ReplicaContext, and supporting APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import reduce_util
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class _TestReplicaContext(distribute_lib.ReplicaContext):
def merge_call(self, fn, *args, **kwargs):
return kwargs["test_arg"]
def _get_test_variable(name, synchronization, aggregation):
return {
"name": name,
"synchronization": synchronization,
"aggregation": aggregation
}
def _test_input_fn(input_context):
del input_context
return dataset_ops.DatasetV2.from_tensors(1.).repeat()
class _TestStrategy(distribute_lib.Strategy):
def __init__(self):
super(_TestStrategy, self).__init__(_TestExtended(self))
class _TestExtended(distribute_lib.StrategyExtendedV1):
def __init__(self, distribute):
super(_TestExtended, self).__init__(distribute)
worker_device_pairs = [("", ["/device:CPU:0"])]
self._input_workers = input_lib.InputWorkers(worker_device_pairs)
def _call_for_each_replica(self, fn, args, kwargs):
with _TestReplicaContext(
self._container_strategy(),
replica_id_in_sync_group=constant_op.constant(0, dtypes.int32)):
return fn(*args, **kwargs)
def _create_variable(self, next_creator, **kwargs):
return _get_test_variable(kwargs["name"], kwargs["synchronization"],
kwargs["aggregation"])
def _make_input_fn_iterator(
self,
input_fn,
replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
return input_lib.InputFunctionIterator(input_fn, self._input_workers,
[distribute_lib.InputContext()],
self._container_strategy())
def _experimental_distribute_datasets_from_function(self, dataset_fn):
return dataset_fn(distribute_lib.InputContext())
def _local_results(self, value):
return (value,)
def _reduce_to(self, reduce_op, value, destinations, experimental_hints):
del reduce_op, destinations, experimental_hints
return value
def _experimental_make_numpy_dataset(self, numpy_input, session):
del session
return dataset_ops.DatasetV2.from_tensor_slices(numpy_input)
def _experimental_run_steps_on_iterator(self, fn, iterator, iterations,
initial_loop_values=None):
# TODO(tomhennigan) This is missing many things (e.g. ctx.run_op).
ctx = input_lib.MultiStepContext()
for _ in range(iterations):
fn(ctx, iterator.get_next())
return ctx
def _update(self, var, fn, args, kwargs, group):
# The implementations of _update() and _update_non_slot() are identical
# except _update() passes `var` as the first argument to `fn()`.
return self._update_non_slot(var, fn, (var,) + tuple(args), kwargs, group)
def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
del colocate_with
result = fn(*args, **kwargs)
if group:
return result
else:
return nest.map_structure(self._unwrap, result)
def _assert_in_default_state(t):
t.assertIs(ds_context._get_default_replica_context(),
ds_context.get_replica_context())
t.assertIs(None, ds_context.get_cross_replica_context())
t.assertFalse(ds_context.in_cross_replica_context())
t.assertIs(ds_context._get_default_strategy(), ds_context.get_strategy())
t.assertFalse(ds_context.has_strategy())
def _run_in_and_out_of_scope(unbound_test_method):
def wrapper(test_case):
dist = _TestStrategy()
# Running in the default (replica) scope should be supported.
_assert_in_default_state(test_case)
unbound_test_method(test_case, dist)
# As well as running in the strategy scope.
with dist.scope():
unbound_test_method(test_case, dist)
_assert_in_default_state(test_case)
# When run under a different strategy the test method should fail.
another_strategy = _TestStrategy()
msg = "Mixing different .*Strategy objects"
with test_case.assertRaisesRegexp(RuntimeError, msg):
with another_strategy.scope():
unbound_test_method(test_case, dist)
return wrapper
class TestStrategyTest(test.TestCase):
def testCallForEachReplica(self):
_assert_in_default_state(self)
dist = _TestStrategy()
def run_fn():
replica_context = ds_context.get_replica_context()
self.assertTrue(replica_context is not None)
self.assertIs(None, ds_context.get_cross_replica_context())
self.assertFalse(ds_context.in_cross_replica_context())
self.assertTrue(ds_context.has_strategy())
self.assertIs(dist, ds_context.get_strategy())
self.assertEqual("foo", replica_context.merge_call(None, test_arg="foo"))
expected_value = _get_test_variable(
"bar", variable_scope.VariableSynchronization.AUTO,
variable_scope.VariableAggregation.NONE)
self.assertDictEqual(expected_value,
variable_scope.variable(1.0, name="bar"))
dist.extended.call_for_each_replica(run_fn)
with dist.scope():
dist.extended.call_for_each_replica(run_fn)
_assert_in_default_state(self)
def testScope(self):
_assert_in_default_state(self)
dist = _TestStrategy()
with dist.scope():
self.assertIs(None, ds_context.get_replica_context())
self.assertIs(dist, ds_context.get_cross_replica_context())
self.assertTrue(ds_context.in_cross_replica_context())
self.assertTrue(ds_context.has_strategy())
self.assertIs(dist, ds_context.get_strategy())
expected_value = _get_test_variable(
"baz", variable_scope.VariableSynchronization.AUTO,
variable_scope.VariableAggregation.NONE)
self.assertDictEqual(expected_value,
variable_scope.variable(1.0, name="baz"))
_assert_in_default_state(self)
def testScopeDeviceNestingError(self):
_assert_in_default_state(self)
dist = _TestStrategy()
# Open a device scope with dist.scope().
dist.extended._default_device = "/device:GPU:0"
scope = dist.scope()
scope.__enter__()
self.assertIs(dist, ds_context.get_strategy())
with ops.device("/device:CPU:0"):
with self.assertRaisesRegexp(RuntimeError, "Device scope nesting error"):
scope.__exit__(None, None, None)
scope.__exit__(None, None, None)
_assert_in_default_state(self)
def testScopeVarCreatorNestingError(self):
def creator(next_creator, **kwargs):
return next_creator(**kwargs)
_assert_in_default_state(self)
dist = _TestStrategy()
scope = dist.scope()
scope.__enter__()
self.assertIs(dist, ds_context.get_strategy())
with variable_scope.variable_creator_scope(creator):
with self.assertRaisesRegexp(RuntimeError,
"Variable creator scope nesting error"):
scope.__exit__(None, None, None)
scope.__exit__(None, None, None)
_assert_in_default_state(self)
def testScopeVarScopeNestingError(self):
# We create a new graph here to simplify clean-up, since the error
# we are triggering happens in the middle of scope.__exit__() and
# leaves us in a weird state.
with ops.Graph().as_default():
_assert_in_default_state(self)
dist = _TestStrategy()
scope = dist.scope()
scope.__enter__()
self.assertIs(dist, ds_context.get_strategy())
with variable_scope.variable_scope("AA"):
with self.assertRaisesRegexp(RuntimeError,
"Variable scope nesting error"):
scope.__exit__(None, None, None)
_assert_in_default_state(self)
def testSettingSynchronizationAndAggregation(self):
_assert_in_default_state(self)
dist = _TestStrategy()
with dist.scope():
expected_value = _get_test_variable(
"baz", variable_scope.VariableSynchronization.ON_WRITE,
variable_scope.VariableAggregation.MEAN)
self.assertDictEqual(
expected_value,
variable_scope.variable(
1.0,
name="baz",
synchronization=variable_scope.VariableSynchronization.ON_WRITE,
aggregation=variable_scope.VariableAggregation.MEAN))
_assert_in_default_state(self)
def testSetStrategy(self):
_assert_in_default_state(self)
dist = _TestStrategy()
dist2 = _TestStrategy()
ds_context.experimental_set_strategy(dist)
self.assertIs(None, ds_context.get_replica_context())
self.assertIs(dist, ds_context.get_cross_replica_context())
self.assertTrue(ds_context.in_cross_replica_context())
self.assertTrue(ds_context.has_strategy())
self.assertIs(dist, ds_context.get_strategy())
expected_value = _get_test_variable(
"baz", variable_scope.VariableSynchronization.AUTO,
variable_scope.VariableAggregation.NONE)
self.assertDictEqual(expected_value,
variable_scope.variable(1.0, name="baz"))
ds_context.experimental_set_strategy(dist2)
self.assertIs(dist2, ds_context.get_strategy())
ds_context.experimental_set_strategy(None)
_assert_in_default_state(self)
def testSetStrategyInScope(self):
_assert_in_default_state(self)
dist = _TestStrategy()
with dist.scope():
with self.assertRaisesRegexp(
RuntimeError,
"Must not be called inside a `tf.distribute.Strategy` scope"):
ds_context.experimental_set_strategy(_TestStrategy())
with self.assertRaisesRegexp(
RuntimeError,
"Must not be called inside a `tf.distribute.Strategy` scope"):
ds_context.experimental_set_strategy(dist)
with self.assertRaisesRegexp(
RuntimeError,
"Must not be called inside a `tf.distribute.Strategy` scope"):
ds_context.experimental_set_strategy(None)
_assert_in_default_state(self)
def testSameScopeNesting(self):
_assert_in_default_state(self)
dist = _TestStrategy()
scope_a = dist.scope()
with scope_a:
self.assertIs(dist, ds_context.get_strategy())
scope_b = dist.scope()
with scope_b:
self.assertIs(dist, ds_context.get_strategy())
with scope_a:
self.assertIs(dist, ds_context.get_strategy())
self.assertIs(dist, ds_context.get_strategy())
self.assertIs(dist, ds_context.get_strategy())
dist2 = _TestStrategy()
scope2 = dist2.scope()
with self.assertRaisesRegexp(
RuntimeError,
"Mixing different tf.distribute.Strategy objects"):
with scope2:
pass
_assert_in_default_state(self)
with scope_b:
self.assertIs(dist, ds_context.get_strategy())
_assert_in_default_state(self)
@_run_in_and_out_of_scope
def testMakeInputFnIterator(self, dist):
self.assertIsNotNone(dist.make_input_fn_iterator(_test_input_fn))
@_run_in_and_out_of_scope
def testReduce(self, dist):
x = constant_op.constant(1.)
x_r = dist.reduce(reduce_util.ReduceOp.MEAN, x, axis=None)
self.assertEqual(self.evaluate(x), self.evaluate(x_r))
def testReductions_acceptStringOps(self):
dist = _TestStrategy()
for op in ("mean", "MEAN", "sum", "SUM"):
x = constant_op.constant(1.)
y = constant_op.constant(1.)
x_r = dist.reduce(op, x, axis=None)
self.assertEqual(self.evaluate(x), self.evaluate(x_r))
x_r = dist.extended.reduce_to(op, x, "/CPU:0")
self.assertEqual(self.evaluate(x), self.evaluate(x_r))
x_r, y_r = dist.extended.batch_reduce_to(op,
((x, "/CPU:0"), (y, "/CPU:0")))
self.assertEqual(self.evaluate(x), self.evaluate(x_r))
self.assertEqual(self.evaluate(y), self.evaluate(y_r))
@_run_in_and_out_of_scope
def testExperimentalMakeNumpyDataset(self, dist):
numpy_input = np.ones([10], dtype=np.float32)
dataset = dist.experimental_make_numpy_dataset(numpy_input)
self.assertEqual(
self.evaluate(dataset.reduce(0., lambda a, b: a + b)), 10.)
@_run_in_and_out_of_scope
def testExperimentalRunStepsOnIterator(self, dist):
all_inputs = []
dataset = dataset_ops.Dataset.from_tensors(1.).repeat()
dist.extended.experimental_run_steps_on_iterator(
lambda _, inputs: all_inputs.append(self.evaluate(inputs)),
dataset_ops.make_one_shot_iterator(dataset))
self.assertEqual(all_inputs, [1.])
@_run_in_and_out_of_scope
def testReduceTo(self, dist):
x = constant_op.constant(1.)
x_r = dist.extended.reduce_to(reduce_util.ReduceOp.MEAN, x, "/CPU:0")
self.assertEqual(self.evaluate(x), self.evaluate(x_r))
@_run_in_and_out_of_scope
def testBatchReduceTo(self, dist):
x = constant_op.constant(1.)
y = constant_op.constant(1.)
x_r, y_r = dist.extended.batch_reduce_to(reduce_util.ReduceOp.MEAN,
((x, "/CPU:0"), (y, "/CPU:0")))
self.assertEqual(self.evaluate(x), self.evaluate(x_r))
self.assertEqual(self.evaluate(y), self.evaluate(y_r))
@_run_in_and_out_of_scope
def testUpdate(self, dist):
with dist.scope():
v = variables.Variable(1.)
t = constant_op.constant(2.)
def assign_fn(vv, tt):
self.assertIs(vv, v)
self.assertIs(tt, t)
dist.extended.update(v, assign_fn, (t,))
@_run_in_and_out_of_scope
def testUpdateAutoGraph(self, dist):
with dist.scope():
v = variables.Variable(1.)
t = constant_op.constant(2.)
def assign_fn(unused_vv, unused_tt):
self.assertTrue(converter_testing.is_inside_generated_code())
@def_function.function # AutoGraph is default-on only within tf.function
def test_fn():
dist.extended.update(v, assign_fn, (t,))
test_fn()
@_run_in_and_out_of_scope
def testUpdateNonSlot(self, dist):
t = constant_op.constant(2.)
update_calls = []
dist.extended.update_non_slot(t, lambda: update_calls.append(1))
self.assertEqual(len(update_calls), 1)
@_run_in_and_out_of_scope
def testUpdateNonSlotAutoGraph(self, dist):
t = constant_op.constant(2.)
def update_fn():
self.assertTrue(converter_testing.is_inside_generated_code())
@def_function.function # AutoGraph is default-on only within tf.function
def test_fn():
dist.extended.update_non_slot(t, update_fn)
test_fn()
# _TestStrategy2 is like _TestStrategy, except it doesn't change variable
# creation.
class _TestStrategy2(distribute_lib.Strategy):
def __init__(self):
super(_TestStrategy2, self).__init__(_TestExtended2(self))
class _TestExtended2(_TestExtended):
def _create_variable(self, next_creator, **kwargs):
return next_creator(**kwargs)
class DefaultDistributionStrategyTest(test.TestCase, parameterized.TestCase):
def testMergeCall(self):
_assert_in_default_state(self)
def merge_fn(dist, s):
self.assertIs(ds_context._get_default_strategy(), dist)
self.assertIs(None, ds_context.get_replica_context())
self.assertIs(dist, ds_context.get_cross_replica_context())
self.assertTrue(ds_context.in_cross_replica_context())
self.assertIs(dist, ds_context.get_strategy())
self.assertFalse(ds_context.has_strategy())
return "foo_" + s
replica_ctx = ds_context.get_replica_context()
self.assertIs(ds_context._get_default_replica_context(), replica_ctx)
self.assertEqual("foo_bar", replica_ctx.merge_call(merge_fn, args=("bar",)))
_assert_in_default_state(self)
def testMergeCallAutoGraph(self):
_assert_in_default_state(self)
def merge_fn(_, s):
self.assertTrue(converter_testing.is_inside_generated_code())
return s
@def_function.function # AutoGraph is default-on only within tf.function
def test_fn():
replica_ctx = ds_context.get_replica_context()
replica_ctx.merge_call(merge_fn, args=("bar",))
test_fn()
def testScopeMostlyNoOp(self):
_assert_in_default_state(self)
test_strategy = _TestStrategy2()
with test_strategy.scope():
variable_scope.variable(1.0, name="before")
default_strategy = ds_context._get_default_strategy()
scope = default_strategy.scope()
with scope:
_assert_in_default_state(self)
with test_strategy.scope():
with self.assertRaisesRegexp(
RuntimeError, "Mixing different tf.distribute.Strategy objects"):
variable_scope.variable(1.0, name="error")
with scope:
_assert_in_default_state(self)
with test_strategy.scope():
with self.assertRaisesRegexp(
RuntimeError, "Mixing different tf.distribute.Strategy objects"):
variable_scope.variable(1.0, name="also_error")
_assert_in_default_state(self)
_assert_in_default_state(self)
with test_strategy.scope():
variable_scope.variable(1.0, name="after")
def testExperimentalRunV2(self):
default_strategy = ds_context._get_default_strategy()
dataset = dataset_ops.Dataset.range(10).batch(2)
iterator = default_strategy.extended._make_dataset_iterator(dataset)
next_val = iterator.get_next()
def train_step(input_data):
return input_data
for _ in range(2):
default_strategy.experimental_run_v2(train_step, args=(next_val,))
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testDistributedDatasets(self):
default_strategy = ds_context._get_default_strategy()
if context.executing_eagerly():
dataset_fn = lambda _: dataset_ops.DatasetV2.range(10).batch(2)
dist_dataset = default_strategy.experimental_distribute_dataset(
dataset_fn(distribute_lib.InputContext()))
next_val = next(iter(dist_dataset))
else:
dataset_fn = lambda _: dataset_ops.DatasetV1.range(10).batch(2)
dist_dataset = default_strategy.experimental_distribute_dataset(
dataset_fn(distribute_lib.InputContext()))
iterator = dist_dataset.make_initializable_iterator()
self.evaluate(iterator.initializer)
next_val = iterator.get_next()
self.assertAllEqual([0, 1], self.evaluate(next_val))
@combinations.generate(combinations.combine(mode=["graph", "eager"]))
def testDistributedDatasetsFromFunction(self):
default_strategy = ds_context._get_default_strategy()
if context.executing_eagerly():
dataset_fn = lambda _: dataset_ops.DatasetV2.range(10).batch(2)
dist_dataset_from_func = \
default_strategy.experimental_distribute_datasets_from_function(
dataset_fn)
next_val = next(iter(dist_dataset_from_func))
self.assertAllEqual([0, 1], self.evaluate(next_val))
else:
dataset_fn = lambda _: dataset_ops.DatasetV2.range(10).batch(2)
dist_dataset_from_func = \
default_strategy.experimental_distribute_datasets_from_function(
dataset_fn)
dataset_ops.make_initializable_iterator(dist_dataset_from_func)
class InputContextTest(test.TestCase):
def testProperties(self):
input_context = distribute_lib.InputContext(
num_input_pipelines=2, input_pipeline_id=1, num_replicas_in_sync=6)
self.assertEqual(6, input_context.num_replicas_in_sync)
self.assertEqual(1, input_context.input_pipeline_id)
self.assertEqual(2, input_context.num_input_pipelines)
def testPerReplicaBatchSize(self):
input_context = distribute_lib.InputContext(
num_input_pipelines=2, input_pipeline_id=1, num_replicas_in_sync=6)
self.assertEqual(2, input_context.get_per_replica_batch_size(12))
with self.assertRaises(ValueError):
input_context.get_per_replica_batch_size(13)
def testStr(self):
input_context = distribute_lib.InputContext(
num_input_pipelines=1, input_pipeline_id=0, num_replicas_in_sync=42)
self.assertEqual(
"tf.distribute.InputContext(input pipeline id 0, total: 1)",
str(input_context))
input_context = distribute_lib.InputContext(
num_input_pipelines=3, input_pipeline_id=1, num_replicas_in_sync=42)
self.assertEqual(
"tf.distribute.InputContext(input pipeline id 1, total: 3)",
str(input_context))
if __name__ == "__main__":
test.main()
| apache-2.0 |
nhuntwalker/astroML | astroML/datasets/sdss_specgals.py | 4 | 6586 | from __future__ import print_function, division
import os
import numpy as np
from . import get_data_home
from .tools import download_with_progress_bar
DATA_URL = ("http://www.astro.washington.edu/users/ivezic/"
"DMbook/data/SDSSspecgalsDR8.fit")
def fetch_sdss_specgals(data_home=None, download_if_missing=True):
"""Loader for SDSS Galaxies with spectral information
Parameters
----------
data_home : optional, default=None
Specify another download and cache folder for the datasets. By default
        all astroML data is stored in '~/astroML_data' subfolders.
download_if_missing : optional, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
data : recarray, shape = (327260,)
record array containing pipeline parameters
Notes
-----
These were compiled from the SDSS database using the following SQL query::
SELECT
G.ra, G.dec, S.mjd, S.plate, S.fiberID, --- basic identifiers
--- basic spectral data
S.z, S.zErr, S.rChi2, S.velDisp, S.velDispErr,
--- some useful imaging parameters
G.extinction_r, G.petroMag_r, G.psfMag_r, G.psfMagErr_r,
G.modelMag_u, modelMagErr_u, G.modelMag_g, modelMagErr_g,
G.modelMag_r, modelMagErr_r, G.modelMag_i, modelMagErr_i,
G.modelMag_z, modelMagErr_z, G.petroR50_r, G.petroR90_r,
--- line fluxes for BPT diagram and other derived spec. parameters
GSL.nii_6584_flux, GSL.nii_6584_flux_err, GSL.h_alpha_flux,
GSL.h_alpha_flux_err, GSL.oiii_5007_flux, GSL.oiii_5007_flux_err,
GSL.h_beta_flux, GSL.h_beta_flux_err, GSL.h_delta_flux,
GSL.h_delta_flux_err, GSX.d4000, GSX.d4000_err, GSE.bptclass,
GSE.lgm_tot_p50, GSE.sfr_tot_p50, G.objID, GSI.specObjID
INTO mydb.SDSSspecgalsDR8 FROM SpecObj S CROSS APPLY
dbo.fGetNearestObjEQ(S.ra, S.dec, 0.06) N, Galaxy G,
GalSpecInfo GSI, GalSpecLine GSL, GalSpecIndx GSX, GalSpecExtra GSE
WHERE N.objID = G.objID
AND GSI.specObjID = S.specObjID
AND GSL.specObjID = S.specObjID
AND GSX.specObjID = S.specObjID
AND GSE.specObjID = S.specObjID
--- add some quality cuts to get rid of obviously bad measurements
AND (G.petroMag_r > 10 AND G.petroMag_r < 18)
AND (G.modelMag_u-G.modelMag_r) > 0
AND (G.modelMag_u-G.modelMag_r) < 6
AND (modelMag_u > 10 AND modelMag_u < 25)
AND (modelMag_g > 10 AND modelMag_g < 25)
AND (modelMag_r > 10 AND modelMag_r < 25)
AND (modelMag_i > 10 AND modelMag_i < 25)
AND (modelMag_z > 10 AND modelMag_z < 25)
AND S.rChi2 < 2
AND (S.zErr > 0 AND S.zErr < 0.01)
AND S.z > 0.02
--- end of query ---
Examples
--------
>>> from astroML.datasets import fetch_sdss_specgals
>>> data = fetch_sdss_specgals()
>>> data.shape # number of objects in dataset
(661598,)
>>> data.names[:5] # first five column names
['ra', 'dec', 'mjd', 'plate', 'fiberID']
>>> print(data['ra'][:3]) # first three RA values
[ 146.71419105 146.74414186 146.62857334]
>>> print(data['dec'][:3]) # first three declination values
[-1.04127639 -0.6522198 -0.7651468 ]
"""
# fits is an optional dependency: don't import globally
from astropy.io import fits
data_home = get_data_home(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
archive_file = os.path.join(data_home, os.path.basename(DATA_URL))
if not os.path.exists(archive_file):
if not download_if_missing:
raise IOError('data not present on disk. '
'set download_if_missing=True to download')
fitsdata = download_with_progress_bar(DATA_URL)
open(archive_file, 'wb').write(fitsdata)
hdulist = fits.open(archive_file)
return np.asarray(hdulist[1].data)
def fetch_great_wall(data_home=None, download_if_missing=True,
xlim=(-375, -175), ylim=(-300, 200)):
"""Get the 2D SDSS "Great Wall" distribution, following Cowan et al 2008
Parameters
----------
data_home : optional, default=None
Specify another download and cache folder for the datasets. By default
        all astroML data is stored in '~/astroML_data' subfolders.
download_if_missing : optional, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
xlim, ylim : tuples or None
the limits in Mpc of the data: default values are the same as that
used for the plots in Cowan 2008. If set to None, no cuts will
be performed.
Returns
-------
data : ndarray, shape = (Ngals, 2)
grid of projected (x, y) locations of galaxies in Mpc
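    Examples
    --------
    A minimal usage sketch (the number of galaxies returned depends on the
    downloaded catalog and the cuts applied below)::
        locs = fetch_great_wall()
        x, y = locs[:, 0], locs[:, 1]   # projected positions in Mpc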
"""
# local imports so we don't need dependencies for loading module
from scipy.interpolate import interp1d
from ..cosmology import Cosmology
data = fetch_sdss_specgals(data_home, download_if_missing)
# cut to the part of the sky with the "great wall"
data = data[(data['dec'] > -7) & (data['dec'] < 7)]
data = data[(data['ra'] > 80) & (data['ra'] < 280)]
# do a redshift cut, following Cowan et al 2008
z = data['z']
data = data[(z > 0.01) & (z < 0.12)]
# use redshift to compute absolute r-band magnitude
cosmo = Cosmology(omegaM=0.27, omegaL=0.73, h=0.732)
# first sample the distance modulus on a grid
zgrid = np.linspace(min(data['z']), max(data['z']), 100)
mugrid = np.array([cosmo.mu(z) for z in zgrid])
f = interp1d(zgrid, mugrid)
mu = f(data['z'])
    # do an absolute magnitude cut at Mr < -21
Mr = data['petroMag_r'] + data['extinction_r'] - mu
data = data[Mr < -21]
# compute distances in the equatorial plane
# first sample comoving distance
Dcgrid = np.array([cosmo.Dc(z) for z in zgrid])
f = interp1d(zgrid, Dcgrid)
dist = f(data['z'])
locs = np.vstack([dist * np.cos(data['ra'] * np.pi / 180.),
dist * np.sin(data['ra'] * np.pi / 180.)]).T
# cut on x and y limits if specified
if xlim is not None:
locs = locs[(locs[:, 0] > xlim[0]) & (locs[:, 0] < xlim[1])]
if ylim is not None:
locs = locs[(locs[:, 1] > ylim[0]) & (locs[:, 1] < ylim[1])]
return locs
| bsd-2-clause |
h2oai/h2o | py/testdir_single_jvm/test_GBM_mnist.py | 9 | 2885 | import unittest
import random, sys, time, re
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_glm, h2o_util, h2o_rf, h2o_jobs, h2o_gbm
DO_CLASSIFICATION=True
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
h2o.init(java_heap_GB=28)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GBM_mnist_fvec(self):
importFolderPath = "mnist"
csvFilename = "mnist_training.csv.gz"
timeoutSecs=1800
trialStart = time.time()
# PARSE train****************************************
trainKey = csvFilename + "_" + ".hex"
start = time.time()
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=importFolderPath + "/" + csvFilename, schema='put',
hex_key=trainKey, timeoutSecs=timeoutSecs)
elapsed = time.time() - start
print "parse end on ", csvFilename, 'took', elapsed, 'seconds',\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
print "parse result:", parseResult['destination_key']
# GBM (train)****************************************
modelKey = "GBM_model"
params = {
'classification': 1, # faster?
'destination_key': modelKey,
'learn_rate': .1,
'ntrees': 3,
'max_depth': 8,
'min_rows': 1,
'response': 0, # this dataset has the response in the last col (0-9 to check)
# 'ignored_cols_by_name': range(200,784) # only use the first 200 for speed?
}
kwargs = params.copy()
timeoutSecs = 1800
        # noPoll=True: start the GBM job asynchronously; completion is polled below
start = time.time()
GBMFirstResult = h2o_cmd.runGBM(parseResult=parseResult, noPoll=True, **kwargs)
h2o_jobs.pollStatsWhileBusy(timeoutSecs=1200, pollTimeoutSecs=120, retryDelaySecs=5)
elapsed = time.time() - start
print "GBM training completed in", elapsed, "seconds.", \
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
gbmTrainView = h2o_cmd.runGBMView(model_key=modelKey)
errsLast = gbmTrainView['gbm_model']['errs'][-1]
print "GBM 'errsLast'", errsLast
if DO_CLASSIFICATION:
cms = gbmTrainView['gbm_model']['cms']
cm = cms[-1]['_arr'] # use the last one
print "GBM cms[-1]['_predErr']:", cms[-1]['_predErr']
print "GBM cms[-1]['_classErr']:", cms[-1]['_classErr']
pctWrongTrain = h2o_gbm.pp_cm_summary(cm);
print "\nTrain\n==========\n"
print h2o_gbm.pp_cm(cm)
else:
print "GBMTrainView:", h2o.dump_json(gbmTrainView['gbm_model']['errs'])
if __name__ == '__main__':
h2o.unit_main()
| apache-2.0 |
ChristopherGS/sensor_readings | ML_Sandbox/rnn.py | 1 | 4918 | from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, TimeDistributedDense
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM, GRU
from keras.datasets import cifar10
from sklearn import cross_validation
import pandas as pd
from random import random
import numpy as np
from wrangle import prep
training_data = prep()
y = training_data['state'].values
X = training_data.drop(['state', 'index'], axis=1)
#ACCEL_Z GYRO_X GYRO_Y GYRO_Z rolling_median_x rolling_median_y rolling_median_z rolling_median_gx rolling_median_gy rolling_median_gz
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X, y, test_size=0.1)
x1 = X_train['ACCEL_X'].values
x2 = X_train['ACCEL_Y'].values
x3 = X_train['ACCEL_Z'].values
_x1 = X_test['ACCEL_X'].values
_x2 = X_test['ACCEL_Y'].values
_x3 = X_test['ACCEL_Z'].values
print X_train.shape
print y_train.shape
#======================================================
"""
model = Sequential()
model.add(Embedding(3, 16, input_length=3))
model.add(LSTM(output_dim=128, activation='sigmoid', inner_activation='hard_sigmoid'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop')
X = np.column_stack([x1, x2, x3])
Y = np.column_stack([y_train])
model.fit(X, Y, batch_size=16, nb_epoch=5)
score, acc = model.evaluate(np.column_stack([_x1, _x2, _x3]), np.column_stack([y_test]),show_accuracy=True)
print('Test score:', score)
print('Test accuracy:', acc)
classification = model.predict_classes(np.column_stack([_x1, _x2, _x3]), verbose=1)
print classification
"""
#=========================================================
"""
print('Build model 2...')
model2 = Sequential()
model2.add(Embedding(3, 128, input_length=100))
model2.add(LSTM(128)) # try using a GRU instead, for fun
model2.add(Dropout(0.5))
model2.add(Dense(1))
model2.add(Activation('sigmoid'))
# try using different optimizers and different optimizer configs
model2.compile(loss='binary_crossentropy',
optimizer='adam',
class_mode="binary")
print("Train...")
X = np.column_stack([x1, x2, x3])
Y = np.column_stack([y_train])
_X = np.column_stack([_x1, _x2, _x3])
_Y = np.column_stack([y_test])
model2.fit(X, Y, batch_size=32, nb_epoch=5,
validation_data=(_X, _Y), show_accuracy=True)
score, acc = model2.evaluate(_X, _Y,
batch_size=32,
show_accuracy=True)
print('Test score:', score)
print('Test accuracy:', acc)
"""
#====================================================
"""
NOT SURE ABOUT TIMESTEPS
https://github.com/bigaidream/subsets_ml_cookbook/blob/d4e9e8def2068c83390257d0b5aed9072bf4ece6/dl/theano/theano_keras_sequence2sequence.md
n_in_out = 3
n_hidden = 100
n_samples = 2297
n_timesteps = 400
X = [len(X_train), n_timesteps, np.column_stack([x1, x2, x3])]
Y = np.column_stack([y_train])
_X = np.column_stack([_x1, _x2, _x3])
_Y = np.column_stack([y_test])
model3 = Sequential()
model3.add(GRU( n_hidden, input_dim = n_in_out, return_sequences=True))
model3.add(TimeDistributedDense(n_in_out, input_dim = n_hidden))
model3.compile(loss='mse', optimizer='rmsprop')
X_final = np.random.random((n_samples, n_timesteps, n_in_out))
Y_final = np.random.random((n_samples, n_timesteps, n_in_out))
model3.fit(X_final, Y_final, nb_epoch=10, show_accuracy=True)
"""
"""
1)
An optimizer is one of the two arguments required for compiling a Keras model:
# pass optimizer by name: default parameters will be used
model.compile(loss='mean_squared_error', optimizer='sgd') #Stochastic gradient descent
2)
An objective function (or loss function, or optimization score function) is one of the two parameters required to compile a model
3) Keras has two models: Sequential, a linear stack of layers, and Graph, a directed acyclic graph of layers.
4) Activations can either be used through an Activation layer, or through the activation argument supported by all forward layers:
from keras.layers.core import Activation, Dense
model.add(Dense(64))
model.add(Activation('tanh'))
is equivalent to:
model.add(Dense(64, activation='tanh'))
5) Apply Dropout to the input. Dropout consists in randomly setting a fraction p of input units to 0 at each update during training time,
which helps prevent overfitting.
6) Dense
keras.layers.core.Dense(output_dim, init='glorot_uniform', activation='linear',
weights=None, W_regularizer=None, b_regularizer=None, activity_regularizer=None, W_constraint=None,
b_constraint=None, input_dim=None)
Just your regular fully connected NN layer.
7) One epoch consists of one full training cycle on the training set.
Once every sample in the set is seen, you start again - marking the beginning of the 2nd epoch.
8) To discuss: Should try GRU instead of LSTM
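9) A possible follow-up to point 8 (a sketch only, not from the original sandbox;
it assumes the same column-stacked X, Y, _X, _Y arrays built for model2 above):
model_gru = Sequential()
model_gru.add(Embedding(3, 128, input_length=100))
model_gru.add(GRU(128))
model_gru.add(Dropout(0.5))
model_gru.add(Dense(1))
model_gru.add(Activation('sigmoid'))
model_gru.compile(loss='binary_crossentropy', optimizer='adam', class_mode='binary')
model_gru.fit(X, Y, batch_size=32, nb_epoch=5, validation_data=(_X, _Y), show_accuracy=True)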
""" | bsd-3-clause |
kylepjohnson/ipython | public_talks/2016_10_26_harvard/scripts/3.1+Classification%2C+extract+features.py | 2 | 4646 | # This script is the same as 3.1 but uses tfidf instead and creates bigram bag of words
#
#
#
# feature table saved at:
# ~/cltk_data/user_data/tlg_bow.csv # 662MB
# model at:
# ~/cltk_data/user_data/tlg_bow_df_tfidf_vectorizer_features100000_ngrams2.pickle # 2.3GB
# coding: utf-8
# In[1]:
import datetime as dt
import os
import time
from cltk.corpus.greek.tlg.parse_tlg_indices import get_epithet_of_author
from cltk.corpus.greek.tlg.parse_tlg_indices import get_id_author
import pandas
from sklearn.externals import joblib
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
# # Make vectorizer
# In[26]:
def stream_lemmatized_files(corpus_dir):
    # yield (file_id, text) pairs for every doc in the dir
user_dir = os.path.expanduser('~/cltk_data/user_data/' + corpus_dir)
files = os.listdir(user_dir)
for file in files:
filepath = os.path.join(user_dir, file)
with open(filepath) as fo:
            # TODO: remove words less than 3 chars long
yield file[3:-4], fo.read()
# In[3]:
t0 = dt.datetime.utcnow()
map_id_author = get_id_author()
df = pandas.DataFrame(columns=['id', 'author', 'text', 'epithet'])
for _id, text in stream_lemmatized_files('tlg_lemmatized_no_accents_no_stops'):
author = map_id_author[_id]
epithet = get_epithet_of_author(_id)
df = df.append({'id': _id, 'author': author, 'text': text, 'epithet': epithet}, ignore_index=True)
print(df.shape)
print('... finished in {}'.format(dt.datetime.utcnow() - t0))
print('Number of texts:', len(df))
# In[4]:
text_list = df['text'].tolist()
# make a list of short texts to drop
# For pres, get distributions of words per doc
short_text_drop_index = [index if len(text) > 500 else None for index, text in enumerate(text_list) ] # ~100 words
# In[5]:
t0 = dt.datetime.utcnow()
# TODO: Consider using a generator to feed the vectorizer: http://stackoverflow.com/a/21600406
# time & size counts, w/ 50 texts:
# 0:01:15 & 202M @ ngram_range=(1, 3), min_df=2, max_features=500
# 0:00:26 & 80M @ ngram_range=(1, 2), analyzer='word', min_df=2, max_features=5000
# 0:00:24 & 81M @ ngram_range=(1, 2), analyzer='word', min_df=2, max_features=50000
# time & size counts, w/ 1823 texts:
# 0:02:18 & 46MB @ ngram_range=(1, 1), analyzer='word', min_df=2, max_features=500000
# 0:2:01 & 47 @ ngram_range=(1, 1), analyzer='word', min_df=2, max_features=1000000
# max features in the lemmatized data set: 551428
max_features = 100000
ngrams = 2
vectorizer = TfidfVectorizer(ngram_range=(1, ngrams), analyzer='word',
min_df=2, max_features=max_features)
term_document_matrix = vectorizer.fit_transform(text_list) # input is a list of strings, 1 per document
# save matrix
vector_fp = os.path.expanduser('~/cltk_data/user_data/tfidf_vectorizer_features{0}_ngrams{1}.pickle'.format(max_features, ngrams))
joblib.dump(term_document_matrix, vector_fp)
print('... finished in {}'.format(dt.datetime.utcnow() - t0))
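# A sketch of the streaming alternative mentioned in the TODO above (hypothetical
# helper, not called anywhere in this script): fit_transform accepts any iterable
# of strings, so the lemmatized files could be fed lazily instead of materializing
# text_list first.
def stream_term_document_matrix(corpus_dir='tlg_lemmatized_no_accents_no_stops'):
    streamed_texts = (text for _, text in stream_lemmatized_files(corpus_dir))
    return vectorizer.fit_transform(streamed_texts)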
# # Transform term matrix into feature table
# In[6]:
# Put BoW vectors into a new df
term_document_matrix = joblib.load(vector_fp) # scipy.sparse.csr.csr_matrix
# In[7]:
term_document_matrix.shape
# In[8]:
term_document_matrix_array = term_document_matrix.toarray()
# In[9]:
dataframe_bow = pandas.DataFrame(term_document_matrix_array, columns=vectorizer.get_feature_names())
# In[10]:
ids_list = df['id'].tolist()
# In[11]:
len(ids_list)
# In[12]:
#dataframe_bow.shape
# In[13]:
dataframe_bow['id'] = ids_list
# In[14]:
authors_list = df['author'].tolist()
dataframe_bow['author'] = authors_list
# In[15]:
epithets_list = df['epithet'].tolist()
dataframe_bow['epithet'] = epithets_list
# In[16]:
# For pres, give distribution of epithets, including None
#dataframe_bow['epithet']
# In[21]:
t0 = dt.datetime.utcnow()
# remove rows whose epithet is None (drops 334 rows)
# note on selecting none in pandas: http://stackoverflow.com/a/24489602
dataframe_bow = dataframe_bow[dataframe_bow.epithet.notnull()]
dataframe_bow.shape
print('... finished in {}'.format(dt.datetime.utcnow() - t0))
# In[22]:
t0 = dt.datetime.utcnow()
dataframe_bow.to_csv(os.path.expanduser('~/cltk_data/user_data/tlg_bow.csv'))
print('... finished in {}'.format(dt.datetime.utcnow() - t0))
# In[23]:
print('shape:', dataframe_bow.shape)
# In[24]:
#print(dataframe_bow.head(10))
# In[25]:
# write dataframe_bow to disk, for fast reuse while classifying
# 2.3G
fp_df = os.path.expanduser('~/cltk_data/user_data/tlg_bow_df_tfidf_vectorizer_features{0}_ngrams{1}.pickle'.format(max_features, ngrams))
joblib.dump(dataframe_bow, fp_df)
| mit |