repo_name (stringlengths 6-103) | path (stringlengths 5-191) | copies (stringlengths 1-4) | size (stringlengths 4-6) | content (stringlengths 986-970k) | license (stringclasses, 15 values)
---|---|---|---|---|---|
Xaroth/libzfs-python | libzfs/zdataset.py | 1 | 5791 | from . import bindings
from .handle import LibZFSHandle
from .utils.conversion import boolean_t
libzfs = bindings.libzfs
ffi = bindings.ffi
zfs_type_t = bindings['zfs_type_t']
zprop_type_t = bindings['zprop_type_t']
zfs_prop_t = bindings['zfs_prop_t']
zprop_source_t = bindings['zprop_source_t']
ZFS_MAXNAMELEN = bindings['ZFS_MAXNAMELEN']
def _get_iterfunc(funcname, extra=False):
func = getattr(bindings.libzfs, funcname)
@LibZFSHandle.requires_refcount
@LibZFSHandle.auto
def _inner(self):
datasets = []
@ffi.callback('zfs_iter_f')
def _cb(hdl, arg=None):
ds = ZDataset(hdl)
datasets.append(ds)
return 0
args = [_cb, bindings.ffi.NULL]
if extra:
args.insert(0, boolean_t(False))
func(self.hdl, *args)
return datasets
_inner.__name__ = funcname
return property(_inner)
class ZDatasetProperties(dict):
_altnames = {}
def __repr__(self):
base = dict.__repr__(self)
return "<%s: %s>" % (self.__class__.__name__, base)
class ZDatasetPropSources(dict):
_altnames = {}
def __repr__(self):
base = dict.__repr__(self)
return "<%s: %s>" % (self.__class__.__name__, base)
class ZDataset(object):
_properties = None
_propertysources = None
_propertynames = None
children = _get_iterfunc('zfs_iter_children')
child_filesystems = _get_iterfunc('zfs_iter_filesystems')
child_snapshots = _get_iterfunc('zfs_iter_snapshots', True)
def reset_children(self):
self._zfs_iter_children = None
self._zfs_iter_filesystems = None
self._zfs_iter_snapshots = None
def __init__(self, hdl):
self._hdl = hdl
self._type = zfs_type_t(libzfs.zfs_get_type(hdl))
self._name = ffi.string(libzfs.zfs_get_name(hdl))
def __del__(self):
if hasattr(self, '_hdl'):
libzfs.zfs_close(self._hdl)
def __repr__(self):
return "<%s: %s: %r>" % (self.__class__.__name__, self._name, self._type)
@property
def type(self):
return self._type
@property
def name(self):
return self._name
@property
def guid(self):
return self.properties.get(zfs_prop_t.ZFS_PROP_GUID)
@property
def hdl(self):
return self._hdl
@LibZFSHandle.requires_refcount
@LibZFSHandle.auto
def refresh_properties(self):
self._properties = ZDatasetProperties()
self._propertysources = ZDatasetPropSources()
for prop in zfs_prop_t:
if prop >= zfs_prop_t.ZFS_NUM_PROPS:
continue
if not bool(libzfs.zfs_prop_valid_for_type(int(prop), int(self._type), boolean_t(False))):
continue
sourceholder = ffi.new('zprop_source_t *')
statbuf = ffi.new('char [%s]' % ZFS_MAXNAMELEN)
ptype = zprop_type_t(libzfs.zfs_prop_get_type(int(prop)))
value = None
if ptype == zprop_type_t.PROP_TYPE_NUMBER:
holder = ffi.new("uint64_t *")
res = libzfs.zfs_prop_get_numeric(self.hdl, int(prop), holder, sourceholder, statbuf, ZFS_MAXNAMELEN)
if res == 0:
value = int(holder[0])
else:
holder = ffi.new("char [%s]" % ZFS_MAXNAMELEN)
res = libzfs.zfs_prop_get(self.hdl, int(prop), holder, ZFS_MAXNAMELEN, sourceholder, statbuf,
ZFS_MAXNAMELEN, boolean_t(True))
if res == 0:
value = ffi.string(holder)
if prop not in ZDatasetProperties._altnames:
name = bindings.ffi.string(bindings.libzfs.zfs_prop_to_name(int(prop)))
ZDatasetProperties._altnames[prop] = name
ZDatasetPropSources._altnames[prop] = name
self._propertysources[prop] = zprop_source_t(sourceholder[0])
self._properties[prop] = value
@property
def properties(self):
if self._properties is None:
self.refresh_properties()
return self._properties
@property
def propertysources(self):
if self._propertysources is None:
self.refresh_properties()
return self._propertysources
@property
def propertynames(self):
if self._propertynames is None:
self.refresh_properties()
return self._propertynames
@classmethod
@LibZFSHandle.requires_refcount
def list(cls):
datasets = []
@ffi.callback('zfs_iter_f')
def _callback(handle, arg=None):
zpool = ZDataset(handle)
datasets.append(zpool)
return 0
with LibZFSHandle() as hdl:
libzfs.zfs_iter_root(hdl, _callback, ffi.NULL)
return datasets
@classmethod
@LibZFSHandle.requires_refcount
def get(cls, name=None, guid=None):
if guid:
guid = int(guid)
datasets = cls.list()
if name:
datasets = [dataset for dataset in datasets if dataset.name == name]
if guid:
datasets = [dataset for dataset in datasets if dataset.guid == guid]
if len(datasets) == 1:
return datasets[0]
raise KeyError("Could not find %s matching query" % cls.__name__)
@classmethod
@LibZFSHandle.requires_refcount
def open(cls, name, zfs_type):
try:
zfs_type = zfs_type_t(int(zfs_type))
except ValueError:
raise ValueError("Unknown zfs_type_t")
with LibZFSHandle() as hdl:
zhp = libzfs.zfs_open(hdl, name, zfs_type)
if zhp == ffi.NULL:
raise KeyError("Unknown dataset: %s" % name)
return ZDataset(zhp)
| mit |
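
A minimal usage sketch for the module above, assuming a machine with ZFS datasets available and the libzfs bindings installed; the pool name `tank` is an illustrative assumption.

```python
from libzfs.zdataset import ZDataset

# Enumerate the root datasets and walk their snapshots via the iterator
# properties defined with _get_iterfunc above.
for ds in ZDataset.list():
    print(ds.name, ds.type)
    for snap in ds.child_snapshots:
        print("  snapshot:", snap.name)

# Look up a single dataset by name; raises KeyError unless exactly one matches.
tank = ZDataset.get(name="tank")
print(tank.guid)
print(tank.properties)
```
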
DonBeo/scikit-learn | examples/mixture/plot_gmm_selection.py | 247 | 3223 | """
=================================
Gaussian Mixture Model Selection
=================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
print(__doc__)
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a mixture of Gaussians with EM
gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
bic = np.array(bic)
color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_,
color_iter)):
v, w = linalg.eigh(covar)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan2(w[0][1], w[0][0])
angle = 180 * angle / np.pi # convert to degrees
v *= 4
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
jhaux/tensorflow | tensorflow/contrib/keras/python/keras/datasets/__init__.py | 57 | 1290 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras datasets: utilities for downloading and pre-processing common datasets.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python.keras.datasets import boston_housing
from tensorflow.contrib.keras.python.keras.datasets import cifar10
from tensorflow.contrib.keras.python.keras.datasets import cifar100
from tensorflow.contrib.keras.python.keras.datasets import imdb
from tensorflow.contrib.keras.python.keras.datasets import mnist
from tensorflow.contrib.keras.python.keras.datasets import reuters
| apache-2.0 |
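
A short, hedged example of one of the loaders re-exported above, assuming a TensorFlow build that still ships `tf.contrib.keras`; the first call downloads the MNIST archive into `~/.keras/datasets`, so network access is assumed.

```python
from tensorflow.contrib.keras.python.keras.datasets import mnist

# Returns two (images, labels) tuples of NumPy arrays.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape, x_train.dtype)  # (60000, 28, 28) uint8
print(y_test[:10])                   # integer class labels 0-9
```
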
tensorflow/tensorflow-experimental_link_static_libraries_once | tensorflow/python/data/kernel_tests/repeat_test.py | 7 | 7738 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.repeat()`."""
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.ops import random_access
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
class RepeatTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(count=[0, 3, 7])))
def testFiniteRepeat(self, count):
"""Test a dataset that repeats its input multiple times."""
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
dataset = dataset_ops.Dataset.from_tensors(components).repeat(count)
self.assertEqual(
[c.shape for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
self.assertDatasetProduces(dataset, [components] * count)
@combinations.generate(test_base.default_test_combinations())
def testInfiniteRepeat(self):
# NOTE(mrry): There's not a good way to test that the sequence is infinite.
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
dataset = dataset_ops.Dataset.from_tensors(components).repeat(-1)
self.assertEqual(
[c.shape for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
get_next = self.getNext(dataset)
for _ in range(17):
results = self.evaluate(get_next())
for component, result_component in zip(components, results):
self.assertAllEqual(component, result_component)
@combinations.generate(test_base.default_test_combinations())
def testRepeatRepeat(self):
"""Test the composition of repeat datasets."""
components = (np.array(1), np.array([1, 2, 3]), np.array(37.0))
inner_count, outer_count = 7, 14
dataset = dataset_ops.Dataset.from_tensors(components).repeat(
inner_count).repeat(outer_count)
self.assertEqual(
[c.shape for c in components],
[shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
self.assertDatasetProduces(dataset,
[components] * (inner_count * outer_count))
@combinations.generate(test_base.default_test_combinations())
def testName(self):
dataset = dataset_ops.Dataset.from_tensors(42).repeat(1, name="repeat")
self.assertDatasetProduces(dataset, [42])
class RepeatDatasetCheckpointTest(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def _build_repeat_dataset(self, count, take_count=3):
components = (np.arange(10),)
return dataset_ops.Dataset.from_tensor_slices(components).take(
take_count).repeat(count)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations()))
def testFiniteRepeat(self, verify_fn):
count = 10
verify_fn(
self,
lambda: self._build_repeat_dataset(count),
num_outputs=(3 * count))
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations()))
def testEmptyRepeat(self, verify_fn):
verify_fn(self, lambda: self._build_repeat_dataset(0), num_outputs=0)
@combinations.generate(test_base.default_test_combinations())
def testInfiniteRepeat(self):
self.verify_unused_iterator(
lambda: self._build_repeat_dataset(-1), 10, verify_exhausted=False)
self.verify_multiple_breaks(
lambda: self._build_repeat_dataset(-1), 20, verify_exhausted=False)
self.verify_reset_restored_iterator(
lambda: self._build_repeat_dataset(-1), 20, verify_exhausted=False)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations()))
def testInfiniteEmptyRepeat(self, verify_fn):
verify_fn(self, lambda: self._build_repeat_dataset(-1, 0), num_outputs=0)
class RepeatRandomAccessTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(index=[-1, 6, 7])))
def testInvalidIndex(self, index):
dataset = dataset_ops.Dataset.from_tensor_slices([1, 2, 3]).repeat(2)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(random_access.at(dataset, index=index))
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(index=[-1, 0])))
def testEmptyDataset(self, index):
dataset = dataset_ops.Dataset.from_tensor_slices([]).repeat(2)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(random_access.at(dataset, index=index))
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(elements=[0, 5, 10],
count=[0, 3, 8])))
def testFiniteRepeat(self, elements, count):
dataset = dataset_ops.Dataset.range(elements).repeat(count)
expected_dataset = np.tile(
np.arange(
start=0, stop=elements, step=1, dtype=dtypes.int64.as_numpy_dtype),
count)
for i in range(elements * count):
self.assertEqual(
self.evaluate(random_access.at(dataset, index=i)),
expected_dataset[i])
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
elements=[0, 3, 5], count_1=[0, 1, 2], count_2=[3, 4, 5])))
def testRepeatRepeat(self, elements, count_1, count_2):
dataset = dataset_ops.Dataset.range(elements).repeat(count_1).repeat(
count_2)
expected_dataset = np.tile(
np.arange(
start=0, stop=elements, step=1, dtype=dtypes.int64.as_numpy_dtype),
count_1 * count_2)
for i in range(elements * count_1 * count_2):
self.assertEqual(
self.evaluate(random_access.at(dataset, index=i)),
expected_dataset[i])
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(elements=[3, 5], count=[None, -1, -2])))
def testInfiniteRepeat(self, elements, count):
dataset = dataset_ops.Dataset.range(elements).repeat(count=count)
# Datasets with infinite cardinality do not support random access.
with self.assertRaises(errors.FailedPreconditionError):
self.evaluate(random_access.at(dataset, index=0))
if __name__ == "__main__":
test.main()
| apache-2.0 |
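
The tests above exercise the internal kernel implementation; the same behaviour can be sketched with the public `tf.data` API (TensorFlow 2.x eager execution assumed).

```python
import tensorflow as tf

# Finite repeat: the three-element range is produced twice, in order.
ds = tf.data.Dataset.range(3).repeat(2)
print([int(x) for x in ds])  # [0, 1, 2, 0, 1, 2]

# repeat() with no count (or a negative count) yields an infinite dataset.
infinite = tf.data.Dataset.range(3).repeat()
print(int(infinite.cardinality()) == tf.data.INFINITE_CARDINALITY)  # True
```
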
jgomezdans/grabba_grabba_hey | grabba_grabba_hey/sentinel3_downloader.py | 2 | 9042 | #!/usr/bin/env python
"""
A simple interface to download Sentinel-1, Sentinel-2 and Sentinel-3 datasets
from the COPERNICUS Sentinel Hub.
"""
from functools import partial
import hashlib
import os
import datetime
import sys
import xml.etree.cElementTree as ET
import re
import requests
from concurrent import futures
import logging
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)
logging.getLogger("requests").setLevel(logging.CRITICAL)
logging.getLogger("urllib3").setLevel(logging.CRITICAL)
# hub_url = "https://scihub.copernicus.eu/dhus/search?q="
#hub_url = "https://scihub.copernicus.eu/s3hub/search?q="
hub_url= "https://scihub.copernicus.eu/apihub/search?q="
requests.packages.urllib3.disable_warnings()
def calculate_md5(fname):
hasher = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hasher.update(chunk)
return hasher.hexdigest().upper()
def do_query(query, user="guest", passwd="guest"):
"""
A simple function to pass a query to the Sentinel scihub website. If
successful this function will return the XML file back for further
processing.
query: str
A query string, such as "https://scihub.copernicus.eu/dhus/odata/v1/"
"Products?$orderby=IngestionDate%20desc&$top=100&$skip=100"
Returns:
The relevant XML file, or raises error
"""
r = requests.get(query, auth=(user, passwd), verify=False)
if r.status_code == 200:
return r.text
else:
raise IOError("Something went wrong! Error code %d" % r.status_code)
def download_product(source, target, user="guest", passwd="guest"):
"""
Download a product from the SentinelScihub site, and save it to a named
local disk location given by ``target``.
source: str
A product fully qualified URL
target: str
A filename where to download the URL specified
"""
md5_source = source.replace("$value", "/Checksum/Value/$value")
r = requests.get(md5_source, auth=(user, passwd), verify=False)
md5 = r.text
if os.path.exists(target):
md5_file = calculate_md5(target)
if md5 == md5_file:
return
chunks = 1048576 # 1MiB...
while True:
LOG.debug("Getting %s" % source)
r = requests.get(source, auth=(user, passwd), stream=True,
verify=False)
if not r.ok:
raise IOError("Can't start download... [%s]" % source)
file_size = int(r.headers['content-length'])
LOG.info("Downloading to -> %s" % target)
LOG.info("%d bytes..." % file_size)
with open(target, 'wb') as fp:
cntr = 0
dload = 0
for chunk in r.iter_content(chunk_size=chunks):
if chunk:
cntr += 1
if cntr > 100:
dload += cntr * chunks
LOG.info("\tWriting %d/%d [%5.2f %%]" % (dload, file_size,
100. * float(dload) /
float(file_size)))
sys.stdout.flush()
cntr = 0
fp.write(chunk)
fp.flush()
os.fsync(fp)
md5_file = calculate_md5(target)
if md5_file == md5:
break
return
def parse_xml(xml):
"""
Parse an OData XML file to harvest some relevant information about the products
available and so on. It will return a list of dictionaries, with one
dictionary per product returned from the query. Each dictionary will have a
number of keys (see ``fields_of_interest``), as well as ``link`` and
``quicklook``.
"""
fields_of_interest = ["filename", "identifier", "instrumentshortname",
"orbitnumber", "orbitdirection", "producttype",
"beginposition", "endposition"]
tree = ET.ElementTree(ET.fromstring(xml))
# Search for all the acquired images...
granules = []
for elem in tree.iter(tag="{http://www.w3.org/2005/Atom}entry"):
granule = {}
for img in elem.getchildren():
if img.tag.find("id") >= 0:
granule['id'] = img.text
if img.tag.find("link") >= 0 and img.attrib.has_key("href"):
if img.attrib['href'].find("Quicklook") >= 0:
granule['quicklook'] = img.attrib['href']
elif img.attrib['href'].find("$value") >= 0:
granule['link'] = img.attrib['href'].replace("$value", "")
if img.attrib.has_key("name"):
if img.attrib['name'] in fields_of_interest:
granule[img.attrib['name']] = img.text
granules.append(granule)
return granules
# print img.tag, img.attrib, img.text
# for x in img.getchildren():
def download_sentinel(location, input_start_date, input_sensor, output_dir,
input_end_date=None, username="guest", password="guest"):
input_sensor = input_sensor.upper()
sensor_list = ["S1", "S2", "S3"]
if not input_sensor in sensor_list:
raise ValueError("Sensor can only be S1, S2 or S3. You provided %s"
% input_sensor)
else:
if input_sensor.upper() == "S1":
sensor = "Sentinel-1"
elif input_sensor.upper() == "S2":
sensor = "Sentinel-2"
elif input_sensor.upper() == "S3":
sensor= "Sentinel-3"
sensor_str = 'platformname:%s' % sensor
#sensor_str = 'filename:%s' % input_sensor.upper()
try:
start_date = datetime.datetime.strptime(input_start_date,
"%Y.%m.%d").isoformat()
except ValueError:
try:
start_date = datetime.datetime.strptime(input_start_date,
"%Y-%m-%d").isoformat()
except ValueError:
start_date = datetime.datetime.strptime(input_start_date,
"%Y/%j").isoformat()
start_date = start_date + "Z"
if input_end_date is None:
end_date = "NOW"
else:
try:
end_date = datetime.datetime.strptime(input_end_date,
"%Y.%m.%d").isoformat()
except ValueError:
try:
end_date = datetime.datetime.strptime(input_end_date,
"%Y-%m-%d").isoformat()
except ValueError:
end_date = datetime.datetime.strptime(input_end_date,
"%Y/%j").isoformat()
if len(location) == 2:
location_str = 'footprint:"Intersects(%f, %f)"' % (location[0], location[1])
elif len(location) == 4:
# Build the WKT polygon footprint from the bounding-box corners.
location_str = ('footprint:"Intersects( POLYGON(( '
'%f %f, %f %f, %f %f, %f %f, %f %f ) ))"' % (
location[0], location[0],
location[0], location[1],
location[1], location[1],
location[1], location[0],
location[0], location[0]))
time_str = 'beginposition:[%s TO %s]' % (start_date, end_date)
query = "%s AND %s AND %s" % (location_str, time_str, sensor_str)
query = "%s%s" % (hub_url, query)
# query = "%s%s" % ( hub_url, urllib2.quote(query ) )
LOG.debug(query)
# import pdb; pdb.set_trace()  # debugging breakpoint, left disabled so the query runs unattended
result = do_query(query, user=username, passwd=password)
granules = parse_xml(result)
if not os.path.exists(output_dir):
os.mkdir(output_dir)
ret_files = []
for granule in granules:
download_product(granule['link'] + "$value", os.path.join(output_dir,
granule['filename'].replace("SAFE", "zip")),
user=username, passwd=password)
ret_files.append(os.path.join(output_dir,
granule['filename'].replace("SAFE", "zip")))
return granules, ret_files
if __name__ == "__main__": # location = (43.3650, -8.4100)
# input_start_date = "2015.01.01"
# input_end_date = None
# username = "guest"
# password = "guest"
# input_sensor = "S2"
# output_dir = "/data/selene/ucfajlg/tmp/"
# granules, retfiles = download_sentinel ( location, input_start_date,
# input_sensor, output_dir )
lng = -8.4100
lat = 43.3650
#lat = 39.0985 # Barrax
#lng = -2.1082
#lat = 28.55 # Libya 4
#lng = 23.39
print "Testing S3 on COPERNICUS scientific hub"
location=(lat,lng)
input_start_date="2017.1.1"
input_sensor="S3"
output_dir="/tmp/"
username="s3guest"
password="s3guest"
print "Set username and password variables for Sentinel hub!!!"
download_sentinel(location, input_start_date, input_sensor, output_dir,
input_end_date=None, username=username, password=password)
| gpl-2.0 |
alexsavio/palladium | palladium/julia.py | 2 | 3851 | from julia import Julia
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from palladium.interfaces import Model
from palladium.util import logger
from palladium.util import timer
def make_bridge(): # pragma: no cover
with timer(logger.info, "Creating Julia bridge"):
return Julia()
class AbstractModel(Model):
def __init__(self, fit_func, predict_func,
fit_kwargs=None, predict_kwargs=None,
encode_labels=False):
"""
Instantiates a model with the given *fit_func* and
*predict_func* written in Julia.
:param str fit_func:
The dotted name of the Julia function to use for fitting.
The function must take as its first two arguments the *X*
and *y* arrays. All elements of the optional *fit_kwargs*
dictionary will be passed on to the Julia function as
keyword arguments. The return value of *fit_func* will be
used as the first argument to *predict_func*.
:param str predict_func:
Similar to *fit_func*, this is the dotted name of the Julia
function used for prediction. The first argument of this
function is the return value of *fit_func*. The second
argument is the *X* data array. All elements of the
optional *fit_kwargs* dictionary will be passed on to the
Julia function as keyword arguments. The return value of
*predict_func* is considered to be the target array *y*.
:param bool encode_labels:
If set to *True*, the *y* target array will be automatically
encoded using a :class:`sklearn.preprocessing.LabelEncoder`,
which is useful if you have string labels but your Julia
function only accepts numeric labels.
"""
self.fit_func = fit_func
self.predict_func = predict_func
self.encode_labels = encode_labels
self.fit_kwargs = fit_kwargs or {}
self.predict_kwargs = predict_kwargs or {}
def fit(self, X, y):
self._initialize_julia()
if self.encode_labels:
self.enc_ = LabelEncoder()
y = self.enc_.fit_transform(y)
self.fitted_ = self.fit_func_(X.T, y, **self.fit_kwargs)
return self
def predict(self, X):
X = X.astype(float)
y_pred = self.predict_func_(self.fitted_, X.T, **self.predict_kwargs)
if self.encode_labels:
y_pred = self.enc_.inverse_transform(y_pred)
return y_pred
def _initialize_julia(self):
fit_1, fit_2 = self.fit_func.rsplit('.', 1)
predict_1, predict_2 = self.predict_func.rsplit('.', 1)
bridge = self.bridge_ = make_bridge()
bridge.call("import {}".format(fit_1))
bridge.call("import {}".format(predict_1))
self.fit_func_ = bridge.eval(self.fit_func)
self.predict_func_ = bridge.eval(self.predict_func)
def __getstate__(self):
state = self.__dict__.copy()
# Serialize the fitted attribute in Julia:
iobuf = self.bridge_.eval("IOBuffer()")
self.bridge_.eval('serialize')(iobuf, self.fitted_)
iobuf.seek(0)
state['fitted_'] = iobuf.read()
del state['fit_func_']
del state['predict_func_']
del state['bridge_']
return state
def __setstate__(self, state):
self.__dict__.update(state)
self._initialize_julia()
# Deserialize the fitted Julia attribute:
fitted = state['fitted_']
iobuf = self.bridge_.eval("IOBuffer()")
iobuf.write(fitted)
iobuf.seek(0)
self.fitted_ = self.bridge_.eval('deserialize')(iobuf)
class ClassificationModel(AbstractModel):
def score(self, X, y):
return accuracy_score(self.predict(X), y)
| apache-2.0 |
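
A hedged sketch of how the dotted Julia function names described in the docstring above might be wired up; `DecisionTree.build_forest` and `DecisionTree.apply_forest` are illustrative assumptions about an installed Julia package, not something the module itself provides.

```python
from palladium.julia import ClassificationModel

model = ClassificationModel(
    fit_func='DecisionTree.build_forest',      # hypothetical Julia fit function
    predict_func='DecisionTree.apply_forest',  # hypothetical Julia predict function
    fit_kwargs={},                             # forwarded to the Julia call as keyword args
    encode_labels=True,                        # map string labels to integers via LabelEncoder
)
# model.fit(X, y) transposes X and dispatches into Julia through the bridge;
# model.predict(X) maps the labels back if encode_labels was set.
```
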
arabenjamin/scikit-learn | sklearn/datasets/mlcomp.py | 286 | 3855 | # Copyright (c) 2010 Olivier Grisel <[email protected]>
# License: BSD 3 clause
"""Glue code to load http://mlcomp.org data as a scikit.learn dataset"""
import os
import numbers
from sklearn.datasets.base import load_files
def _load_document_classification(dataset_path, metadata, set_=None, **kwargs):
if set_ is not None:
dataset_path = os.path.join(dataset_path, set_)
return load_files(dataset_path, metadata.get('description'), **kwargs)
LOADERS = {
'DocumentClassification': _load_document_classification,
# TODO: implement the remaining domain formats
}
def load_mlcomp(name_or_id, set_="raw", mlcomp_root=None, **kwargs):
"""Load a datasets as downloaded from http://mlcomp.org
Parameters
----------
name_or_id : the integer id or the string name metadata of the MLComp
dataset to load
set_ : select the portion to load: 'train', 'test' or 'raw'
mlcomp_root : the filesystem path to the root folder where MLComp datasets
are stored, if mlcomp_root is None, the MLCOMP_DATASETS_HOME
environment variable is looked up instead.
**kwargs : domain specific kwargs to be passed to the dataset loader.
Read more in the :ref:`User Guide <datasets>`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'filenames', the files holding the raw to learn, 'target', the
classification labels (integer index), 'target_names',
the meaning of the labels, and 'DESCR', the full description of the
dataset.
Note on the lookup process: depending on the type of name_or_id,
will choose between integer id lookup or metadata name lookup by
looking at the unzipped archives and metadata file.
TODO: implement zip dataset loading too
"""
if mlcomp_root is None:
try:
mlcomp_root = os.environ['MLCOMP_DATASETS_HOME']
except KeyError:
raise ValueError("MLCOMP_DATASETS_HOME env variable is undefined")
mlcomp_root = os.path.expanduser(mlcomp_root)
mlcomp_root = os.path.abspath(mlcomp_root)
mlcomp_root = os.path.normpath(mlcomp_root)
if not os.path.exists(mlcomp_root):
raise ValueError("Could not find folder: " + mlcomp_root)
# dataset lookup
if isinstance(name_or_id, numbers.Integral):
# id lookup
dataset_path = os.path.join(mlcomp_root, str(name_or_id))
else:
# assume name based lookup
dataset_path = None
expected_name_line = "name: " + name_or_id
for dataset in os.listdir(mlcomp_root):
metadata_file = os.path.join(mlcomp_root, dataset, 'metadata')
if not os.path.exists(metadata_file):
continue
with open(metadata_file) as f:
for line in f:
if line.strip() == expected_name_line:
dataset_path = os.path.join(mlcomp_root, dataset)
break
if dataset_path is None:
raise ValueError("Could not find dataset with metadata line: " +
expected_name_line)
# loading the dataset metadata
metadata = dict()
metadata_file = os.path.join(dataset_path, 'metadata')
if not os.path.exists(metadata_file):
raise ValueError(dataset_path + ' is not a valid MLComp dataset')
with open(metadata_file) as f:
for line in f:
if ":" in line:
key, value = line.split(":", 1)
metadata[key.strip()] = value.strip()
format = metadata.get('format', 'unknow')
loader = LOADERS.get(format)
if loader is None:
raise ValueError("No loader implemented for format: " + format)
return loader(dataset_path, metadata, set_=set_, **kwargs)
| bsd-3-clause |
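
A minimal, hedged usage sketch for the loader above; it assumes the "20news-18828" document-classification dataset has already been downloaded from mlcomp.org and unpacked below MLCOMP_DATASETS_HOME (both the path and the dataset name are illustrative).

```python
import os
from sklearn.datasets.mlcomp import load_mlcomp

os.environ.setdefault('MLCOMP_DATASETS_HOME', '/data/mlcomp')  # assumed location

# Name-based lookup scans each metadata file for "name: 20news-18828".
news_train = load_mlcomp('20news-18828', set_='train')
print(news_train.target_names[:3])
print(len(news_train.filenames))
```
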
heli522/scikit-learn | sklearn/cluster/__init__.py | 359 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
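
A short example of one of the estimators exported above; the toy points are made up for illustration.

```python
import numpy as np
from sklearn.cluster import KMeans

X = np.array([[1, 2], [1, 4], [1, 0],
              [10, 2], [10, 4], [10, 0]])
km = KMeans(n_clusters=2, n_init=10, random_state=0).fit(X)
print(km.labels_)           # cluster index assigned to each sample
print(km.cluster_centers_)  # 2 x 2 array of centroids
```
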
Samuc/Proyecto-IV | lib/python2.7/site-packages/jinja2/visitor.py | 1402 | 3316 | # -*- coding: utf-8 -*-
"""
jinja2.visitor
~~~~~~~~~~~~~~
This module implements a visitor for the nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2.nodes import Node
class NodeVisitor(object):
"""Walks the abstract syntax tree and call visitor functions for every
node found. The visitor functions may return values which will be
forwarded by the `visit` method.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
"""
def get_visitor(self, node):
"""Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
method = 'visit_' + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node, *args, **kwargs):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node, *args, **kwargs)
return self.generic_visit(node, *args, **kwargs)
def generic_visit(self, node, *args, **kwargs):
"""Called if no explicit visitor function exists for a node."""
for node in node.iter_child_nodes():
self.visit(node, *args, **kwargs)
class NodeTransformer(NodeVisitor):
"""Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location otherwise it's replaced with the return
value. The return value may be the original node in which case no
replacement takes place.
"""
def generic_visit(self, node, *args, **kwargs):
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, Node):
value = self.visit(value, *args, **kwargs)
if value is None:
continue
elif not isinstance(value, Node):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, Node):
new_node = self.visit(old_value, *args, **kwargs)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
def visit_list(self, node, *args, **kwargs):
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
"""
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
rv = [rv]
return rv
| gpl-2.0 |
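
A hedged sketch of the visitor pattern the module above implements: subclass `NodeVisitor`, add a `visit_<NodeClass>` method, and walk a parsed template. The template string is made up for illustration.

```python
from jinja2 import Environment
from jinja2.visitor import NodeVisitor

class NameCollector(NodeVisitor):
    """Collect every variable name referenced in a template's AST."""

    def __init__(self):
        self.names = []

    def visit_Name(self, node):          # dispatched for jinja2.nodes.Name
        self.names.append(node.name)
        self.generic_visit(node)

ast = Environment().parse("Hello {{ user.name }}, you have {{ count }} items")
collector = NameCollector()
collector.visit(ast)
print(collector.names)                   # ['user', 'count']
```
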
sherbondy/simple-flash | jinja2/visitor.py | 1402 | 3316 | # -*- coding: utf-8 -*-
"""
jinja2.visitor
~~~~~~~~~~~~~~
This module implements a visitor for the nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2.nodes import Node
class NodeVisitor(object):
"""Walks the abstract syntax tree and call visitor functions for every
node found. The visitor functions may return values which will be
forwarded by the `visit` method.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
"""
def get_visitor(self, node):
"""Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
method = 'visit_' + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node, *args, **kwargs):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node, *args, **kwargs)
return self.generic_visit(node, *args, **kwargs)
def generic_visit(self, node, *args, **kwargs):
"""Called if no explicit visitor function exists for a node."""
for node in node.iter_child_nodes():
self.visit(node, *args, **kwargs)
class NodeTransformer(NodeVisitor):
"""Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location otherwise it's replaced with the return
value. The return value may be the original node in which case no
replacement takes place.
"""
def generic_visit(self, node, *args, **kwargs):
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, Node):
value = self.visit(value, *args, **kwargs)
if value is None:
continue
elif not isinstance(value, Node):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, Node):
new_node = self.visit(old_value, *args, **kwargs)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
def visit_list(self, node, *args, **kwargs):
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
"""
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
rv = [rv]
return rv
| mit |
was4444/chromium.src | third_party/jinja2/visitor.py | 1402 | 3316 | # -*- coding: utf-8 -*-
"""
jinja2.visitor
~~~~~~~~~~~~~~
This module implements a visitor for the nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2.nodes import Node
class NodeVisitor(object):
"""Walks the abstract syntax tree and call visitor functions for every
node found. The visitor functions may return values which will be
forwarded by the `visit` method.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
"""
def get_visitor(self, node):
"""Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
method = 'visit_' + node.__class__.__name__
return getattr(self, method, None)
def visit(self, node, *args, **kwargs):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node, *args, **kwargs)
return self.generic_visit(node, *args, **kwargs)
def generic_visit(self, node, *args, **kwargs):
"""Called if no explicit visitor function exists for a node."""
for node in node.iter_child_nodes():
self.visit(node, *args, **kwargs)
class NodeTransformer(NodeVisitor):
"""Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor functions to replace or remove the old node. If the return
value of the visitor function is `None` the node will be removed
from the previous location otherwise it's replaced with the return
value. The return value may be the original node in which case no
replacement takes place.
"""
def generic_visit(self, node, *args, **kwargs):
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, Node):
value = self.visit(value, *args, **kwargs)
if value is None:
continue
elif not isinstance(value, Node):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, Node):
new_node = self.visit(old_value, *args, **kwargs)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
def visit_list(self, node, *args, **kwargs):
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
"""
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
rv = [rv]
return rv
| bsd-3-clause |
tapomayukh/projects_in_python | sandbox_tapo/src/skin_related/BMED_8813_HAP/Scaling/best_kNN_PC/cross_validate_objects_kNN_PC_BMED_8813_HAP_scaled_method_I.py | 1 | 4363 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/BMED_8813_HAP/Data')
from data_method_I import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
while i < 123:
j=0
while j < 90:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
def my_mvpa(Y,num2):
#Using PYMVPA
PCA_data = np.array(Y)
PCA_label_2 = ['Can-Edge-1']*5 + ['Book-Edge-1']*5 + ['Brown-Cardboard-Box-Edge-1']*5 + ['Cinder-Block-Edge-1']*5 + ['Tin-Box-Edge-1']*5 + ['White-Cardboard-Box-Edge-1']*5 + ['Can-Surface']*5 + ['Book-Surface']*5 + ['Brown-Cardboard-Box-Surface']*5 + ['Cinder-Block-Surface']*5 + ['Tin-Box-Surface']*5 + ['White-Cardboard-Box-Surface']*5 + ['Can-Edge-2']*5 + ['Book-Edge-2']*5 + ['Brown-Cardboard-Box-Edge-2']*5 + ['Cinder-Block-Edge-2']*5 + ['Tin-Box-Edge-2']*5 + ['White-Cardboard-Box-Edge-2']*5
clf = kNN(k=num2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_2)
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:num_PC]
m_W, n_W = np.shape(W)
#Projected Data:
Y = (W.T)*B
m_Y, n_Y = np.shape(Y.T)
return Y.T
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
num_PC=1
while num_PC <=20:
Proj = np.zeros((90,num_PC))
Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
# PYMVPA:
num=0
cv_acc = np.zeros(21)
while num <=20:
cv_acc[num] = my_mvpa(Proj,num)
num = num+1
plot(np.arange(21),cv_acc,'-s')
grid('True')
hold('True')
num_PC = num_PC+1
legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
ylabel('Cross-Validation Accuracy')
xlabel('k in k-NN Classifier')
show()
| mit |
heli522/scikit-learn | benchmarks/bench_sparsify.py | 320 | 3372 | """
Benchmark SGD prediction time with dense/sparse coefficients.
Invoke with
-----------
$ kernprof.py -l sparsity_benchmark.py
$ python -m line_profiler sparsity_benchmark.py.lprof
Typical output
--------------
input data sparsity: 0.050000
true coef sparsity: 0.000100
test data sparsity: 0.027400
model sparsity: 0.000024
r^2 on test data (dense model) : 0.233651
r^2 on test data (sparse model) : 0.233651
Wrote profile results to sparsity_benchmark.py.lprof
Timer unit: 1e-06 s
File: sparsity_benchmark.py
Function: benchmark_dense_predict at line 51
Total time: 0.532979 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
51 @profile
52 def benchmark_dense_predict():
53 301 640 2.1 0.1 for _ in range(300):
54 300 532339 1774.5 99.9 clf.predict(X_test)
File: sparsity_benchmark.py
Function: benchmark_sparse_predict at line 56
Total time: 0.39274 s
Line # Hits Time Per Hit % Time Line Contents
==============================================================
56 @profile
57 def benchmark_sparse_predict():
58 1 10854 10854.0 2.8 X_test_sparse = csr_matrix(X_test)
59 301 477 1.6 0.1 for _ in range(300):
60 300 381409 1271.4 97.1 clf.predict(X_test_sparse)
"""
from scipy.sparse.csr import csr_matrix
import numpy as np
from sklearn.linear_model.stochastic_gradient import SGDRegressor
from sklearn.metrics import r2_score
np.random.seed(42)
def sparsity_ratio(X):
return np.count_nonzero(X) / float(n_samples * n_features)
n_samples, n_features = 5000, 300
X = np.random.randn(n_samples, n_features)
inds = np.arange(n_samples)
np.random.shuffle(inds)
X[inds[int(n_features / 1.2):]] = 0 # sparsify input
print("input data sparsity: %f" % sparsity_ratio(X))
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[n_features/2:]] = 0 # sparsify coef
print("true coef sparsity: %f" % sparsity_ratio(coef))
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal((n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples / 2], y[:n_samples / 2]
X_test, y_test = X[n_samples / 2:], y[n_samples / 2:]
print("test data sparsity: %f" % sparsity_ratio(X_test))
###############################################################################
clf = SGDRegressor(penalty='l1', alpha=.2, fit_intercept=True, n_iter=2000)
clf.fit(X_train, y_train)
print("model sparsity: %f" % sparsity_ratio(clf.coef_))
def benchmark_dense_predict():
for _ in range(300):
clf.predict(X_test)
def benchmark_sparse_predict():
X_test_sparse = csr_matrix(X_test)
for _ in range(300):
clf.predict(X_test_sparse)
def score(y_test, y_pred, case):
r2 = r2_score(y_test, y_pred)
print("r^2 on test data (%s) : %f" % (case, r2))
score(y_test, clf.predict(X_test), 'dense model')
benchmark_dense_predict()
clf.sparsify()
score(y_test, clf.predict(X_test), 'sparse model')
benchmark_sparse_predict()
| bsd-3-clause |
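
A hedged sketch of the pattern the benchmark above measures, on a much smaller problem: fit an L1-penalised SGD model, call `sparsify()` so that `coef_` is stored as a SciPy CSR matrix, and predict on sparse input. Parameter names follow current scikit-learn (`max_iter` rather than the older `n_iter` used above).

```python
import numpy as np
from scipy import sparse
from sklearn.linear_model import SGDRegressor

rng = np.random.RandomState(42)
X = rng.randn(200, 50)
y = 3.0 * X[:, 0] + 0.01 * rng.randn(200)    # only the first feature matters

reg = SGDRegressor(penalty='l1', alpha=0.2, max_iter=2000, tol=None)
reg.fit(X, y)
reg.sparsify()                               # convert coef_ to CSR in place
print(sparse.issparse(reg.coef_))            # True
print(reg.predict(sparse.csr_matrix(X))[:3]) # prediction works on sparse input
```
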
heli522/scikit-learn | sklearn/svm/tests/test_bounds.py | 277 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
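
A hedged usage sketch of the function under test above: `l1_min_c` returns the smallest C below which an L1-penalised model must have all-zero coefficients, so fitting just above that threshold should yield at least one non-zero weight. The data is the toy set from the tests; `solver='liblinear'` is spelled out for newer scikit-learn versions.

```python
from sklearn.svm import l1_min_c
from sklearn.linear_model import LogisticRegression

X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
y = [0, 1, 1, 1]

c_min = l1_min_c(X, y, loss='log')
clf = LogisticRegression(penalty='l1', solver='liblinear', C=c_min * 1.01)
clf.fit(X, y)
print(c_min)
print(clf.coef_)   # at least one coefficient should now be non-zero
```
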
arabenjamin/scikit-learn | sklearn/svm/tests/test_bounds.py | 277 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
DonBeo/scikit-learn | sklearn/svm/tests/test_bounds.py | 277 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
tapomayukh/projects_in_python | classification/Classification_with_kNN/Single_Contact_Classification/Final/best_kNN_PCA/4-categories/24/test11_cross_validate_categories_24_1200ms.py | 1 | 4733 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/24')
from data_24 import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
def my_mvpa(Y,num2):
#Using PYMVPA
PCA_data = np.array(Y)
PCA_label_1 = ['Rigid-Fixed']*35 + ['Rigid-Movable']*35 + ['Soft-Fixed']*35 + ['Soft-Movable']*35
PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=num2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:num_PC]
m_W, n_W = np.shape(W)
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
return Y.T
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
num_PC=1
while num_PC <=20:
Proj = np.zeros((140,num_PC))
Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
# PYMVPA:
num=0
cv_acc = np.zeros(21)
while num <=20:
cv_acc[num] = my_mvpa(Proj,num)
num = num+1
plot(np.arange(21),cv_acc,'-s')
grid('True')
hold('True')
num_PC = num_PC+1
legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
ylabel('Cross-Validation Accuracy')
xlabel('k in k-NN Classifier')
show()
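# --- Illustrative sketch (not part of the original experiment) ---------------
# The pipeline above projects the feature matrix onto its leading principal
# components and scores a k-NN classifier by cross-validation through the
# (old) PyMVPA API. As a minimal, hedged sketch, the function below expresses
# the same idea with scikit-learn instead; scikit-learn is an assumption here
# (it is not imported by this script), plain 5-fold CV stands in for the
# chunk-wise splitting used above, and the function is never called, so the
# behaviour of the script is unchanged.
def sketch_pca_knn_sklearn(X, labels, num_pc=20, k=5):
    from sklearn.decomposition import PCA
    from sklearn.neighbors import KNeighborsClassifier
    from sklearn.cross_validation import cross_val_score
    projected = PCA(n_components=num_pc).fit_transform(X)
    clf = KNeighborsClassifier(n_neighbors=k)
    scores = cross_val_score(clf, projected, labels, cv=5)
    return 100.0 * scores.mean()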
| mit |
DonBeo/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 241 | 5577 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM algorithm does not assume any parametric form for the data
distribution and can therefore model its complex shape much better.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <[email protected]>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list( legend1.values() )
legend1_keys_list = list( legend1.keys() )
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")
legend2_values_list = list( legend2.values() )
legend2_keys_list = list( legend2.keys() )
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
           (legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
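# --- Illustrative sketch (not part of the original example) ------------------
# The docstring above contrasts the empirical and robust covariance estimates
# with the One-Class SVM, but the example only draws their decision boundaries.
# The helper below is a minimal, hedged sketch of how explicit inlier/outlier
# labels could be obtained from the same three estimators; the helper name and
# the reuse of contamination=0.261 are assumptions, and the function is never
# called, so the example's output is unchanged.
def _sketch_label_outliers(X, contamination=0.261):
    estimators = {
        "empirical": EllipticEnvelope(support_fraction=1.,
                                      contamination=contamination),
        "robust": EllipticEnvelope(contamination=contamination),
        "ocsvm": OneClassSVM(nu=contamination, gamma=0.05)}
    masks = {}
    for name, est in estimators.items():
        est.fit(X)
        # predict() returns +1 for inliers and -1 for outliers
        masks[name] = est.predict(X) == -1
    return masks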
| bsd-3-clause |
arabenjamin/scikit-learn | sklearn/linear_model/tests/test_ridge.py | 129 | 22974 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.metrics import mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
from sklearn.linear_model.base import LinearRegression
from sklearn.linear_model.ridge import ridge_regression
from sklearn.linear_model.ridge import Ridge
from sklearn.linear_model.ridge import _RidgeGCV
from sklearn.linear_model.ridge import RidgeCV
from sklearn.linear_model.ridge import RidgeClassifier
from sklearn.linear_model.ridge import RidgeClassifierCV
from sklearn.linear_model.ridge import _solve_cholesky
from sklearn.linear_model.ridge import _solve_cholesky_kernel
from sklearn.grid_search import GridSearchCV
from sklearn.cross_validation import KFold
diabetes = datasets.load_diabetes()
X_diabetes, y_diabetes = diabetes.data, diabetes.target
ind = np.arange(X_diabetes.shape[0])
rng = np.random.RandomState(0)
rng.shuffle(ind)
ind = ind[:200]
X_diabetes, y_diabetes = X_diabetes[ind], y_diabetes[ind]
iris = datasets.load_iris()
X_iris = sp.csr_matrix(iris.data)
y_iris = iris.target
DENSE_FILTER = lambda X: X
SPARSE_FILTER = lambda X: sp.csr_matrix(X)
def test_ridge():
# Ridge regression convergence test using score
# TODO: for this test to be robust, we should use a dataset instead
# of np.random.
rng = np.random.RandomState(0)
alpha = 1.0
for solver in ("svd", "sparse_cg", "cholesky", "lsqr"):
# With more samples than features
n_samples, n_features = 6, 5
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (X.shape[1], ))
assert_greater(ridge.score(X, y), 0.47)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.47)
# With more features than samples
n_samples, n_features = 5, 10
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=alpha, solver=solver)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), .9)
if solver == "cholesky":
# Currently the only solver to support sample_weight.
ridge.fit(X, y, sample_weight=np.ones(n_samples))
assert_greater(ridge.score(X, y), 0.9)
def test_primal_dual_relationship():
y = y_diabetes.reshape(-1, 1)
coef = _solve_cholesky(X_diabetes, y, alpha=[1e-2])
K = np.dot(X_diabetes, X_diabetes.T)
dual_coef = _solve_cholesky_kernel(K, y, alpha=[1e-2])
coef2 = np.dot(X_diabetes.T, dual_coef).T
assert_array_almost_equal(coef, coef2)
def test_ridge_singular():
# test on a singular matrix
rng = np.random.RandomState(0)
n_samples, n_features = 6, 6
y = rng.randn(n_samples // 2)
y = np.concatenate((y, y))
X = rng.randn(n_samples // 2, n_features)
X = np.concatenate((X, X), axis=0)
ridge = Ridge(alpha=0)
ridge.fit(X, y)
assert_greater(ridge.score(X, y), 0.9)
def test_ridge_sample_weights():
rng = np.random.RandomState(0)
for solver in ("cholesky", ):
for n_samples, n_features in ((6, 5), (5, 10)):
for alpha in (1.0, 1e-2):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
coefs = ridge_regression(X, y,
alpha=alpha,
sample_weight=sample_weight,
solver=solver)
# Sample weight can be implemented via a simple rescaling
# for the square loss.
coefs2 = ridge_regression(
X * np.sqrt(sample_weight)[:, np.newaxis],
y * np.sqrt(sample_weight),
alpha=alpha, solver=solver)
assert_array_almost_equal(coefs, coefs2)
# Test for fit_intercept = True
est = Ridge(alpha=alpha, solver=solver)
est.fit(X, y, sample_weight=sample_weight)
# Check using Newton's Method
# Quadratic function should be solved in a single step.
# Initialize
sample_weight = np.sqrt(sample_weight)
X_weighted = sample_weight[:, np.newaxis] * (
np.column_stack((np.ones(n_samples), X)))
y_weighted = y * sample_weight
# Gradient is (X*coef-y)*X + alpha*coef_[1:]
# Remove coef since it is initialized to zero.
grad = -np.dot(y_weighted, X_weighted)
# Hessian is (X.T*X) + alpha*I except that the first
# diagonal element should be zero, since there is no
# penalization of intercept.
diag = alpha * np.ones(n_features + 1)
diag[0] = 0.
hess = np.dot(X_weighted.T, X_weighted)
hess.flat[::n_features + 2] += diag
coef_ = - np.dot(linalg.inv(hess), grad)
assert_almost_equal(coef_[0], est.intercept_)
assert_array_almost_equal(coef_[1:], est.coef_)
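# --- Illustrative note (not part of the original test suite) -----------------
# Why the rescaling checked in test_ridge_sample_weights works: with weights
# w_i the weighted ridge objective is
#     sum_i w_i * (y_i - x_i^T beta)^2 + alpha * ||beta||^2,
# and the substitution x_i' = sqrt(w_i) * x_i, y_i' = sqrt(w_i) * y_i turns
# every weighted term into an ordinary squared error while leaving the penalty
# untouched, so unweighted ridge on (X', y') yields the same coefficients.
# This is exactly what the assert_array_almost_equal(coefs, coefs2) check
# above verifies numerically.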
def test_ridge_shapes():
# Test shape of coef_ and intercept_
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y1 = y[:, np.newaxis]
Y = np.c_[y, 1 + y]
ridge = Ridge()
ridge.fit(X, y)
assert_equal(ridge.coef_.shape, (n_features,))
assert_equal(ridge.intercept_.shape, ())
ridge.fit(X, Y1)
assert_equal(ridge.coef_.shape, (1, n_features))
assert_equal(ridge.intercept_.shape, (1, ))
ridge.fit(X, Y)
assert_equal(ridge.coef_.shape, (2, n_features))
assert_equal(ridge.intercept_.shape, (2, ))
def test_ridge_intercept():
# Test intercept with multiple targets GH issue #708
rng = np.random.RandomState(0)
n_samples, n_features = 5, 10
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
Y = np.c_[y, 1. + y]
ridge = Ridge()
ridge.fit(X, y)
intercept = ridge.intercept_
ridge.fit(X, Y)
assert_almost_equal(ridge.intercept_[0], intercept)
assert_almost_equal(ridge.intercept_[1], intercept + 1.)
def test_toy_ridge_object():
# Test BayesianRegression ridge classifier
# TODO: test also n_samples > n_features
X = np.array([[1], [2]])
Y = np.array([1, 2])
clf = Ridge(alpha=0.0)
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_almost_equal(clf.predict(X_test), [1., 2, 3, 4])
assert_equal(len(clf.coef_.shape), 1)
assert_equal(type(clf.intercept_), np.float64)
Y = np.vstack((Y, Y)).T
clf.fit(X, Y)
X_test = [[1], [2], [3], [4]]
assert_equal(len(clf.coef_.shape), 2)
assert_equal(type(clf.intercept_), np.ndarray)
def test_ridge_vs_lstsq():
# On alpha=0., Ridge and OLS yield the same solution.
rng = np.random.RandomState(0)
# we need more samples than features
n_samples, n_features = 5, 4
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
ridge = Ridge(alpha=0., fit_intercept=False)
ols = LinearRegression(fit_intercept=False)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
ridge.fit(X, y)
ols.fit(X, y)
assert_almost_equal(ridge.coef_, ols.coef_)
def test_ridge_individual_penalties():
# Tests the ridge object using individual penalties
rng = np.random.RandomState(42)
n_samples, n_features, n_targets = 20, 10, 5
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples, n_targets)
penalties = np.arange(n_targets)
coef_cholesky = np.array([
Ridge(alpha=alpha, solver="cholesky").fit(X, target).coef_
for alpha, target in zip(penalties, y.T)])
coefs_indiv_pen = [
Ridge(alpha=penalties, solver=solver, tol=1e-6).fit(X, y).coef_
for solver in ['svd', 'sparse_cg', 'lsqr', 'cholesky']]
for coef_indiv_pen in coefs_indiv_pen:
assert_array_almost_equal(coef_cholesky, coef_indiv_pen)
# Test error is raised when number of targets and penalties do not match.
ridge = Ridge(alpha=penalties[:3])
assert_raises(ValueError, ridge.fit, X, y)
def _test_ridge_loo(filter_):
# test that can work with both dense or sparse matrices
n_samples = X_diabetes.shape[0]
ret = []
ridge_gcv = _RidgeGCV(fit_intercept=False)
ridge = Ridge(alpha=1.0, fit_intercept=False)
# generalized cross-validation (efficient leave-one-out)
decomp = ridge_gcv._pre_compute(X_diabetes, y_diabetes)
errors, c = ridge_gcv._errors(1.0, y_diabetes, *decomp)
values, c = ridge_gcv._values(1.0, y_diabetes, *decomp)
# brute-force leave-one-out: remove one example at a time
errors2 = []
values2 = []
for i in range(n_samples):
sel = np.arange(n_samples) != i
X_new = X_diabetes[sel]
y_new = y_diabetes[sel]
ridge.fit(X_new, y_new)
value = ridge.predict([X_diabetes[i]])[0]
error = (y_diabetes[i] - value) ** 2
errors2.append(error)
values2.append(value)
# check that efficient and brute-force LOO give same results
assert_almost_equal(errors, errors2)
assert_almost_equal(values, values2)
# generalized cross-validation (efficient leave-one-out,
# SVD variation)
decomp = ridge_gcv._pre_compute_svd(X_diabetes, y_diabetes)
errors3, c = ridge_gcv._errors_svd(ridge.alpha, y_diabetes, *decomp)
values3, c = ridge_gcv._values_svd(ridge.alpha, y_diabetes, *decomp)
# check that efficient and SVD efficient LOO give same results
assert_almost_equal(errors, errors3)
assert_almost_equal(values, values3)
# check best alpha
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
alpha_ = ridge_gcv.alpha_
ret.append(alpha_)
# check that we get same best alpha with custom loss_func
f = ignore_warnings
scoring = make_scorer(mean_squared_error, greater_is_better=False)
ridge_gcv2 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv2.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv2.alpha_, alpha_)
# check that we get same best alpha with custom score_func
func = lambda x, y: -mean_squared_error(x, y)
scoring = make_scorer(func)
ridge_gcv3 = RidgeCV(fit_intercept=False, scoring=scoring)
f(ridge_gcv3.fit)(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv3.alpha_, alpha_)
# check that we get same best alpha with a scorer
scorer = get_scorer('mean_squared_error')
ridge_gcv4 = RidgeCV(fit_intercept=False, scoring=scorer)
ridge_gcv4.fit(filter_(X_diabetes), y_diabetes)
assert_equal(ridge_gcv4.alpha_, alpha_)
# check that we get same best alpha with sample weights
ridge_gcv.fit(filter_(X_diabetes), y_diabetes,
sample_weight=np.ones(n_samples))
assert_equal(ridge_gcv.alpha_, alpha_)
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
ridge_gcv.fit(filter_(X_diabetes), Y)
Y_pred = ridge_gcv.predict(filter_(X_diabetes))
ridge_gcv.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge_gcv.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=5)
return ret
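# --- Illustrative sketch (not part of the original test suite) ---------------
# _test_ridge_loo above relies on the identity behind _RidgeGCV: for ridge with
# a fixed alpha the leave-one-out residual can be computed without refitting,
#     e_i = (y_i - yhat_i) / (1 - H_ii),  with  H = X (X^T X + alpha*I)^{-1} X^T.
# The helper below is a minimal, hedged demonstration of that identity on a
# small random problem without intercept; the helper name is an assumption and
# the function is not wired into the test runner.
def _sketch_loo_identity(n_samples=20, n_features=5, alpha=1.0, seed=0):
    rs = np.random.RandomState(seed)
    X = rs.randn(n_samples, n_features)
    y = rs.randn(n_samples)
    H = X.dot(np.linalg.solve(X.T.dot(X) + alpha * np.eye(n_features), X.T))
    shortcut = (y - H.dot(y)) / (1 - np.diag(H))
    brute = np.empty(n_samples)
    for i in range(n_samples):
        mask = np.arange(n_samples) != i
        coef = np.linalg.solve(
            X[mask].T.dot(X[mask]) + alpha * np.eye(n_features),
            X[mask].T.dot(y[mask]))
        brute[i] = y[i] - X[i].dot(coef)
    assert_array_almost_equal(shortcut, brute)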
def _test_ridge_cv(filter_):
n_samples = X_diabetes.shape[0]
ridge_cv = RidgeCV()
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
cv = KFold(n_samples, 5)
ridge_cv.set_params(cv=cv)
ridge_cv.fit(filter_(X_diabetes), y_diabetes)
ridge_cv.predict(filter_(X_diabetes))
assert_equal(len(ridge_cv.coef_.shape), 1)
assert_equal(type(ridge_cv.intercept_), np.float64)
def _test_ridge_diabetes(filter_):
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), y_diabetes)
return np.round(ridge.score(filter_(X_diabetes), y_diabetes), 5)
def _test_multi_ridge_diabetes(filter_):
# simulate several responses
Y = np.vstack((y_diabetes, y_diabetes)).T
n_features = X_diabetes.shape[1]
ridge = Ridge(fit_intercept=False)
ridge.fit(filter_(X_diabetes), Y)
assert_equal(ridge.coef_.shape, (2, n_features))
Y_pred = ridge.predict(filter_(X_diabetes))
ridge.fit(filter_(X_diabetes), y_diabetes)
y_pred = ridge.predict(filter_(X_diabetes))
assert_array_almost_equal(np.vstack((y_pred, y_pred)).T,
Y_pred, decimal=3)
def _test_ridge_classifiers(filter_):
n_classes = np.unique(y_iris).shape[0]
n_features = X_iris.shape[1]
for clf in (RidgeClassifier(), RidgeClassifierCV()):
clf.fit(filter_(X_iris), y_iris)
assert_equal(clf.coef_.shape, (n_classes, n_features))
y_pred = clf.predict(filter_(X_iris))
assert_greater(np.mean(y_iris == y_pred), .79)
n_samples = X_iris.shape[0]
cv = KFold(n_samples, 5)
clf = RidgeClassifierCV(cv=cv)
clf.fit(filter_(X_iris), y_iris)
y_pred = clf.predict(filter_(X_iris))
assert_true(np.mean(y_iris == y_pred) >= 0.8)
def _test_tolerance(filter_):
ridge = Ridge(tol=1e-5)
ridge.fit(filter_(X_diabetes), y_diabetes)
score = ridge.score(filter_(X_diabetes), y_diabetes)
ridge2 = Ridge(tol=1e-3)
ridge2.fit(filter_(X_diabetes), y_diabetes)
score2 = ridge2.score(filter_(X_diabetes), y_diabetes)
assert_true(score >= score2)
def test_dense_sparse():
for test_func in (_test_ridge_loo,
_test_ridge_cv,
_test_ridge_diabetes,
_test_multi_ridge_diabetes,
_test_ridge_classifiers,
_test_tolerance):
# test dense matrix
ret_dense = test_func(DENSE_FILTER)
# test sparse matrix
ret_sparse = test_func(SPARSE_FILTER)
# test that the outputs are the same
if ret_dense is not None and ret_sparse is not None:
assert_array_almost_equal(ret_dense, ret_sparse, decimal=3)
def test_ridge_cv_sparse_svd():
X = sp.csr_matrix(X_diabetes)
ridge = RidgeCV(gcv_mode="svd")
assert_raises(TypeError, ridge.fit, X)
def test_ridge_sparse_svd():
X = sp.csc_matrix(rng.rand(100, 10))
y = rng.rand(100)
ridge = Ridge(solver='svd')
assert_raises(TypeError, ridge.fit, X, y)
def test_class_weights():
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
    # we give a small weight to class 1
clf = RidgeClassifier(class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
# check if class_weight = 'balanced' can handle negative labels.
clf = RidgeClassifier(class_weight='balanced')
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# class_weight = 'balanced', and class_weight = None should return
# same values when y has equal number of all labels
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0], [1.0, 1.0]])
y = [1, 1, -1, -1]
clf = RidgeClassifier(class_weight=None)
clf.fit(X, y)
clfa = RidgeClassifier(class_weight='balanced')
clfa.fit(X, y)
assert_equal(len(clfa.classes_), 2)
assert_array_almost_equal(clf.coef_, clfa.coef_)
assert_array_almost_equal(clf.intercept_, clfa.intercept_)
def test_class_weight_vs_sample_weight():
"""Check class_weights resemble sample_weights behavior."""
for clf in (RidgeClassifier, RidgeClassifierCV):
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = clf()
clf1.fit(iris.data, iris.target)
clf2 = clf(class_weight='balanced')
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.coef_, clf2.coef_)
# Check that sample_weight and class_weight are multiplicative
clf1 = clf()
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = clf(class_weight=class_weight)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_class_weights_cv():
# Test class weights for cross validated ridge classifier.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = RidgeClassifierCV(class_weight=None, alphas=[.01, .1, 1])
clf.fit(X, y)
    # we give a small weight to class 1
clf = RidgeClassifierCV(class_weight={1: 0.001}, alphas=[.01, .1, 1, 10])
clf.fit(X, y)
assert_array_equal(clf.predict([[-.2, 2]]), np.array([-1]))
def test_ridgecv_store_cv_values():
# Test _RidgeCV's store_cv_values attribute.
    rng = np.random.RandomState(42)
n_samples = 8
n_features = 5
x = rng.randn(n_samples, n_features)
alphas = [1e-1, 1e0, 1e1]
n_alphas = len(alphas)
r = RidgeCV(alphas=alphas, store_cv_values=True)
# with len(y.shape) == 1
y = rng.randn(n_samples)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_alphas))
# with len(y.shape) == 2
n_responses = 3
y = rng.randn(n_samples, n_responses)
r.fit(x, y)
assert_equal(r.cv_values_.shape, (n_samples, n_responses, n_alphas))
def test_ridgecv_sample_weight():
rng = np.random.RandomState(0)
alphas = (0.1, 1.0, 10.0)
# There are different algorithms for n_samples > n_features
# and the opposite, so test them both.
for n_samples, n_features in ((6, 5), (5, 10)):
y = rng.randn(n_samples)
X = rng.randn(n_samples, n_features)
sample_weight = 1 + rng.rand(n_samples)
cv = KFold(n_samples, 5)
ridgecv = RidgeCV(alphas=alphas, cv=cv)
ridgecv.fit(X, y, sample_weight=sample_weight)
# Check using GridSearchCV directly
parameters = {'alpha': alphas}
fit_params = {'sample_weight': sample_weight}
gs = GridSearchCV(Ridge(), parameters, fit_params=fit_params,
cv=cv)
gs.fit(X, y)
assert_equal(ridgecv.alpha_, gs.best_estimator_.alpha)
assert_array_almost_equal(ridgecv.coef_, gs.best_estimator_.coef_)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights_OK = rng.randn(n_samples) ** 2 + 1
sample_weights_OK_1 = 1.
sample_weights_OK_2 = 2.
sample_weights_not_OK = sample_weights_OK[:, np.newaxis]
sample_weights_not_OK_2 = sample_weights_OK[np.newaxis, :]
ridge = Ridge(alpha=1)
# make sure the "OK" sample weights actually work
ridge.fit(X, y, sample_weights_OK)
ridge.fit(X, y, sample_weights_OK_1)
ridge.fit(X, y, sample_weights_OK_2)
def fit_ridge_not_ok():
ridge.fit(X, y, sample_weights_not_OK)
def fit_ridge_not_ok_2():
ridge.fit(X, y, sample_weights_not_OK_2)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok)
assert_raise_message(ValueError,
"Sample weights must be 1D array or scalar",
fit_ridge_not_ok_2)
def test_sparse_design_with_sample_weights():
# Sample weights must work with sparse matrices
n_sampless = [2, 3]
n_featuress = [3, 2]
rng = np.random.RandomState(42)
sparse_matrix_converters = [sp.coo_matrix,
sp.csr_matrix,
sp.csc_matrix,
sp.lil_matrix,
sp.dok_matrix
]
sparse_ridge = Ridge(alpha=1., fit_intercept=False)
dense_ridge = Ridge(alpha=1., fit_intercept=False)
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
sample_weights = rng.randn(n_samples) ** 2 + 1
for sparse_converter in sparse_matrix_converters:
X_sparse = sparse_converter(X)
sparse_ridge.fit(X_sparse, y, sample_weight=sample_weights)
dense_ridge.fit(X, y, sample_weight=sample_weights)
assert_array_almost_equal(sparse_ridge.coef_, dense_ridge.coef_,
decimal=6)
def test_raises_value_error_if_solver_not_supported():
# Tests whether a ValueError is raised if a non-identified solver
# is passed to ridge_regression
wrong_solver = "This is not a solver (MagritteSolveCV QuantumBitcoin)"
exception = ValueError
message = "Solver %s not understood" % wrong_solver
def func():
X = np.eye(3)
y = np.ones(3)
ridge_regression(X, y, alpha=1., solver=wrong_solver)
assert_raise_message(exception, message, func)
def test_sparse_cg_max_iter():
reg = Ridge(solver="sparse_cg", max_iter=1)
reg.fit(X_diabetes, y_diabetes)
assert_equal(reg.coef_.shape[0], X_diabetes.shape[1])
| bsd-3-clause |
irhete/predictive-monitoring-benchmark | experiments/BucketFactory.py | 1 | 1595 | import EncoderFactory
from bucketers.ClusterBasedBucketer import ClusterBasedBucketer
from bucketers.StateBasedBucketer import StateBasedBucketer
from bucketers.PrefixLengthBucketer import PrefixLengthBucketer
from bucketers.NoBucketer import NoBucketer
from bucketers.KNNBucketer import KNNBucketer
from sklearn.cluster import KMeans
def get_bucketer(method, encoding_method=None, case_id_col=None, cat_cols=None, num_cols=None, n_clusters=None, random_state=None, n_neighbors=None):
if method == "cluster":
bucket_encoder = EncoderFactory.get_encoder(method=encoding_method, case_id_col=case_id_col, dynamic_cat_cols=cat_cols, dynamic_num_cols=num_cols)
clustering = KMeans(n_clusters, random_state=random_state)
return ClusterBasedBucketer(encoder=bucket_encoder, clustering=clustering)
elif method == "state":
bucket_encoder = EncoderFactory.get_encoder(method=encoding_method, case_id_col=case_id_col, dynamic_cat_cols=cat_cols, dynamic_num_cols=num_cols)
return StateBasedBucketer(encoder=bucket_encoder)
elif method == "single":
return NoBucketer(case_id_col=case_id_col)
elif method == "prefix":
return PrefixLengthBucketer(case_id_col=case_id_col)
elif method == "knn":
bucket_encoder = EncoderFactory.get_encoder(method=encoding_method, case_id_col=case_id_col, dynamic_cat_cols=cat_cols, dynamic_num_cols=num_cols)
return KNNBucketer(encoder=bucket_encoder, n_neighbors=n_neighbors)
else:
print("Invalid bucketer type")
return None | apache-2.0 |
arabenjamin/scikit-learn | benchmarks/bench_plot_ward.py | 288 | 1260 | """
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
DonBeo/scikit-learn | examples/covariance/plot_lw_vs_oas.py | 247 | 2903 | """
=============================
Ledoit-Wolf vs OAS estimation
=============================
The usual covariance maximum likelihood estimate can be regularized
using shrinkage. Ledoit and Wolf proposed a closed formula to compute
the asymptotically optimal shrinkage parameter (minimizing a MSE
criterion), yielding the Ledoit-Wolf covariance estimate.
Chen et al. proposed an improvement of the Ledoit-Wolf shrinkage
parameter, the OAS coefficient, whose convergence is significantly
better under the assumption that the data are Gaussian.
This example, inspired by Chen's publication [1], shows a comparison
of the estimated MSE of the LW and OAS methods, using Gaussian
distributed data.
[1] "Shrinkage Algorithms for MMSE Covariance Estimation"
Chen et al., IEEE Trans. on Sign. Proc., Volume 58, Issue 10, October 2010.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy.linalg import toeplitz, cholesky
from sklearn.covariance import LedoitWolf, OAS
np.random.seed(0)
###############################################################################
n_features = 100
# simulation covariance matrix (AR(1) process)
r = 0.1
real_cov = toeplitz(r ** np.arange(n_features))
coloring_matrix = cholesky(real_cov)
n_samples_range = np.arange(6, 31, 1)
repeat = 100
lw_mse = np.zeros((n_samples_range.size, repeat))
oa_mse = np.zeros((n_samples_range.size, repeat))
lw_shrinkage = np.zeros((n_samples_range.size, repeat))
oa_shrinkage = np.zeros((n_samples_range.size, repeat))
for i, n_samples in enumerate(n_samples_range):
for j in range(repeat):
X = np.dot(
np.random.normal(size=(n_samples, n_features)), coloring_matrix.T)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X)
lw_mse[i, j] = lw.error_norm(real_cov, scaling=False)
lw_shrinkage[i, j] = lw.shrinkage_
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X)
oa_mse[i, j] = oa.error_norm(real_cov, scaling=False)
oa_shrinkage[i, j] = oa.shrinkage_
# plot MSE
plt.subplot(2, 1, 1)
plt.errorbar(n_samples_range, lw_mse.mean(1), yerr=lw_mse.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_mse.mean(1), yerr=oa_mse.std(1),
label='OAS', color='r')
plt.ylabel("Squared error")
plt.legend(loc="upper right")
plt.title("Comparison of covariance estimators")
plt.xlim(5, 31)
# plot shrinkage coefficient
plt.subplot(2, 1, 2)
plt.errorbar(n_samples_range, lw_shrinkage.mean(1), yerr=lw_shrinkage.std(1),
label='Ledoit-Wolf', color='g')
plt.errorbar(n_samples_range, oa_shrinkage.mean(1), yerr=oa_shrinkage.std(1),
label='OAS', color='r')
plt.xlabel("n_samples")
plt.ylabel("Shrinkage")
plt.legend(loc="lower right")
plt.ylim(plt.ylim()[0], 1. + (plt.ylim()[1] - plt.ylim()[0]) / 10.)
plt.xlim(5, 31)
plt.show()
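# --- Illustrative sketch (not part of the original example) ------------------
# The curves above average the shrinkage estimates over many repetitions. As a
# minimal, hedged complement, the helper below fits both estimators on a single
# draw from the same AR(1) model and returns their shrinkage coefficients
# (both lie in [0, 1]; larger means stronger shrinkage towards the scaled
# identity). The helper name and default sample size are assumptions, and the
# function is never called, so the example's output is unchanged.
def _sketch_compare_shrinkage(n_samples=20, seed=0):
    rng = np.random.RandomState(seed)
    X = np.dot(rng.normal(size=(n_samples, n_features)), coloring_matrix.T)
    lw = LedoitWolf(assume_centered=True).fit(X)
    oa = OAS(assume_centered=True).fit(X)
    return lw.shrinkage_, oa.shrinkage_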
| bsd-3-clause |
rdevon/cortex | tests/built_ins/networks/test_fully_connected.py | 1 | 1421 | from cortex.built_ins.networks.fully_connected import FullyConnectedNet
from torch import nn
def test_fully_connected_build():
"""
    Asserts: True if the FullyConnectedNet has the correct layers and
attributes.
"""
dim_in = 4096
dim_out = 10
dim_h = 64
dim_ex = None
nonlinearity = 'ReLU'
n_levels = None
output_nonlinearity = None
layer_args = {}
expected_name_linear = 'linear_({}/{})'.format(dim_in, dim_h)
expected_name_relu = 'linear_({}/{})_{}'.format(dim_in, dim_h, 'ReLU')
expected_name_out = 'linear_({}/{})_{}'.format(dim_h, dim_out, 'out')
fully_connected_net = FullyConnectedNet(dim_in, dim_out, dim_h, dim_ex,
nonlinearity, n_levels,
output_nonlinearity, **layer_args)
layers = list(fully_connected_net.models._modules.items())
assert layers[0][0] == expected_name_linear
assert layers[1][0] == expected_name_relu
assert layers[2][0] == expected_name_out
assert isinstance(layers[0][1], nn.modules.linear.Linear)
assert isinstance(layers[1][1], nn.modules.activation.ReLU)
assert isinstance(layers[2][1], nn.modules.linear.Linear)
assert layers[0][1].in_features == dim_in
assert layers[0][1].out_features == dim_h
assert layers[2][1].in_features == dim_h
assert layers[2][1].out_features == dim_out
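# --- Illustrative sketch (not part of the original test) ---------------------
# The test above only inspects the layers that FullyConnectedNet registers for
# dim_in=4096, dim_h=64, dim_out=10. As a hedged illustration, the helper below
# builds the plain-PyTorch stack those layers correspond to (Linear -> ReLU ->
# Linear); the helper name and the use of nn.Sequential are assumptions made
# for illustration only and are not part of the cortex API.
def _sketch_equivalent_stack(dim_in=4096, dim_h=64, dim_out=10):
    return nn.Sequential(
        nn.Linear(dim_in, dim_h),
        nn.ReLU(),
        nn.Linear(dim_h, dim_out),
    )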
| bsd-3-clause |
DonBeo/scikit-learn | sklearn/feature_extraction/tests/test_dict_vectorizer.py | 274 | 3790 | # Authors: Lars Buitinck <[email protected]>
# Dan Blanchard <[email protected]>
# License: BSD 3 clause
from random import Random
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_array_equal
from sklearn.utils.testing import (assert_equal, assert_in,
assert_false, assert_true)
from sklearn.feature_extraction import DictVectorizer
from sklearn.feature_selection import SelectKBest, chi2
def test_dictvectorizer():
D = [{"foo": 1, "bar": 3},
{"bar": 4, "baz": 2},
{"bar": 1, "quux": 1, "quuux": 2}]
for sparse in (True, False):
for dtype in (int, np.float32, np.int16):
for sort in (True, False):
for iterable in (True, False):
v = DictVectorizer(sparse=sparse, dtype=dtype, sort=sort)
X = v.fit_transform(iter(D) if iterable else D)
assert_equal(sp.issparse(X), sparse)
assert_equal(X.shape, (3, 5))
assert_equal(X.sum(), 14)
assert_equal(v.inverse_transform(X), D)
if sparse:
# CSR matrices can't be compared for equality
assert_array_equal(X.A, v.transform(iter(D) if iterable
else D).A)
else:
assert_array_equal(X, v.transform(iter(D) if iterable
else D))
if sort:
assert_equal(v.feature_names_,
sorted(v.feature_names_))
def test_feature_selection():
# make two feature dicts with two useful features and a bunch of useless
# ones, in terms of chi2
d1 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=1, useful2=20)
d2 = dict([("useless%d" % i, 10) for i in range(20)],
useful1=20, useful2=1)
for indices in (True, False):
v = DictVectorizer().fit([d1, d2])
X = v.transform([d1, d2])
sel = SelectKBest(chi2, k=2).fit(X, [0, 1])
v.restrict(sel.get_support(indices=indices), indices=indices)
assert_equal(v.get_feature_names(), ["useful1", "useful2"])
def test_one_of_k():
D_in = [{"version": "1", "ham": 2},
{"version": "2", "spam": .3},
{"version=3": True, "spam": -1}]
v = DictVectorizer()
X = v.fit_transform(D_in)
assert_equal(X.shape, (3, 5))
D_out = v.inverse_transform(X)
assert_equal(D_out[0], {"version=1": 1, "ham": 2})
names = v.get_feature_names()
assert_true("version=2" in names)
assert_false("version" in names)
def test_unseen_or_no_features():
D = [{"camelot": 0, "spamalot": 1}]
for sparse in [True, False]:
v = DictVectorizer(sparse=sparse).fit(D)
X = v.transform({"push the pram a lot": 2})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
X = v.transform({})
if sparse:
X = X.toarray()
assert_array_equal(X, np.zeros((1, 2)))
try:
v.transform([])
except ValueError as e:
assert_in("empty", str(e))
def test_deterministic_vocabulary():
# Generate equal dictionaries with different memory layouts
items = [("%03d" % i, i) for i in range(1000)]
rng = Random(42)
d_sorted = dict(items)
rng.shuffle(items)
d_shuffled = dict(items)
# check that the memory layout does not impact the resulting vocabulary
v_1 = DictVectorizer().fit([d_sorted])
v_2 = DictVectorizer().fit([d_shuffled])
assert_equal(v_1.vocabulary_, v_2.vocabulary_)
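# --- Illustrative sketch (not part of the original test suite) ---------------
# A minimal, hedged reminder of the mapping exercised by these tests: string
# values are expanded one-of-K into "feature=value" columns while numeric
# values pass through unchanged. The helper name is an assumption and the
# function is not wired into the test runner.
def _sketch_dictvectorizer_mapping():
    v = DictVectorizer(sparse=False)
    X = v.fit_transform([{"city": "London", "temp": 12.0},
                         {"city": "Paris", "temp": 18.0}])
    # feature names: ['city=London', 'city=Paris', 'temp']; X has shape (2, 3)
    return v.get_feature_names(), X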
| bsd-3-clause |
heli522/scikit-learn | sklearn/svm/tests/test_svm.py | 115 | 31653 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn.base import ChangedBehaviorWarning
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that no other thread
    # calls `srand` through this wrapper concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
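# --- Illustrative sketch (not part of the original test suite) ---------------
# test_precomputed above interleaves the precomputed-kernel workflow with many
# assertions. As a minimal, hedged summary of the same API usage: with
# kernel='precomputed' the estimator is fit on the train/train Gram matrix and
# queried with the test/train Gram matrix. The helper name is an assumption and
# the function is not wired into the test runner.
def _sketch_precomputed_kernel(X_train, y_train, X_test):
    gram_train = np.dot(X_train, np.transpose(X_train))  # (n_train, n_train)
    gram_test = np.dot(X_test, np.transpose(X_train))    # (n_test, n_train)
    clf = svm.SVC(kernel='precomputed')
    clf.fit(gram_train, y_train)
    return clf.predict(gram_test)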
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check shape of the decision function when decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
# check deprecation warning
clf.decision_function_shape = None
msg = "change the shape of the decision function"
dec = assert_warns_message(ChangedBehaviorWarning, msg,
clf.decision_function, X_train)
assert_equal(dec.shape, (len(X_train), 10))
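# --- Illustrative note (not part of the original test suite) -----------------
# Shapes checked in test_decision_function_shape: with
# decision_function_shape='ovr' the decision values have one column per class,
# i.e. (n_samples, n_classes), while 'ovo' keeps one column per pair of
# classes, i.e. (n_samples, n_classes * (n_classes - 1) / 2) -- which is why
# the 5-class blobs problem above yields 10 columns.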
def test_svr_decision_function():
# Test SVR's decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
    # we give a small weight to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="balanced"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='weighted')
<= metrics.f1_score(y, y_pred_balanced,
average='weighted'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1/L1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2/L2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
svm.LinearSVC(loss="L2").fit, X, y)
# LinearSVR
# loss l1/L1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("L1", "epsilon_insensitive", "loss='L1'",
"1.0"),
svm.LinearSVR(loss="L1").fit, X, y)
# loss l2/L2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
# Check if Upper case notation is supported by _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
# loss SQUARED_hinge --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)
# penalty L2 --> l2
assert_warns_message(DeprecationWarning,
msg.replace("loss", "penalty")
% ("L2", "l2", "0.18"),
svm.LinearSVC(penalty="L2").fit, X, y)
# loss EPSILON_INSENSITIVE --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
"0.18"),
svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
    # Check that primal coef modifications are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
    # clone for checking clonability with lambda functions.
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
| bsd-3-clause |
arabenjamin/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 250 | 2022 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
| bsd-3-clause |
tapomayukh/projects_in_python | classification/Classification_with_kNN/Single_Contact_Classification/Feature_Comparison/multiple_features/results/test10_cross_validate_categories_1200ms_scaled_method_v_force_motion.py | 1 | 4711 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_V import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
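    # NaN is the only float value that is not equal to itself, so the
    # comparison X[i,j] != X[i,j] below flags missing/corrupted entries.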
i=0
n=0
while i < 82:
j=0
while j < 140:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
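# Minimal usage sketch of pca() above (illustrative only; names are
# hypothetical):
#   vec, val, mean_F, B, C = pca(F)   # F: (n_features, n_samples) matrix
#   W = vec[:, 0:k]                   # keep the k leading eigenvectors
#   Y = (W.T) * B                     # project centered data onto the k PCs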
if __name__ == '__main__':
Fmat = np.row_stack([Fmat_original[0:41,:], Fmat_original[82:123,:]])
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
    # Recall that the cumulative sum of the eigenvalues shows the fraction of variance accounted for by the corresponding eigenvectors; perc_total below holds this cumulative fraction.
perc_total = cumsum(eigval_total)/sum(eigval_total)
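    # For example, the smallest k with perc_total[k] >= 0.95 would retain
    # roughly 95% of the variance; below a fixed cut of 2 PCs is used instead.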
    # Reduced eigenvector matrix keeping the eigenvectors with the highest eigenvalues (the first 2 principal components here)
W = eigvec_total[:,0:2]
m_W, n_W = np.shape(W)
print 'Reduced Dimension Eigenvector Shape:',m_W, n_W
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but sometimes useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
print 'Z-Score Shape:', m_Z, n_Z
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
print 'Transposed Projected Data Shape:', m_Y, n_Y
#Using PYMVPA
PCA_data = np.array(Y.T)
PCA_label_1 = ['Rigid-Fixed']*35 + ['Rigid-Movable']*35 + ['Soft-Fixed']*35 + ['Soft-Movable']*35
PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=3)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
print ds1.samples.shape
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
print error
print cvterr.confusion.asstring(description=False)
figure(1)
cvterr.confusion.plot(numbers='True')
#show()
# Variances
figure(2)
title('Variances of PCs')
stem(range(len(perc_total)),perc_total,'--b')
axis([-0.3,30.3,0,1.2])
grid('True')
show()
| mit |
arabenjamin/scikit-learn | examples/ensemble/plot_adaboost_hastie_10_2.py | 352 | 3576 | """
=============================
Discrete versus Real AdaBoost
=============================
This example is based on Figure 10.2 from Hastie et al 2009 [1] and illustrates
the difference in performance between the discrete SAMME [2] boosting
algorithm and real SAMME.R boosting algorithm. Both algorithms are evaluated
on a binary classification task where the target Y is a non-linear function
of 10 input features.
Discrete SAMME AdaBoost adapts based on errors in predicted class labels
whereas real SAMME.R uses the predicted class probabilities.
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
.. [2] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <[email protected]>,
# Noel Dawe <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import zero_one_loss
from sklearn.ensemble import AdaBoostClassifier
n_estimators = 400
# A learning rate of 1. may not be optimal for both SAMME and SAMME.R
learning_rate = 1.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_test, y_test = X[2000:], y[2000:]
X_train, y_train = X[:2000], y[:2000]
dt_stump = DecisionTreeClassifier(max_depth=1, min_samples_leaf=1)
dt_stump.fit(X_train, y_train)
dt_stump_err = 1.0 - dt_stump.score(X_test, y_test)
dt = DecisionTreeClassifier(max_depth=9, min_samples_leaf=1)
dt.fit(X_train, y_train)
dt_err = 1.0 - dt.score(X_test, y_test)
ada_discrete = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME")
ada_discrete.fit(X_train, y_train)
ada_real = AdaBoostClassifier(
base_estimator=dt_stump,
learning_rate=learning_rate,
n_estimators=n_estimators,
algorithm="SAMME.R")
ada_real.fit(X_train, y_train)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, n_estimators], [dt_stump_err] * 2, 'k-',
label='Decision Stump Error')
ax.plot([1, n_estimators], [dt_err] * 2, 'k--',
label='Decision Tree Error')
ada_discrete_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_test)):
ada_discrete_err[i] = zero_one_loss(y_pred, y_test)
ada_discrete_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_discrete.staged_predict(X_train)):
ada_discrete_err_train[i] = zero_one_loss(y_pred, y_train)
ada_real_err = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_test)):
ada_real_err[i] = zero_one_loss(y_pred, y_test)
ada_real_err_train = np.zeros((n_estimators,))
for i, y_pred in enumerate(ada_real.staged_predict(X_train)):
ada_real_err_train[i] = zero_one_loss(y_pred, y_train)
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err,
label='Discrete AdaBoost Test Error',
color='red')
ax.plot(np.arange(n_estimators) + 1, ada_discrete_err_train,
label='Discrete AdaBoost Train Error',
color='blue')
ax.plot(np.arange(n_estimators) + 1, ada_real_err,
label='Real AdaBoost Test Error',
color='orange')
ax.plot(np.arange(n_estimators) + 1, ada_real_err_train,
label='Real AdaBoost Train Error',
color='green')
ax.set_ylim((0.0, 0.5))
ax.set_xlabel('n_estimators')
ax.set_ylabel('error rate')
leg = ax.legend(loc='upper right', fancybox=True)
leg.get_frame().set_alpha(0.7)
plt.show()
| bsd-3-clause |
tensorflow/tensorflow-experimental_link_static_libraries_once | tensorflow/tools/compatibility/ast_edits.py | 10 | 39593 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts according to an API change specification."""
import ast
import collections
import os
import re
import shutil
import sys
import tempfile
import traceback
import pasta
# Some regular expressions we will need for parsing
FIND_OPEN = re.compile(r"^\s*(\[).*$")
FIND_STRING_CHARS = re.compile(r"['\"]")
INFO = "INFO"
WARNING = "WARNING"
ERROR = "ERROR"
ImportRename = collections.namedtuple(
"ImportRename", ["new_name", "excluded_prefixes"])
def full_name_node(name, ctx=ast.Load()):
"""Make an Attribute or Name node for name.
Translate a qualified name into nested Attribute nodes (and a Name node).
Args:
name: The name to translate to a node.
ctx: What context this name is used in. Defaults to Load()
Returns:
A Name or Attribute node.
"""
names = name.split(".")
names.reverse()
node = ast.Name(id=names.pop(), ctx=ast.Load())
while names:
node = ast.Attribute(value=node, attr=names.pop(), ctx=ast.Load())
# Change outermost ctx to the one given to us (inner ones should be Load).
node.ctx = ctx
return node
def get_arg_value(node, arg_name, arg_pos=None):
"""Get the value of an argument from a ast.Call node.
This function goes through the positional and keyword arguments to check
whether a given argument was used, and if so, returns its value (the node
representing its value).
  This cannot introspect *args or **kwargs, but it safely handles *args in
  Python 3.5+.
Args:
node: The ast.Call node to extract arg values from.
arg_name: The name of the argument to extract.
arg_pos: The position of the argument (in case it's passed as a positional
argument).
Returns:
A tuple (arg_present, arg_value) containing a boolean indicating whether
the argument is present, and its value in case it is.
"""
# Check keyword args
if arg_name is not None:
for kw in node.keywords:
if kw.arg == arg_name:
return (True, kw.value)
# Check positional args
if arg_pos is not None:
idx = 0
for arg in node.args:
if sys.version_info[:2] >= (3, 5) and isinstance(arg, ast.Starred):
continue # Can't parse Starred
if idx == arg_pos:
return (True, arg)
idx += 1
return (False, None)
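# Illustrative examples (not executed): for a call node parsed from "f(1, x=2)",
# get_arg_value(node, "x") returns (True, <node for 2>) and
# get_arg_value(node, None, arg_pos=0) returns (True, <node for 1>).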
def uses_star_args_in_call(node):
"""Check if an ast.Call node uses arbitrary-length positional *args.
This function works with the AST call node format of Python3.5+
as well as the different AST format of earlier versions of Python.
Args:
node: The ast.Call node to check arg values for.
Returns:
True if the node uses starred variadic positional args or keyword args.
False if it does not.
"""
if sys.version_info[:2] >= (3, 5):
# Check for an *args usage in python 3.5+
for arg in node.args:
if isinstance(arg, ast.Starred):
return True
else:
if node.starargs:
return True
return False
def uses_star_kwargs_in_call(node):
"""Check if an ast.Call node uses arbitrary-length **kwargs.
This function works with the AST call node format of Python3.5+
as well as the different AST format of earlier versions of Python.
Args:
node: The ast.Call node to check arg values for.
Returns:
True if the node uses starred variadic positional args or keyword args.
False if it does not.
"""
if sys.version_info[:2] >= (3, 5):
# Check for a **kwarg usage in python 3.5+
for keyword in node.keywords:
if keyword.arg is None:
return True
else:
if node.kwargs:
return True
return False
def uses_star_args_or_kwargs_in_call(node):
"""Check if an ast.Call node uses arbitrary-length *args or **kwargs.
This function works with the AST call node format of Python3.5+
as well as the different AST format of earlier versions of Python.
Args:
node: The ast.Call node to check arg values for.
Returns:
True if the node uses starred variadic positional args or keyword args.
False if it does not.
"""
return uses_star_args_in_call(node) or uses_star_kwargs_in_call(node)
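# Illustrative examples (not executed): calls such as f(*xs), f(**kw) or
# f(*xs, **kw) make this return True, while f(1, k=2) returns False.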
def excluded_from_module_rename(module, import_rename_spec):
"""Check if this module import should not be renamed.
Args:
module: (string) module name.
import_rename_spec: ImportRename instance.
Returns:
True if this import should not be renamed according to the
import_rename_spec.
"""
for excluded_prefix in import_rename_spec.excluded_prefixes:
if module.startswith(excluded_prefix):
return True
return False
class APIChangeSpec:
"""This class defines the transformations that need to happen.
This class must provide the following fields:
* `function_keyword_renames`: maps function names to a map of old -> new
argument names
* `symbol_renames`: maps function names to new function names
* `change_to_function`: a set of function names that have changed (for
notifications)
* `function_reorders`: maps functions whose argument order has changed to the
list of arguments in the new order
* `function_warnings`: maps full names of functions to warnings that will be
printed out if the function is used. (e.g. tf.nn.convolution())
* `function_transformers`: maps function names to custom handlers
* `module_deprecations`: maps module names to warnings that will be printed
if the module is still used after all other transformations have run
* `import_renames`: maps import name (must be a short name without '.')
to ImportRename instance.
For an example, see `TFAPIChangeSpec`.
"""
def preprocess(self, root_node): # pylint: disable=unused-argument
"""Preprocess a parse tree. Return a preprocessed node, logs and errors."""
return root_node, [], []
def clear_preprocessing(self):
"""Restore this APIChangeSpec to before it preprocessed a file.
This is needed if preprocessing a file changed any rewriting rules.
"""
pass
class NoUpdateSpec(APIChangeSpec):
"""A specification of an API change which doesn't change anything."""
def __init__(self):
self.function_handle = {}
self.function_reorders = {}
self.function_keyword_renames = {}
self.symbol_renames = {}
self.function_warnings = {}
self.change_to_function = {}
self.module_deprecations = {}
self.function_transformers = {}
self.import_renames = {}
class _PastaEditVisitor(ast.NodeVisitor):
"""AST Visitor that processes function calls.
Updates function calls from old API version to new API version using a given
change spec.
"""
def __init__(self, api_change_spec):
self._api_change_spec = api_change_spec
self._log = [] # Holds 4-tuples: severity, line, col, msg.
self._stack = [] # Allow easy access to parents.
# Overridden to maintain a stack of nodes to allow for parent access
def visit(self, node):
self._stack.append(node)
super(_PastaEditVisitor, self).visit(node)
self._stack.pop()
@property
def errors(self):
return [log for log in self._log if log[0] == ERROR]
@property
def warnings(self):
return [log for log in self._log if log[0] == WARNING]
@property
def warnings_and_errors(self):
return [log for log in self._log if log[0] in (WARNING, ERROR)]
@property
def info(self):
return [log for log in self._log if log[0] == INFO]
@property
def log(self):
return self._log
def add_log(self, severity, lineno, col, msg):
self._log.append((severity, lineno, col, msg))
print("%s line %d:%d: %s" % (severity, lineno, col, msg))
def add_logs(self, logs):
"""Record a log and print it.
The log should be a tuple `(severity, lineno, col_offset, msg)`, which will
be printed and recorded. It is part of the log available in the `self.log`
property.
Args:
logs: The logs to add. Must be a list of tuples
`(severity, lineno, col_offset, msg)`.
"""
self._log.extend(logs)
for log in logs:
print("%s line %d:%d: %s" % log)
def _get_applicable_entries(self, transformer_field, full_name, name):
"""Get all list entries indexed by name that apply to full_name or name."""
# Transformers are indexed to full name, name, or no name
# as a performance optimization.
function_transformers = getattr(self._api_change_spec,
transformer_field, {})
glob_name = "*." + name if name else None
transformers = []
if full_name in function_transformers:
transformers.append(function_transformers[full_name])
if glob_name in function_transformers:
transformers.append(function_transformers[glob_name])
if "*" in function_transformers:
transformers.append(function_transformers["*"])
return transformers
def _get_applicable_dict(self, transformer_field, full_name, name):
"""Get all dict entries indexed by name that apply to full_name or name."""
# Transformers are indexed to full name, name, or no name
# as a performance optimization.
function_transformers = getattr(self._api_change_spec,
transformer_field, {})
glob_name = "*." + name if name else None
transformers = function_transformers.get("*", {}).copy()
transformers.update(function_transformers.get(glob_name, {}))
transformers.update(function_transformers.get(full_name, {}))
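    # Later updates win: full-name entries override "*.<name>" entries, which
    # in turn override the catch-all "*" defaults.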
return transformers
def _get_full_name(self, node):
"""Traverse an Attribute node to generate a full name, e.g., "tf.foo.bar".
This is the inverse of `full_name_node`.
Args:
node: A Node of type Attribute.
Returns:
a '.'-delimited full-name or None if node was not Attribute or Name.
      i.e. `(foo()+b).bar` returns None, while `a.b.c` would return "a.b.c".
"""
curr = node
items = []
while not isinstance(curr, ast.Name):
if not isinstance(curr, ast.Attribute):
return None
items.append(curr.attr)
curr = curr.value
items.append(curr.id)
return ".".join(reversed(items))
def _maybe_add_warning(self, node, full_name):
"""Adds an error to be printed about full_name at node."""
function_warnings = self._api_change_spec.function_warnings
if full_name in function_warnings:
level, message = function_warnings[full_name]
message = message.replace("<function name>", full_name)
self.add_log(level, node.lineno, node.col_offset,
"%s requires manual check. %s" % (full_name, message))
return True
else:
return False
def _maybe_add_module_deprecation_warning(self, node, full_name, whole_name):
"""Adds a warning if full_name is a deprecated module."""
warnings = self._api_change_spec.module_deprecations
if full_name in warnings:
level, message = warnings[full_name]
message = message.replace("<function name>", whole_name)
self.add_log(level, node.lineno, node.col_offset,
"Using member %s in deprecated module %s. %s" % (whole_name,
full_name,
message))
return True
else:
return False
def _maybe_add_call_warning(self, node, full_name, name):
"""Print a warning when specific functions are called with selected args.
    Unlike the full-name warnings handled by the Attribute visitor, this
    method matches the bare function name that is called, as long as the
    function is an attribute. For example, `tf.foo.bar()` and `foo.bar()`
    are matched, but not `bar()`.
Args:
node: ast.Call object
full_name: The precomputed full name of the callable, if one exists, None
otherwise.
name: The precomputed name of the callable, if one exists, None otherwise.
Returns:
Whether an error was recorded.
"""
# Only look for *.-warnings here, the other will be handled by the Attribute
# visitor. Also, do not warn for bare functions, only if the call func is
# an attribute.
warned = False
if isinstance(node.func, ast.Attribute):
warned = self._maybe_add_warning(node, "*." + name)
# All arg warnings are handled here, since only we have the args
arg_warnings = self._get_applicable_dict("function_arg_warnings",
full_name, name)
variadic_args = uses_star_args_or_kwargs_in_call(node)
for (kwarg, arg), (level, warning) in sorted(arg_warnings.items()):
present, _ = get_arg_value(node, kwarg, arg) or variadic_args
if present:
warned = True
warning_message = warning.replace("<function name>", full_name or name)
template = "%s called with %s argument, requires manual check: %s"
if variadic_args:
template = ("%s called with *args or **kwargs that may include %s, "
"requires manual check: %s")
self.add_log(level, node.lineno, node.col_offset,
template % (full_name or name, kwarg, warning_message))
return warned
def _maybe_rename(self, parent, node, full_name):
"""Replace node (Attribute or Name) with a node representing full_name."""
new_name = self._api_change_spec.symbol_renames.get(full_name, None)
if new_name:
self.add_log(INFO, node.lineno, node.col_offset,
"Renamed %r to %r" % (full_name, new_name))
new_node = full_name_node(new_name, node.ctx)
ast.copy_location(new_node, node)
pasta.ast_utils.replace_child(parent, node, new_node)
return True
else:
return False
def _maybe_change_to_function_call(self, parent, node, full_name):
"""Wraps node (typically, an Attribute or Expr) in a Call."""
if full_name in self._api_change_spec.change_to_function:
if not isinstance(parent, ast.Call):
# ast.Call's constructor is really picky about how many arguments it
# wants, and also, it changed between Py2 and Py3.
new_node = ast.Call(node, [], [])
pasta.ast_utils.replace_child(parent, node, new_node)
ast.copy_location(new_node, node)
self.add_log(INFO, node.lineno, node.col_offset,
"Changed %r to a function call" % full_name)
return True
return False
def _maybe_add_arg_names(self, node, full_name):
"""Make args into keyword args if function called full_name requires it."""
function_reorders = self._api_change_spec.function_reorders
if full_name in function_reorders:
if uses_star_args_in_call(node):
self.add_log(WARNING, node.lineno, node.col_offset,
"(Manual check required) upgrading %s may require "
"re-ordering the call arguments, but it was passed "
"variable-length positional *args. The upgrade "
"script cannot handle these automatically." % full_name)
reordered = function_reorders[full_name]
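      # Illustrative example: if reordered == ["x", "y"], a call f(1, 2) is
      # rewritten as f(x=1, y=2), making it robust to argument re-ordering.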
new_keywords = []
idx = 0
for arg in node.args:
if sys.version_info[:2] >= (3, 5) and isinstance(arg, ast.Starred):
continue # Can't move Starred to keywords
keyword_arg = reordered[idx]
keyword = ast.keyword(arg=keyword_arg, value=arg)
new_keywords.append(keyword)
idx += 1
if new_keywords:
self.add_log(INFO, node.lineno, node.col_offset,
"Added keywords to args of function %r" % full_name)
node.args = []
node.keywords = new_keywords + (node.keywords or [])
return True
return False
def _maybe_modify_args(self, node, full_name, name):
"""Rename keyword args if the function called full_name requires it."""
renamed_keywords = self._get_applicable_dict("function_keyword_renames",
full_name, name)
if not renamed_keywords:
return False
if uses_star_kwargs_in_call(node):
self.add_log(WARNING, node.lineno, node.col_offset,
"(Manual check required) upgrading %s may require "
"renaming or removing call arguments, but it was passed "
"variable-length *args or **kwargs. The upgrade "
"script cannot handle these automatically." %
(full_name or name))
modified = False
new_keywords = []
for keyword in node.keywords:
argkey = keyword.arg
if argkey in renamed_keywords:
modified = True
if renamed_keywords[argkey] is None:
lineno = getattr(keyword, "lineno", node.lineno)
col_offset = getattr(keyword, "col_offset", node.col_offset)
self.add_log(INFO, lineno, col_offset,
"Removed argument %s for function %s" % (
argkey, full_name or name))
else:
keyword.arg = renamed_keywords[argkey]
lineno = getattr(keyword, "lineno", node.lineno)
col_offset = getattr(keyword, "col_offset", node.col_offset)
self.add_log(INFO, lineno, col_offset,
"Renamed keyword argument for %s from %s to %s" % (
full_name, argkey, renamed_keywords[argkey]))
new_keywords.append(keyword)
else:
new_keywords.append(keyword)
if modified:
node.keywords = new_keywords
return modified
def visit_Call(self, node): # pylint: disable=invalid-name
"""Handle visiting a call node in the AST.
Args:
node: Current Node
"""
assert self._stack[-1] is node
# Get the name for this call, so we can index stuff with it.
full_name = self._get_full_name(node.func)
if full_name:
name = full_name.split(".")[-1]
elif isinstance(node.func, ast.Name):
name = node.func.id
elif isinstance(node.func, ast.Attribute):
name = node.func.attr
else:
name = None
# Call standard transformers for this node.
# Make sure warnings come first, since args or names triggering warnings
# may be removed by the other transformations.
self._maybe_add_call_warning(node, full_name, name)
# Make all args into kwargs
self._maybe_add_arg_names(node, full_name)
# Argument name changes or deletions
self._maybe_modify_args(node, full_name, name)
# Call transformers. These have the ability to modify the node, and if they
# do, will return the new node they created (or the same node if they just
# changed it). The are given the parent, but we will take care of
# integrating their changes into the parent if they return a new node.
#
# These are matched on the old name, since renaming is performed by the
# Attribute visitor, which happens later.
transformers = self._get_applicable_entries("function_transformers",
full_name, name)
parent = self._stack[-2]
if transformers:
if uses_star_args_or_kwargs_in_call(node):
self.add_log(WARNING, node.lineno, node.col_offset,
"(Manual check required) upgrading %s may require "
"modifying call arguments, but it was passed "
"variable-length *args or **kwargs. The upgrade "
"script cannot handle these automatically." %
(full_name or name))
for transformer in transformers:
logs = []
new_node = transformer(parent, node, full_name, name, logs)
self.add_logs(logs)
if new_node and new_node is not node:
pasta.ast_utils.replace_child(parent, node, new_node)
node = new_node
self._stack[-1] = node
self.generic_visit(node)
def visit_Attribute(self, node): # pylint: disable=invalid-name
"""Handle bare Attributes i.e. [tf.foo, tf.bar]."""
assert self._stack[-1] is node
full_name = self._get_full_name(node)
if full_name:
parent = self._stack[-2]
# Make sure the warning comes first, otherwise the name may have changed
self._maybe_add_warning(node, full_name)
# Once we did a modification, node is invalid and not worth inspecting
# further. Also, we only perform modifications for simple nodes, so
# There'd be no point in descending further.
if self._maybe_rename(parent, node, full_name):
return
if self._maybe_change_to_function_call(parent, node, full_name):
return
# The isinstance check is enough -- a bare Attribute is never root.
i = 2
while isinstance(self._stack[-i], ast.Attribute):
i += 1
whole_name = pasta.dump(self._stack[-(i-1)])
self._maybe_add_module_deprecation_warning(node, full_name, whole_name)
self.generic_visit(node)
def visit_Import(self, node): # pylint: disable=invalid-name
"""Handle visiting an import node in the AST.
Args:
node: Current Node
"""
new_aliases = []
import_updated = False
import_renames = getattr(self._api_change_spec, "import_renames", {})
max_submodule_depth = getattr(self._api_change_spec, "max_submodule_depth",
1)
inserts_after_imports = getattr(self._api_change_spec,
"inserts_after_imports", {})
# This loop processes imports in the format
# import foo as f, bar as b
for import_alias in node.names:
all_import_components = import_alias.name.split(".")
# Look for rename, starting with longest import levels.
found_update = False
for i in reversed(list(range(1, max_submodule_depth + 1))):
import_component = all_import_components[0]
for j in range(1, min(i, len(all_import_components))):
import_component += "." + all_import_components[j]
import_rename_spec = import_renames.get(import_component, None)
if not import_rename_spec or excluded_from_module_rename(
import_alias.name, import_rename_spec):
continue
new_name = (
import_rename_spec.new_name +
import_alias.name[len(import_component):])
# If current import is
# import foo
# then new import should preserve imported name:
# import new_foo as foo
# This happens when module has just one component.
new_asname = import_alias.asname
if not new_asname and "." not in import_alias.name:
new_asname = import_alias.name
new_alias = ast.alias(name=new_name, asname=new_asname)
new_aliases.append(new_alias)
import_updated = True
found_update = True
# Insert any followup lines that should happen after this import.
full_import = (import_alias.name, import_alias.asname)
insert_offset = 1
for line_to_insert in inserts_after_imports.get(full_import, []):
assert self._stack[-1] is node
parent = self._stack[-2]
new_line_node = pasta.parse(line_to_insert)
ast.copy_location(new_line_node, node)
parent.body.insert(
parent.body.index(node) + insert_offset, new_line_node)
insert_offset += 1
# Insert a newline after the import if necessary
old_suffix = pasta.base.formatting.get(node, "suffix")
if old_suffix is None:
old_suffix = os.linesep
if os.linesep not in old_suffix:
pasta.base.formatting.set(node, "suffix", old_suffix + os.linesep)
# Apply indentation to new node.
pasta.base.formatting.set(new_line_node, "prefix",
pasta.base.formatting.get(node, "prefix"))
pasta.base.formatting.set(new_line_node, "suffix", os.linesep)
self.add_log(
INFO, node.lineno, node.col_offset,
"Adding `%s` after import of %s" %
(new_line_node, import_alias.name))
# Find one match, break
if found_update:
break
# No rename is found for all levels
if not found_update:
new_aliases.append(import_alias) # no change needed
# Replace the node if at least one import needs to be updated.
if import_updated:
assert self._stack[-1] is node
parent = self._stack[-2]
new_node = ast.Import(new_aliases)
ast.copy_location(new_node, node)
pasta.ast_utils.replace_child(parent, node, new_node)
self.add_log(
INFO, node.lineno, node.col_offset,
"Changed import from %r to %r." %
(pasta.dump(node), pasta.dump(new_node)))
self.generic_visit(node)
def visit_ImportFrom(self, node): # pylint: disable=invalid-name
"""Handle visiting an import-from node in the AST.
Args:
node: Current Node
"""
if not node.module:
self.generic_visit(node)
return
from_import = node.module
# Look for rename based on first component of from-import.
# i.e. based on foo in foo.bar.
from_import_first_component = from_import.split(".")[0]
import_renames = getattr(self._api_change_spec, "import_renames", {})
import_rename_spec = import_renames.get(from_import_first_component, None)
if not import_rename_spec:
self.generic_visit(node)
return
# Split module aliases into the ones that require import update
# and those that don't. For e.g. if we want to rename "a" to "b"
# unless we import "a.c" in the following:
# from a import c, d
# we want to update import for "d" but not for "c".
updated_aliases = []
same_aliases = []
for import_alias in node.names:
full_module_name = "%s.%s" % (from_import, import_alias.name)
if excluded_from_module_rename(full_module_name, import_rename_spec):
same_aliases.append(import_alias)
else:
updated_aliases.append(import_alias)
if not updated_aliases:
self.generic_visit(node)
return
assert self._stack[-1] is node
parent = self._stack[-2]
# Replace first component of from-import with new name.
new_from_import = (
import_rename_spec.new_name +
from_import[len(from_import_first_component):])
updated_node = ast.ImportFrom(new_from_import, updated_aliases, node.level)
ast.copy_location(updated_node, node)
pasta.ast_utils.replace_child(parent, node, updated_node)
# If some imports had to stay the same, add another import for them.
additional_import_log = ""
if same_aliases:
same_node = ast.ImportFrom(from_import, same_aliases, node.level,
col_offset=node.col_offset, lineno=node.lineno)
ast.copy_location(same_node, node)
parent.body.insert(parent.body.index(updated_node), same_node)
# Apply indentation to new node.
pasta.base.formatting.set(
same_node, "prefix",
pasta.base.formatting.get(updated_node, "prefix"))
additional_import_log = " and %r" % pasta.dump(same_node)
self.add_log(
INFO, node.lineno, node.col_offset,
"Changed import from %r to %r%s." %
(pasta.dump(node),
pasta.dump(updated_node),
additional_import_log))
self.generic_visit(node)
class AnalysisResult:
"""This class represents an analysis result and how it should be logged.
This class must provide the following fields:
* `log_level`: The log level to which this detection should be logged
* `log_message`: The message that should be logged for this detection
For an example, see `VersionedTFImport`.
"""
class APIAnalysisSpec:
"""This class defines how `AnalysisResult`s should be generated.
It specifies how to map imports and symbols to `AnalysisResult`s.
This class must provide the following fields:
* `symbols_to_detect`: maps function names to `AnalysisResult`s
* `imports_to_detect`: maps imports represented as (full module name, alias)
tuples to `AnalysisResult`s
For an example, see `TFAPIImportAnalysisSpec`.
"""
class PastaAnalyzeVisitor(_PastaEditVisitor):
"""AST Visitor that looks for specific API usage without editing anything.
This is used before any rewriting is done to detect if any symbols are used
that require changing imports or disabling rewriting altogether.
"""
def __init__(self, api_analysis_spec):
super(PastaAnalyzeVisitor, self).__init__(NoUpdateSpec())
self._api_analysis_spec = api_analysis_spec
self._results = [] # Holds AnalysisResult objects
@property
def results(self):
return self._results
def add_result(self, analysis_result):
self._results.append(analysis_result)
def visit_Attribute(self, node): # pylint: disable=invalid-name
"""Handle bare Attributes i.e. [tf.foo, tf.bar]."""
full_name = self._get_full_name(node)
if full_name:
detection = self._api_analysis_spec.symbols_to_detect.get(full_name, None)
if detection:
self.add_result(detection)
self.add_log(
detection.log_level, node.lineno, node.col_offset,
detection.log_message)
self.generic_visit(node)
def visit_Import(self, node): # pylint: disable=invalid-name
"""Handle visiting an import node in the AST.
Args:
node: Current Node
"""
for import_alias in node.names:
      # Detect based on full import name and alias
full_import = (import_alias.name, import_alias.asname)
detection = (self._api_analysis_spec
.imports_to_detect.get(full_import, None))
if detection:
self.add_result(detection)
self.add_log(
detection.log_level, node.lineno, node.col_offset,
detection.log_message)
self.generic_visit(node)
def visit_ImportFrom(self, node): # pylint: disable=invalid-name
"""Handle visiting an import-from node in the AST.
Args:
node: Current Node
"""
if not node.module:
self.generic_visit(node)
return
from_import = node.module
for import_alias in node.names:
      # Detect based on full import name (module and alias)
full_module_name = "%s.%s" % (from_import, import_alias.name)
full_import = (full_module_name, import_alias.asname)
detection = (self._api_analysis_spec
.imports_to_detect.get(full_import, None))
if detection:
self.add_result(detection)
self.add_log(
detection.log_level, node.lineno, node.col_offset,
detection.log_message)
self.generic_visit(node)
class ASTCodeUpgrader:
"""Handles upgrading a set of Python files using a given API change spec."""
def __init__(self, api_change_spec):
if not isinstance(api_change_spec, APIChangeSpec):
raise TypeError("Must pass APIChangeSpec to ASTCodeUpgrader, got %s" %
type(api_change_spec))
self._api_change_spec = api_change_spec
def process_file(self,
in_filename,
out_filename,
no_change_to_outfile_on_error=False):
"""Process the given python file for incompatible changes.
Args:
in_filename: filename to parse
out_filename: output file to write to
      no_change_to_outfile_on_error: if True, do not modify the output file when errors occur
Returns:
A tuple representing number of files processed, log of actions, errors
"""
    # Write to a temporary file, just in case we are doing an in-place modify.
# pylint: disable=g-backslash-continuation
with open(in_filename, "r") as in_file, \
tempfile.NamedTemporaryFile("w", delete=False) as temp_file:
ret = self.process_opened_file(in_filename, in_file, out_filename,
temp_file)
# pylint: enable=g-backslash-continuation
if no_change_to_outfile_on_error and ret[0] == 0:
os.remove(temp_file.name)
else:
shutil.move(temp_file.name, out_filename)
return ret
def format_log(self, log, in_filename):
log_string = "%d:%d: %s: %s" % (log[1], log[2], log[0], log[3])
if in_filename:
return in_filename + ":" + log_string
else:
return log_string
def update_string_pasta(self, text, in_filename):
"""Updates a file using pasta."""
try:
t = pasta.parse(text)
except (SyntaxError, ValueError, TypeError):
log = ["ERROR: Failed to parse.\n" + traceback.format_exc()]
return 0, "", log, []
t, preprocess_logs, preprocess_errors = self._api_change_spec.preprocess(t)
visitor = _PastaEditVisitor(self._api_change_spec)
visitor.visit(t)
self._api_change_spec.clear_preprocessing()
logs = [self.format_log(log, None) for log in (preprocess_logs +
visitor.log)]
errors = [self.format_log(error, in_filename)
for error in (preprocess_errors +
visitor.warnings_and_errors)]
return 1, pasta.dump(t), logs, errors
def _format_log(self, log, in_filename, out_filename):
text = "-" * 80 + "\n"
text += "Processing file %r\n outputting to %r\n" % (in_filename,
out_filename)
text += "-" * 80 + "\n\n"
text += "\n".join(log) + "\n"
text += "-" * 80 + "\n\n"
return text
def process_opened_file(self, in_filename, in_file, out_filename, out_file):
"""Process the given python file for incompatible changes.
This function is split out to facilitate StringIO testing from
tf_upgrade_test.py.
Args:
in_filename: filename to parse
in_file: opened file (or StringIO)
out_filename: output file to write to
out_file: opened file (or StringIO)
Returns:
A tuple representing number of files processed, log of actions, errors
"""
lines = in_file.readlines()
processed_file, new_file_content, log, process_errors = (
self.update_string_pasta("".join(lines), in_filename))
if out_file and processed_file:
out_file.write(new_file_content)
return (processed_file,
self._format_log(log, in_filename, out_filename),
process_errors)
def process_tree(self, root_directory, output_root_directory,
copy_other_files):
"""Processes upgrades on an entire tree of python files in place.
    Note that only Python files are processed. If you have custom code in
    other languages, you will need to upgrade it manually.
Args:
root_directory: Directory to walk and process.
output_root_directory: Directory to use as base.
copy_other_files: Copy files that are not touched by this converter.
Returns:
A tuple of files processed, the report string for all files, and a dict
mapping filenames to errors encountered in that file.
"""
if output_root_directory == root_directory:
return self.process_tree_inplace(root_directory)
# make sure output directory doesn't exist
if output_root_directory and os.path.exists(output_root_directory):
print("Output directory %r must not already exist." %
(output_root_directory))
sys.exit(1)
# make sure output directory does not overlap with root_directory
norm_root = os.path.split(os.path.normpath(root_directory))
norm_output = os.path.split(os.path.normpath(output_root_directory))
if norm_root == norm_output:
print("Output directory %r same as input directory %r" %
(root_directory, output_root_directory))
sys.exit(1)
# Collect list of files to process (we do this to correctly handle if the
# user puts the output directory in some sub directory of the input dir)
files_to_process = []
files_to_copy = []
for dir_name, _, file_list in os.walk(root_directory):
py_files = [f for f in file_list if f.endswith(".py")]
copy_files = [f for f in file_list if not f.endswith(".py")]
for filename in py_files:
fullpath = os.path.join(dir_name, filename)
fullpath_output = os.path.join(output_root_directory,
os.path.relpath(fullpath,
root_directory))
files_to_process.append((fullpath, fullpath_output))
if copy_other_files:
for filename in copy_files:
fullpath = os.path.join(dir_name, filename)
fullpath_output = os.path.join(output_root_directory,
os.path.relpath(
fullpath, root_directory))
files_to_copy.append((fullpath, fullpath_output))
file_count = 0
tree_errors = {}
report = ""
report += ("=" * 80) + "\n"
report += "Input tree: %r\n" % root_directory
report += ("=" * 80) + "\n"
for input_path, output_path in files_to_process:
output_directory = os.path.dirname(output_path)
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
if os.path.islink(input_path):
link_target = os.readlink(input_path)
link_target_output = os.path.join(
output_root_directory, os.path.relpath(link_target, root_directory))
if (link_target, link_target_output) in files_to_process:
# Create a link to the new location of the target file
os.symlink(link_target_output, output_path)
else:
report += "Copying symlink %s without modifying its target %s" % (
input_path, link_target)
os.symlink(link_target, output_path)
continue
file_count += 1
_, l_report, l_errors = self.process_file(input_path, output_path)
tree_errors[input_path] = l_errors
report += l_report
for input_path, output_path in files_to_copy:
output_directory = os.path.dirname(output_path)
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
shutil.copy(input_path, output_path)
return file_count, report, tree_errors
def process_tree_inplace(self, root_directory):
"""Process a directory of python files in place."""
files_to_process = []
for dir_name, _, file_list in os.walk(root_directory):
py_files = [
os.path.join(dir_name, f) for f in file_list if f.endswith(".py")
]
files_to_process += py_files
file_count = 0
tree_errors = {}
report = ""
report += ("=" * 80) + "\n"
report += "Input tree: %r\n" % root_directory
report += ("=" * 80) + "\n"
for path in files_to_process:
if os.path.islink(path):
report += "Skipping symlink %s.\n" % path
continue
file_count += 1
_, l_report, l_errors = self.process_file(path, path)
tree_errors[path] = l_errors
report += l_report
return file_count, report, tree_errors
| apache-2.0 |
yashodhank/frappe | frappe/tests/test_goal.py | 3 | 1378 | # Copyright (c) 2022, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import frappe
from frappe.test_runner import make_test_objects
from frappe.tests.utils import FrappeTestCase
from frappe.utils import format_date, today
from frappe.utils.goal import get_monthly_goal_graph_data, get_monthly_results
class TestGoal(FrappeTestCase):
def setUp(self):
make_test_objects("Event", reset=True)
def tearDown(self):
frappe.db.delete("Event")
def test_get_monthly_results(self):
"""Test monthly aggregation values of a field"""
result_dict = get_monthly_results(
"Event",
"subject",
"creation",
filters={"event_type": "Private"},
aggregation="count",
)
self.assertEqual(result_dict.get(format_date(today(), "MM-yyyy")), 2)
def test_get_monthly_goal_graph_data(self):
"""Test for accurate values in graph data (based on test_get_monthly_results)"""
docname = frappe.get_list("Event", filters={"subject": ["=", "_Test Event 1"]})[0]["name"]
frappe.db.set_value("Event", docname, "description", 1)
data = get_monthly_goal_graph_data(
"Test",
"Event",
docname,
"description",
"description",
"description",
"Event",
"",
"description",
"creation",
filters={"starts_on": "2014-01-01"},
aggregation="count",
)
self.assertEqual(float(data["data"]["datasets"][0]["values"][-1]), 1)
| mit |
heli522/scikit-learn | examples/svm/plot_separating_hyperplane_unbalanced.py | 326 | 1850 | """
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating plane with a plain SVC and then plot
(dashed) the separating hyperplane with automatic correction for
unbalanced classes.
.. currentmodule:: sklearn.linear_model
.. note::
This example will also work by replacing ``SVC(kernel="linear")``
with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter
of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour
such as that of a SVC with a linear kernel.
For example try instead of the ``SVC``::
clf = SGDClassifier(n_iter=100, alpha=0.01)
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
#from sklearn.linear_model import SGDClassifier
# we create two clusters of random points: 1000 and 100 points
rng = np.random.RandomState(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * rng.randn(n_samples_1, 2),
0.5 * rng.randn(n_samples_2, 2) + [2, 2]]
y = [0] * (n_samples_1) + [1] * (n_samples_2)
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_[0] / w[1]
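# The boundary satisfies w[0]*x + w[1]*y + intercept = 0, i.e.
# y = -(w[0]/w[1]) * x - intercept/w[1], which is what yy encodes above.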
# get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)
ww = wclf.coef_[0]
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_[0] / ww[1]
# plot separating hyperplanes and samples
h0 = plt.plot(xx, yy, 'k-', label='no weights')
h1 = plt.plot(xx, wyy, 'k--', label='with weights')
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.legend()
plt.axis('tight')
plt.show()
| bsd-3-clause |
tapomayukh/projects_in_python | classification/Classification_with_kNN/Single_Contact_Classification/Final/best_kNN_PCA/4-categories/6/test11_cross_validate_categories_6_no_motion_1200ms.py | 1 | 4739 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/6')
from data_6 import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
def my_mvpa(Y,num2):
#Using PYMVPA
PCA_data = np.array(Y)
PCA_label_1 = ['Rigid-Fixed']*35 + ['Rigid-Movable']*35 + ['Soft-Fixed']*35 + ['Soft-Movable']*35
PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=num2)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
return (1-error)*100
def result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC):
    # Reduced eigenvector matrix keeping the num_PC eigenvectors with the highest eigenvalues
W = eigvec_total[:,0:num_PC]
m_W, n_W = np.shape(W)
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
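    # Y has shape (num_PC, n_samples); the transpose returned below yields one
    # num_PC-dimensional feature vector per sample for the kNN classifier.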
m_Y, n_Y = np.shape(Y.T)
return Y.T
if __name__ == '__main__':
Fmat = Fmat_original[0:82,:]
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
    # Recall that the cumulative sum of the eigenvalues shows the fraction of variance accounted for by the corresponding eigenvectors; perc_total below holds this cumulative fraction.
perc_total = cumsum(eigval_total)/sum(eigval_total)
num_PC=1
while num_PC <=20:
Proj = np.zeros((140,num_PC))
Proj = result(eigvec_total,eigval_total,mean_data_total,B,C,num_PC)
# PYMVPA:
num=0
cv_acc = np.zeros(21)
while num <=20:
cv_acc[num] = my_mvpa(Proj,num)
num = num+1
plot(np.arange(21),cv_acc,'-s')
grid('True')
hold('True')
num_PC = num_PC+1
legend(('1-PC', '2-PCs', '3-PCs', '4-PCs', '5-PCs', '6-PCs', '7-PCs', '8-PCs', '9-PCs', '10-PCs', '11-PC', '12-PCs', '13-PCs', '14-PCs', '15-PCs', '16-PCs', '17-PCs', '18-PCs', '19-PCs', '20-PCs'))
ylabel('Cross-Validation Accuracy')
xlabel('k in k-NN Classifier')
show()
| mit |
KellyChan/Python | python/tensorflow/demos/tensorflow/concepts/learn.py | 3 | 1427 | import pickle
import numpy as np
from matplotlib import pyplot as plt
from sklearn import svm
from sklearn import metrics
from sklearn.datasets import load_digits
from sklearn.cross_validation import train_test_split
from tensorflow.contrib import skflow
def load_cifar(file):
with open(file, 'rb') as inf:
cifar = pickle.load(inf, encoding='latin1')
data = cifar['data'].reshape((10000, 3, 32, 32))
data = np.rollaxis(data, 3, 1)
data = np.rollaxis(data, 3, 1)
y = np.array(cifar['labels'])
mask = (y == 2) | (y == 9)
data = data[mask]
y = y[mask]
return data, y
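# [Editor's sketch -- not part of the original file] Shape check for the
# reshape/rollaxis sequence in load_cifar(): the two rollaxis calls turn the
# channel-first (N, 3, 32, 32) layout of the CIFAR-10 pickle into a
# channel-last (N, 32, 32, 3) array, and the mask keeps only labels 2 and 9
# (bird and truck in the standard CIFAR-10 label set). The dummy array below
# is hypothetical and only mimics the shapes, not real CIFAR data.
def _cifar_layout_sketch(n=4):
    dummy = np.zeros((n, 3, 32, 32))
    dummy = np.rollaxis(dummy, 3, 1)   # (n, 32, 3, 32)
    dummy = np.rollaxis(dummy, 3, 1)   # (n, 32, 32, 3)
    assert dummy.shape == (n, 32, 32, 3)
    return dummy.shape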
def classifier_svm(digits):
classifier = svm.SVC(gamma=0.001)
classifier.fit(digits.data, digits.target)
predicted = classifier.predict(digits.data)
print(np.mean(digits.target == predicted))
def classifier_tf(digits):
X_train, X_test, y_train, y_test = train_test_split(digits.data, digits.target)
n_classes = len(set(y_train))
classifier = skflow.TensorFlowLinearClassifier(n_classes=n_classes)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
print(metrics.classification_report(y_true=y_test, y_pred=y_pred))
if __name__ == '__main__':
digits = load_digits()
fig = plt.figure(figsize=(3,3))
plt.imshow(digits['images'][66], cmap="gray", interpolation='none')
plt.show()
classifier_svm(digits)
classifier_tf(digits)
| mit |
geoadmin/mapproxy | mapproxy/service/wms.py | 5 | 31048 | # This file is part of the MapProxy project.
# Copyright (C) 2010-2014 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
WMS service handler
"""
from mapproxy.compat import iteritems
from mapproxy.compat.itertools import chain
from functools import partial
from mapproxy.cache.tile import CacheInfo
from mapproxy.request.wms import (wms_request, WMS111LegendGraphicRequest,
mimetype_from_infotype, infotype_from_mimetype, switch_bbox_epsg_axis_order)
from mapproxy.srs import SRS, TransformationError
from mapproxy.service.base import Server
from mapproxy.response import Response
from mapproxy.source import SourceError
from mapproxy.exception import RequestError
from mapproxy.image import bbox_position_in_image, SubImageSource, BlankImageSource
from mapproxy.image.merge import concat_legends, LayerMerger
from mapproxy.image.opts import ImageOptions
from mapproxy.image.message import attribution_image, message_image
from mapproxy.layer import BlankImage, MapQuery, InfoQuery, LegendQuery, MapError, LimitedLayer
from mapproxy.layer import MapBBOXError, merge_layer_extents, merge_layer_res_ranges
from mapproxy.util import async
from mapproxy.util.py import cached_property
from mapproxy.util.coverage import load_limited_to
from mapproxy.util.ext.odict import odict
from mapproxy.template import template_loader, bunch, recursive_bunch
from mapproxy.service import template_helper
from mapproxy.layer import DefaultMapExtent, MapExtent
get_template = template_loader(__name__, 'templates', namespace=template_helper.__dict__)
class PERMIT_ALL_LAYERS(object):
pass
class WMSServer(Server):
service = 'wms'
fi_transformers = None
def __init__(self, root_layer, md, srs, image_formats,
request_parser=None, tile_layers=None, attribution=None,
info_types=None, strict=False, on_error='raise',
concurrent_layer_renderer=1, max_output_pixels=None,
srs_extents=None, max_tile_age=None,
versions=None,
inspire_md=None,
):
Server.__init__(self)
self.request_parser = request_parser or partial(wms_request, strict=strict, versions=versions)
self.root_layer = root_layer
self.layers = root_layer.child_layers()
self.tile_layers = tile_layers or {}
self.strict = strict
self.attribution = attribution
self.md = md
self.on_error = on_error
self.concurrent_layer_renderer = concurrent_layer_renderer
self.image_formats = image_formats
self.info_types = info_types
self.srs = srs
self.srs_extents = srs_extents
self.max_output_pixels = max_output_pixels
self.max_tile_age = max_tile_age
self.inspire_md = inspire_md
def map(self, map_request):
self.check_map_request(map_request)
params = map_request.params
query = MapQuery(params.bbox, params.size, SRS(params.srs), params.format)
if map_request.params.get('tiled', 'false').lower() == 'true':
query.tiled_only = True
orig_query = query
if self.srs_extents and params.srs in self.srs_extents:
# limit query to srs_extent if query is larger
query_extent = MapExtent(params.bbox, SRS(params.srs))
if not self.srs_extents[params.srs].contains(query_extent):
limited_extent = self.srs_extents[params.srs].intersection(query_extent)
if not limited_extent:
img_opts = self.image_formats[params.format_mime_type].copy()
img_opts.bgcolor = params.bgcolor
img_opts.transparent = params.transparent
img = BlankImageSource(size=params.size, image_opts=img_opts, cacheable=True)
return Response(img.as_buffer(), content_type=img_opts.format.mime_type)
sub_size, offset, sub_bbox = bbox_position_in_image(params.bbox, params.size, limited_extent.bbox)
query = MapQuery(sub_bbox, sub_size, SRS(params.srs), params.format)
actual_layers = odict()
for layer_name in map_request.params.layers:
layer = self.layers[layer_name]
# only add if layer renders the query
if layer.renders_query(query):
# if layer is not transparent and will be rendered,
# remove already added (then hidden) layers
if not layer.transparent:
actual_layers = odict()
for layer_name, map_layers in layer.map_layers_for_query(query):
actual_layers[layer_name] = map_layers
authorized_layers, coverage = self.authorized_layers('map', actual_layers.keys(),
map_request.http.environ, query_extent=(query.srs.srs_code, query.bbox))
self.filter_actual_layers(actual_layers, map_request.params.layers, authorized_layers)
render_layers = []
for layers in actual_layers.values():
render_layers.extend(layers)
self.update_query_with_fwd_params(query, params=params,
layers=render_layers)
raise_source_errors = True if self.on_error == 'raise' else False
renderer = LayerRenderer(render_layers, query, map_request,
raise_source_errors=raise_source_errors,
concurrent_rendering=self.concurrent_layer_renderer)
merger = LayerMerger()
renderer.render(merger)
if self.attribution and self.attribution.get('text') and not query.tiled_only:
merger.add(attribution_image(self.attribution['text'], query.size))
img_opts = self.image_formats[params.format_mime_type].copy()
img_opts.bgcolor = params.bgcolor
img_opts.transparent = params.transparent
result = merger.merge(size=query.size, image_opts=img_opts,
bbox=query.bbox, bbox_srs=params.srs, coverage=coverage)
if query != orig_query:
result = SubImageSource(result, size=orig_query.size, offset=offset, image_opts=img_opts)
# Provide the wrapping WSGI app or filter the opportunity to process the
# image before it's wrapped up in a response
result = self.decorate_img(result, 'wms.map', actual_layers.keys(),
map_request.http.environ, (query.srs.srs_code, query.bbox))
try:
result_buf = result.as_buffer(img_opts)
except IOError as ex:
raise RequestError('error while processing image file: %s' % ex,
request=map_request)
resp = Response(result_buf, content_type=img_opts.format.mime_type)
if query.tiled_only and isinstance(result.cacheable, CacheInfo):
cache_info = result.cacheable
resp.cache_headers(cache_info.timestamp, etag_data=(cache_info.timestamp, cache_info.size),
max_age=self.max_tile_age)
resp.make_conditional(map_request.http)
if not result.cacheable:
resp.cache_headers(no_cache=True)
return resp
def capabilities(self, map_request):
# TODO: debug layer
# if '__debug__' in map_request.params:
# layers = self.layers.values()
# else:
# layers = [layer for name, layer in iteritems(self.layers)
# if name != '__debug__']
if map_request.params.get('tiled', 'false').lower() == 'true':
tile_layers = self.tile_layers.values()
else:
tile_layers = []
service = self._service_md(map_request)
root_layer = self.authorized_capability_layers(map_request.http.environ)
info_types = ['text', 'html', 'xml'] # defaults
if self.info_types:
info_types = self.info_types
elif self.fi_transformers:
info_types = self.fi_transformers.keys()
info_formats = [mimetype_from_infotype(map_request.version, info_type) for info_type in info_types]
result = Capabilities(service, root_layer, tile_layers,
self.image_formats, info_formats, srs=self.srs, srs_extents=self.srs_extents,
inspire_md=self.inspire_md,
).render(map_request)
return Response(result, mimetype=map_request.mime_type)
def featureinfo(self, request):
infos = []
self.check_featureinfo_request(request)
p = request.params
query = InfoQuery(p.bbox, p.size, SRS(p.srs), p.pos,
p['info_format'], format=request.params.format or None,
feature_count=p.get('feature_count'))
actual_layers = odict()
for layer_name in request.params.query_layers:
layer = self.layers[layer_name]
if not layer.queryable:
raise RequestError('layer %s is not queryable' % layer_name, request=request)
for layer_name, info_layers in layer.info_layers_for_query(query):
actual_layers[layer_name] = info_layers
authorized_layers, coverage = self.authorized_layers('featureinfo', actual_layers.keys(),
request.http.environ, query_extent=(query.srs.srs_code, query.bbox))
self.filter_actual_layers(actual_layers, request.params.layers, authorized_layers)
# outside of auth-coverage
if coverage and not coverage.contains(query.coord, query.srs):
infos = []
else:
info_layers = []
for layers in actual_layers.values():
info_layers.extend(layers)
for layer in info_layers:
info = layer.get_info(query)
if info is None:
continue
infos.append(info)
mimetype = None
if 'info_format' in request.params:
mimetype = request.params.info_format
if not infos:
return Response('', mimetype=mimetype)
if self.fi_transformers:
doc = infos[0].combine(infos)
if doc.info_type == 'text':
resp = doc.as_string()
mimetype = 'text/plain'
else:
if not mimetype:
if 'xml' in self.fi_transformers:
info_type = 'xml'
elif 'html' in self.fi_transformers:
info_type = 'html'
else:
info_type = 'text'
mimetype = mimetype_from_infotype(request.version, info_type)
else:
info_type = infotype_from_mimetype(request.version, mimetype)
resp = self.fi_transformers[info_type](doc).as_string()
else:
mimetype = mimetype_from_infotype(request.version, infos[0].info_type)
if len(infos) > 1:
resp = infos[0].combine(infos).as_string()
else:
resp = infos[0].as_string()
return Response(resp, mimetype=mimetype)
def check_map_request(self, request):
if self.max_output_pixels and \
(request.params.size[0] * request.params.size[1]) > self.max_output_pixels:
request.prevent_image_exception = True
raise RequestError("image size too large", request=request)
self.validate_layers(request)
request.validate_format(self.image_formats)
request.validate_srs(self.srs)
def update_query_with_fwd_params(self, query, params, layers):
# forward relevant request params into MapQuery.dimensions
for layer in layers:
if not hasattr(layer, 'fwd_req_params'):
continue
for p in layer.fwd_req_params:
if p in params:
query.dimensions[p] = params[p]
def check_featureinfo_request(self, request):
self.validate_layers(request)
request.validate_srs(self.srs)
def validate_layers(self, request):
query_layers = request.params.query_layers if hasattr(request, 'query_layers') else []
for layer in chain(request.params.layers, query_layers):
if layer not in self.layers:
raise RequestError('unknown layer: ' + str(layer), code='LayerNotDefined',
request=request)
def check_legend_request(self, request):
if request.params.layer not in self.layers:
raise RequestError('unknown layer: ' + request.params.layer,
code='LayerNotDefined', request=request)
#TODO: If layer not in self.layers raise RequestError
def legendgraphic(self, request):
legends = []
self.check_legend_request(request)
layer = request.params.layer
if not self.layers[layer].has_legend:
raise RequestError('layer %s has no legend graphic' % layer, request=request)
legend = self.layers[layer].legend(request)
[legends.append(i) for i in legend if i is not None]
result = concat_legends(legends)
if 'format' in request.params:
mimetype = request.params.format_mime_type
else:
mimetype = 'image/png'
img_opts = self.image_formats[request.params.format_mime_type]
return Response(result.as_buffer(img_opts), mimetype=mimetype)
def _service_md(self, map_request):
md = dict(self.md)
md['url'] = map_request.url
md['has_legend'] = self.root_layer.has_legend
return md
def authorized_layers(self, feature, layers, env, query_extent):
if 'mapproxy.authorize' in env:
result = env['mapproxy.authorize']('wms.' + feature, layers[:],
environ=env, query_extent=query_extent)
if result['authorized'] == 'unauthenticated':
raise RequestError('unauthorized', status=401)
if result['authorized'] == 'full':
return PERMIT_ALL_LAYERS, None
layers = {}
if result['authorized'] == 'partial':
for layer_name, permissions in iteritems(result['layers']):
if permissions.get(feature, False) == True:
layers[layer_name] = permissions.get('limited_to')
limited_to = result.get('limited_to')
if limited_to:
coverage = load_limited_to(limited_to)
else:
coverage = None
return layers, coverage
else:
return PERMIT_ALL_LAYERS, None
def filter_actual_layers(self, actual_layers, requested_layers, authorized_layers):
if authorized_layers is not PERMIT_ALL_LAYERS:
requested_layer_names = set(requested_layers)
for layer_name in actual_layers.keys():
if layer_name not in authorized_layers:
# check whether layer was requested explicit...
if layer_name in requested_layer_names:
raise RequestError('forbidden', status=403)
# or implicit (part of group layer)
else:
del actual_layers[layer_name]
elif authorized_layers[layer_name] is not None:
limited_to = load_limited_to(authorized_layers[layer_name])
actual_layers[layer_name] = [LimitedLayer(lyr, limited_to) for lyr in actual_layers[layer_name]]
def authorized_capability_layers(self, env):
if 'mapproxy.authorize' in env:
result = env['mapproxy.authorize']('wms.capabilities', self.layers.keys(), environ=env)
if result['authorized'] == 'unauthenticated':
raise RequestError('unauthorized', status=401)
if result['authorized'] == 'full':
return self.root_layer
if result['authorized'] == 'partial':
limited_to = result.get('limited_to')
if limited_to:
coverage = load_limited_to(limited_to)
else:
coverage = None
return FilteredRootLayer(self.root_layer, result['layers'], coverage=coverage)
raise RequestError('forbidden', status=403)
else:
return self.root_layer
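# [Editor's sketch -- not part of MapProxy] Shape of the 'mapproxy.authorize'
# callback whose result is consumed by authorized_layers(),
# filter_actual_layers() and authorized_capability_layers() above:
# 'authorized' is 'unauthenticated', 'full' or 'partial'; for 'partial',
# 'layers' maps layer names to per-feature flags ('map', 'featureinfo', ...)
# plus an optional 'limited_to' coverage definition (handled by
# load_limited_to, not sketched here). The callback below is hypothetical and
# only permits GetMap requests for a layer called 'osm'.
def _example_authorize(service, layers, environ=None, query_extent=None, **kw):
    permitted = {}
    for name in layers:
        if name == 'osm':
            permitted[name] = {'map': True, 'featureinfo': False}
    return {'authorized': 'partial', 'layers': permitted}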
class FilteredRootLayer(object):
def __init__(self, root_layer, permissions, coverage=None):
self.root_layer = root_layer
self.permissions = permissions
self.coverage = coverage
def __getattr__(self, name):
return getattr(self.root_layer, name)
@cached_property
def extent(self):
layer_name = self.root_layer.name
limited_to = self.permissions.get(layer_name, {}).get('limited_to')
extent = self.root_layer.extent
if limited_to:
coverage = load_limited_to(limited_to)
limited_coverage = coverage.intersection(extent.bbox, extent.srs)
extent = limited_coverage.extent
if self.coverage:
limited_coverage = self.coverage.intersection(extent.bbox, extent.srs)
extent = limited_coverage.extent
return extent
@property
def queryable(self):
if not self.root_layer.queryable: return False
layer_name = self.root_layer.name
if not layer_name or self.permissions.get(layer_name, {}).get('featureinfo', False):
return True
return False
def layer_permitted(self, layer):
if not self.permissions.get(layer.name, {}).get('map', False):
return False
extent = layer.extent
limited_to = self.permissions.get(layer.name, {}).get('limited_to')
if limited_to:
coverage = load_limited_to(limited_to)
if not coverage.intersects(extent.bbox, extent.srs):
return False
if self.coverage:
if not self.coverage.intersects(extent.bbox, extent.srs):
return False
return True
@cached_property
def layers(self):
layers = []
for layer in self.root_layer.layers:
if not layer.name or self.layer_permitted(layer):
filtered_layer = FilteredRootLayer(layer, self.permissions, self.coverage)
if filtered_layer.is_active or filtered_layer.layers:
# add filtered_layer only if it is active (no grouping layer)
# or if it contains other active layers
layers.append(filtered_layer)
return layers
DEFAULT_EXTENTS = {
'EPSG:3857': DefaultMapExtent(),
'EPSG:4326': DefaultMapExtent(),
'EPSG:900913': DefaultMapExtent(),
}
def limit_srs_extents(srs_extents, supported_srs):
"""
Limit srs_extents to supported_srs.
"""
if srs_extents:
srs_extents = srs_extents.copy()
else:
srs_extents = DEFAULT_EXTENTS.copy()
for srs in list(srs_extents.keys()):
if srs not in supported_srs:
srs_extents.pop(srs)
return srs_extents
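# [Editor's sketch -- not part of MapProxy] limit_srs_extents() only filters
# the keys of the mapping and leaves the values untouched, so plain
# placeholder values are enough to illustrate it.
def _limit_srs_extents_sketch():
    extents = {'EPSG:4326': 'extent-a', 'EPSG:3857': 'extent-b'}
    limited = limit_srs_extents(extents, supported_srs=['EPSG:4326'])
    assert list(limited.keys()) == ['EPSG:4326']   # unsupported SRS dropped
    return limited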
class Capabilities(object):
"""
Renders WMS capabilities documents.
"""
def __init__(self, server_md, layers, tile_layers, image_formats, info_formats,
srs, srs_extents=None, epsg_axis_order=False,
inspire_md=None,
):
self.service = server_md
self.layers = layers
self.tile_layers = tile_layers
self.image_formats = image_formats
self.info_formats = info_formats
self.srs = srs
self.srs_extents = limit_srs_extents(srs_extents, srs)
self.inspire_md = inspire_md
def layer_srs_bbox(self, layer, epsg_axis_order=False):
layer_srs_code = layer.extent.srs.srs_code
for srs, extent in iteritems(self.srs_extents):
if extent.is_default:
bbox = layer.extent.bbox_for(SRS(srs))
else:
bbox = extent.bbox_for(SRS(srs))
if epsg_axis_order:
bbox = switch_bbox_epsg_axis_order(bbox, srs)
yield srs, bbox
# add native srs
if layer_srs_code not in self.srs_extents:
bbox = layer.extent.bbox
if epsg_axis_order:
bbox = switch_bbox_epsg_axis_order(bbox, layer_srs_code)
yield layer_srs_code, bbox
def render(self, _map_request):
return self._render_template(_map_request.capabilities_template)
def _render_template(self, template):
template = get_template(template)
inspire_md = None
if self.inspire_md:
inspire_md = recursive_bunch(default='', **self.inspire_md)
doc = template.substitute(service=bunch(default='', **self.service),
layers=self.layers,
formats=self.image_formats,
info_formats=self.info_formats,
srs=self.srs,
tile_layers=self.tile_layers,
layer_srs_bbox=self.layer_srs_bbox,
inspire_md=inspire_md,
)
# strip blank lines
doc = '\n'.join(l for l in doc.split('\n') if l.rstrip())
return doc
class LayerRenderer(object):
def __init__(self, layers, query, request, raise_source_errors=True,
concurrent_rendering=1):
self.layers = layers
self.query = query
self.request = request
self.raise_source_errors = raise_source_errors
self.concurrent_rendering = concurrent_rendering
def render(self, layer_merger):
render_layers = combined_layers(self.layers, self.query)
if not render_layers: return
async_pool = async.Pool(size=min(len(render_layers), self.concurrent_rendering))
if self.raise_source_errors:
return self._render_raise_exceptions(async_pool, render_layers, layer_merger)
else:
return self._render_capture_source_errors(async_pool, render_layers,
layer_merger)
def _render_raise_exceptions(self, async_pool, render_layers, layer_merger):
# call _render_layer, raise all exceptions
try:
for layer_task in async_pool.imap(self._render_layer, render_layers,
use_result_objects=True):
if layer_task.exception is None:
layer, layer_img = layer_task.result
if layer_img is not None:
layer_merger.add(layer_img, layer=layer)
else:
ex = layer_task.exception
async_pool.shutdown(True)
raise ex[1]
except SourceError as ex:
raise RequestError(ex.args[0], request=self.request)
def _render_capture_source_errors(self, async_pool, render_layers, layer_merger):
# call _render_layer, capture SourceError exceptions
errors = []
rendered = 0
for layer_task in async_pool.imap(self._render_layer, render_layers,
use_result_objects=True):
if layer_task.exception is None:
layer, layer_img = layer_task.result
if layer_img is not None:
layer_merger.add(layer_img, layer=layer)
rendered += 1
else:
layer_merger.cacheable = False
ex = layer_task.exception
if isinstance(ex[1], SourceError):
errors.append(ex[1].args[0])
else:
async_pool.shutdown(True)
raise ex[1]
if render_layers and not rendered:
errors = '\n'.join(errors)
raise RequestError('Could not get any sources:\n'+errors, request=self.request)
if errors:
layer_merger.add(message_image('\n'.join(errors), self.query.size,
image_opts=ImageOptions(transparent=True)))
def _render_layer(self, layer):
try:
layer_img = layer.get_map(self.query)
if layer_img is not None:
layer_img.opacity = layer.opacity
return layer, layer_img
except SourceError:
raise
except MapBBOXError:
raise RequestError('Request too large or invalid BBOX.', request=self.request)
except MapError as e:
raise RequestError('Invalid request: %s' % e.args[0], request=self.request)
except TransformationError:
raise RequestError('Could not transform BBOX: Invalid result.',
request=self.request)
except BlankImage:
return layer, None
class WMSLayerBase(object):
"""
Base class for WMS layer (layer groups and leaf layers).
"""
"True if layer is an actual layer (not a group only)"
is_active = True
"list of sublayers"
layers = []
"metadata dictionary with tile, name, etc."
md = {}
"True if .info() is supported"
queryable = False
transparent = False
"True is .legend() is supported"
has_legend = False
legend_url = None
legend_size = None
"resolution range (i.e. ScaleHint) of the layer"
res_range = None
"MapExtend of the layer"
extent = None
def is_opaque(self):
return not self.transparent
def map_layers_for_query(self, query):
raise NotImplementedError()
def legend(self, query):
raise NotImplementedError()
def info(self, query):
raise NotImplementedError()
class WMSLayer(WMSLayerBase):
"""
Class for WMS layers.
Combines map, info and legend sources with metadata.
"""
is_active = True
layers = []
def __init__(self, name, title, map_layers, info_layers=[], legend_layers=[],
res_range=None, md=None):
self.name = name
self.title = title
self.md = md or {}
self.map_layers = map_layers
self.info_layers = info_layers
self.legend_layers = legend_layers
self.extent = merge_layer_extents(map_layers)
if res_range is None:
res_range = merge_layer_res_ranges(map_layers)
self.res_range = res_range
self.queryable = True if info_layers else False
self.transparent = all(not map_lyr.is_opaque() for map_lyr in self.map_layers)
self.has_legend = True if legend_layers else False
def renders_query(self, query):
if self.res_range and not self.res_range.contains(query.bbox, query.size, query.srs):
return False
return True
def map_layers_for_query(self, query):
if not self.map_layers:
return []
return [(self.name, self.map_layers)]
def info_layers_for_query(self, query):
if not self.info_layers:
return []
return [(self.name, self.info_layers)]
def legend(self, request):
p = request.params
query = LegendQuery(p.format, p.scale)
for lyr in self.legend_layers:
yield lyr.get_legend(query)
@property
def legend_size(self):
width = 0
height = 0
for layer in self.legend_layers:
width = max(layer.size[0], width)
height += layer.size[1]
return (width, height)
@property
def legend_url(self):
if self.has_legend:
req = WMS111LegendGraphicRequest(url='?',
param=dict(format='image/png', layer=self.name, sld_version='1.1.0'))
return req.complete_url
else:
return None
def child_layers(self):
return {self.name: self}
class WMSGroupLayer(WMSLayerBase):
"""
Class for WMS group layers.
Groups multiple wms layers, but can also contain a single layer (``this``)
that represents this layer.
"""
def __init__(self, name, title, this, layers, md=None):
self.name = name
self.title = title
self.this = this
self.md = md or {}
self.is_active = True if this is not None else False
self.layers = layers
self.transparent = True if this and not this.is_opaque() or all(not l.is_opaque() for l in layers) else False
self.has_legend = True if this and this.has_legend or any(l.has_legend for l in layers) else False
self.queryable = True if this and this.queryable or any(l.queryable for l in layers) else False
all_layers = layers + ([self.this] if self.this else [])
self.extent = merge_layer_extents(all_layers)
self.res_range = merge_layer_res_ranges(all_layers)
@property
def legend_size(self):
return self.this.legend_size
@property
def legend_url(self):
return self.this.legend_url
def renders_query(self, query):
if self.res_range and not self.res_range.contains(query.bbox, query.size, query.srs):
return False
return True
def map_layers_for_query(self, query):
if self.this:
return self.this.map_layers_for_query(query)
else:
layers = []
for layer in self.layers:
layers.extend(layer.map_layers_for_query(query))
return layers
def info_layers_for_query(self, query):
if self.this:
return self.this.info_layers_for_query(query)
else:
layers = []
for layer in self.layers:
layers.extend(layer.info_layers_for_query(query))
return layers
def child_layers(self):
layers = odict()
if self.name:
layers[self.name] = self
for lyr in self.layers:
if hasattr(lyr, 'child_layers'):
layers.update(lyr.child_layers())
elif lyr.name:
layers[lyr.name] = lyr
return layers
def combined_layers(layers, query):
"""
Returns a new list of the layers where all adjacent layers are combined
if possible.
"""
if len(layers) <= 1:
return layers
layers = layers[:]
combined_layers = [layers.pop(0)]
while layers:
current_layer = layers.pop(0)
combined = combined_layers[-1].combined_layer(current_layer, query)
if combined:
# change last layer with combined
combined_layers[-1] = combined
else:
combined_layers.append(current_layer)
return combined_layers
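# [Editor's sketch -- not part of MapProxy] Illustration of the adjacent-merge
# behaviour of combined_layers() with hypothetical stub layers: a stub combines
# with its right-hand neighbour only when both carry the same tag, mimicking
# sources that can be fetched in a single request.
class _StubLayer(object):
    def __init__(self, tag):
        self.tag = tag
    def combined_layer(self, other, query):
        return _StubLayer(self.tag) if self.tag == other.tag else None

def _combined_layers_sketch():
    merged = combined_layers([_StubLayer('a'), _StubLayer('a'), _StubLayer('b')],
                             query=None)
    assert len(merged) == 2   # the two adjacent 'a' layers collapse into one
    return merged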
| apache-2.0 |
YihaoLu/statsmodels | statsmodels/iolib/summary.py | 22 | 33071 | from statsmodels.compat.python import range, lrange, lmap, lzip, zip_longest
import numpy as np
from statsmodels.iolib.table import SimpleTable
from statsmodels.iolib.tableformatting import (gen_fmt, fmt_2,
fmt_params, fmt_base, fmt_2cols)
#from statsmodels.iolib.summary2d import summary_params_2dflat
#from summary2d import summary_params_2dflat
def forg(x, prec=3):
if prec == 3:
#for 3 decimals
if (abs(x) >= 1e4) or (abs(x) < 1e-4):
return '%9.3g' % x
else:
return '%9.3f' % x
elif prec == 4:
if (abs(x) >= 1e4) or (abs(x) < 1e-4):
return '%10.4g' % x
else:
return '%10.4f' % x
else:
raise NotImplementedError
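# [Editor's sketch -- not part of statsmodels] forg() switches to a '%.3g'
# scientific form for |x| >= 1e4 or |x| < 1e-4 and otherwise prints fixed
# decimals; only prec=3 and prec=4 are supported (anything else raises
# NotImplementedError). The values below are arbitrary examples.
def _forg_examples():
    return [forg(1e-05),             # |x| < 1e-4  -> '%9.3g' branch
            forg(123456.0),          # |x| >= 1e4  -> '%9.3g' branch
            forg(3.14159),           # default     -> '%9.3f', three decimals
            forg(3.14159, prec=4)]   # prec=4      -> '%10.4f', four decimals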
def summary(self, yname=None, xname=None, title=0, alpha=.05,
returns='text', model_info=None):
"""
Parameters
    ----------
yname : string
optional, Default is `Y`
xname : list of strings
optional, Default is `X.#` for # in p the number of regressors
    Confidence interval : (0,1) not implemented
title : string
        optional, Default is 'Generalized linear model'
returns : string
'text', 'table', 'csv', 'latex', 'html'
Returns
-------
Default :
returns='print'
        Prints the summarized results
Option :
returns='text'
        Prints the summarized results
Option :
returns='table'
SimpleTable instance : summarizing the fit of a linear model.
Option :
returns='csv'
returns a string of csv of the results, to import into a spreadsheet
Option :
returns='latex'
        Not implemented yet
Option :
returns='HTML'
        Not implemented yet
Examples (needs updating)
--------
>>> import statsmodels as sm
>>> data = sm.datasets.longley.load()
>>> data.exog = sm.add_constant(data.exog)
>>> ols_results = sm.OLS(data.endog, data.exog).results
>>> print ols_results.summary()
...
Notes
-----
conf_int calculated from normal dist.
"""
import time as time
#TODO Make sure all self.model.__class__.__name__ are listed
model_types = {'OLS' : 'Ordinary least squares',
'GLS' : 'Generalized least squares',
'GLSAR' : 'Generalized least squares with AR(p)',
                   'WLS' : 'Weighted least squares',
'RLM' : 'Robust linear model',
'GLM' : 'Generalized linear model'
}
model_methods = {'OLS' : 'Least Squares',
'GLS' : 'Least Squares',
'GLSAR' : 'Least Squares',
'WLS' : 'Least Squares',
'RLM' : '?',
'GLM' : '?'
}
if title==0:
title = model_types[self.model.__class__.__name__]
if yname is None:
try:
yname = self.model.endog_names
except AttributeError:
yname = 'y'
if xname is None:
try:
xname = self.model.exog_names
except AttributeError:
xname = ['var_%d' % i for i in range(len(self.params))]
time_now = time.localtime()
time_of_day = [time.strftime("%H:%M:%S", time_now)]
date = time.strftime("%a, %d %b %Y", time_now)
modeltype = self.model.__class__.__name__
#dist_family = self.model.family.__class__.__name__
nobs = self.nobs
df_model = self.df_model
df_resid = self.df_resid
#General part of the summary table, Applicable to all? models
#------------------------------------------------------------
#TODO: define this generically, overwrite in model classes
#replace definition of stubs data by single list
#e.g.
gen_left = [('Model type:', [modeltype]),
('Date:', [date]),
('Dependent Variable:', yname), #What happens with multiple names?
('df model', [df_model])
]
gen_stubs_left, gen_data_left = zip_longest(*gen_left) #transpose row col
gen_title = title
gen_header = None
## gen_stubs_left = ('Model type:',
## 'Date:',
## 'Dependent Variable:',
## 'df model'
## )
## gen_data_left = [[modeltype],
## [date],
## yname, #What happens with multiple names?
## [df_model]
## ]
gen_table_left = SimpleTable(gen_data_left,
gen_header,
gen_stubs_left,
title = gen_title,
txt_fmt = gen_fmt
)
gen_stubs_right = ('Method:',
'Time:',
'Number of Obs:',
'df resid'
)
gen_data_right = ([modeltype], #was dist family need to look at more
time_of_day,
[nobs],
[df_resid]
)
gen_table_right = SimpleTable(gen_data_right,
gen_header,
gen_stubs_right,
title = gen_title,
txt_fmt = gen_fmt
)
gen_table_left.extend_right(gen_table_right)
general_table = gen_table_left
#Parameters part of the summary table
#------------------------------------
#Note: this is not necessary since we standardized names, only t versus normal
tstats = {'OLS' : self.t(),
'GLS' : self.t(),
'GLSAR' : self.t(),
'WLS' : self.t(),
'RLM' : self.t(),
'GLM' : self.t()
}
prob_stats = {'OLS' : self.pvalues,
'GLS' : self.pvalues,
'GLSAR' : self.pvalues,
'WLS' : self.pvalues,
'RLM' : self.pvalues,
'GLM' : self.pvalues
}
#Dictionary to store the header names for the parameter part of the
#summary table. look up by modeltype
alp = str((1-alpha)*100)+'%'
param_header = {
'OLS' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'GLS' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'GLSAR' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'WLS' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'],
'GLM' : ['coef', 'std err', 't', 'P>|t|', alp + ' Conf. Interval'], #glm uses t-distribution
'RLM' : ['coef', 'std err', 'z', 'P>|z|', alp + ' Conf. Interval'] #checke z
}
params_stubs = xname
params = self.params
conf_int = self.conf_int(alpha)
std_err = self.bse
exog_len = lrange(len(xname))
tstat = tstats[modeltype]
prob_stat = prob_stats[modeltype]
    # SimpleTable should be able to handle the formatting
params_data = lzip(["%#6.4g" % (params[i]) for i in exog_len],
["%#6.4f" % (std_err[i]) for i in exog_len],
["%#6.4f" % (tstat[i]) for i in exog_len],
["%#6.4f" % (prob_stat[i]) for i in exog_len],
["(%#5g, %#5g)" % tuple(conf_int[i]) for i in \
exog_len]
)
parameter_table = SimpleTable(params_data,
param_header[modeltype],
params_stubs,
title = None,
txt_fmt = fmt_2, #gen_fmt,
)
#special table
#-------------
#TODO: exists in linear_model, what about other models
#residual diagnostics
#output options
#--------------
#TODO: JP the rest needs to be fixed, similar to summary in linear_model
def ols_printer():
"""
print summary table for ols models
"""
table = str(general_table)+'\n'+str(parameter_table)
return table
def ols_to_csv():
"""
exports ols summary data to csv
"""
pass
def glm_printer():
table = str(general_table)+'\n'+str(parameter_table)
return table
pass
printers = {'OLS': ols_printer,
'GLM' : glm_printer
}
if returns=='print':
try:
return printers[modeltype]()
except KeyError:
return printers['OLS']()
def _getnames(self, yname=None, xname=None):
'''extract names from model or construct names
'''
if yname is None:
if hasattr(self.model, 'endog_names') and (
not self.model.endog_names is None):
yname = self.model.endog_names
else:
yname = 'y'
if xname is None:
if hasattr(self.model, 'exog_names') and (
not self.model.exog_names is None):
xname = self.model.exog_names
else:
xname = ['var_%d' % i for i in range(len(self.params))]
return yname, xname
def summary_top(results, title=None, gleft=None, gright=None, yname=None, xname=None):
'''generate top table(s)
TODO: this still uses predefined model_methods
? allow gleft, gright to be 1 element tuples instead of filling with None?
'''
#change of names ?
gen_left, gen_right = gleft, gright
#time and names are always included
import time
time_now = time.localtime()
time_of_day = [time.strftime("%H:%M:%S", time_now)]
date = time.strftime("%a, %d %b %Y", time_now)
yname, xname = _getnames(results, yname=yname, xname=xname)
#create dictionary with default
#use lambdas because some values raise exception if they are not available
#alternate spellings are commented out to force unique labels
default_items = dict([
('Dependent Variable:', lambda: [yname]),
('Dep. Variable:', lambda: [yname]),
('Model:', lambda: [results.model.__class__.__name__]),
#('Model type:', lambda: [results.model.__class__.__name__]),
('Date:', lambda: [date]),
('Time:', lambda: time_of_day),
('Number of Obs:', lambda: [results.nobs]),
#('No. of Observations:', lambda: ["%#6d" % results.nobs]),
('No. Observations:', lambda: ["%#6d" % results.nobs]),
#('Df model:', lambda: [results.df_model]),
('Df Model:', lambda: ["%#6d" % results.df_model]),
#TODO: check when we have non-integer df
('Df Residuals:', lambda: ["%#6d" % results.df_resid]),
#('Df resid:', lambda: [results.df_resid]),
#('df resid:', lambda: [results.df_resid]), #check capitalization
('Log-Likelihood:', lambda: ["%#8.5g" % results.llf]) #doesn't exist for RLM - exception
#('Method:', lambda: [???]), #no default for this
])
if title is None:
title = results.model.__class__.__name__ + 'Regression Results'
if gen_left is None:
#default: General part of the summary table, Applicable to all? models
gen_left = [('Dep. Variable:', None),
('Model type:', None),
('Date:', None),
('No. Observations:', None),
('Df model:', None),
('Df resid:', None)]
try:
llf = results.llf
gen_left.append(('Log-Likelihood', None))
except: #AttributeError, NotImplementedError
pass
gen_right = []
gen_title = title
gen_header = None
#needed_values = [k for k,v in gleft + gright if v is None] #not used anymore
#replace missing (None) values with default values
gen_left_ = []
for item, value in gen_left:
if value is None:
value = default_items[item]() #let KeyErrors raise exception
gen_left_.append((item, value))
gen_left = gen_left_
if gen_right:
gen_right_ = []
for item, value in gen_right:
if value is None:
value = default_items[item]() #let KeyErrors raise exception
gen_right_.append((item, value))
gen_right = gen_right_
#check
missing_values = [k for k,v in gen_left + gen_right if v is None]
assert missing_values == [], missing_values
#pad both tables to equal number of rows
if gen_right:
if len(gen_right) < len(gen_left):
#fill up with blank lines to same length
gen_right += [(' ', ' ')] * (len(gen_left) - len(gen_right))
elif len(gen_right) > len(gen_left):
#fill up with blank lines to same length, just to keep it symmetric
gen_left += [(' ', ' ')] * (len(gen_right) - len(gen_left))
#padding in SimpleTable doesn't work like I want
#force extra spacing and exact string length in right table
gen_right = [('%-21s' % (' '+k), v) for k,v in gen_right]
gen_stubs_right, gen_data_right = zip_longest(*gen_right) #transpose row col
gen_table_right = SimpleTable(gen_data_right,
gen_header,
gen_stubs_right,
title = gen_title,
txt_fmt = fmt_2cols #gen_fmt
)
else:
gen_table_right = [] #because .extend_right seems works with []
#moved below so that we can pad if needed to match length of gen_right
#transpose rows and columns, `unzip`
gen_stubs_left, gen_data_left = zip_longest(*gen_left) #transpose row col
gen_table_left = SimpleTable(gen_data_left,
gen_header,
gen_stubs_left,
title = gen_title,
txt_fmt = fmt_2cols
)
gen_table_left.extend_right(gen_table_right)
general_table = gen_table_left
return general_table #, gen_table_left, gen_table_right
def summary_params(results, yname=None, xname=None, alpha=.05, use_t=True,
skip_header=False, title=None):
'''create a summary table for the parameters
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
    skip_header : bool
If false (default), then the header row is added. If true, then no
header row is added.
Returns
-------
params_table : SimpleTable instance
'''
#Parameters part of the summary table
#------------------------------------
#Note: this is not necessary since we standardized names, only t versus normal
if isinstance(results, tuple):
#for multivariate endog
#TODO: check whether I don't want to refactor this
#we need to give parameter alpha to conf_int
results, params, std_err, tvalues, pvalues, conf_int = results
else:
params = results.params
std_err = results.bse
tvalues = results.tvalues #is this sometimes called zvalues
pvalues = results.pvalues
conf_int = results.conf_int(alpha)
#Dictionary to store the header names for the parameter part of the
#summary table. look up by modeltype
if use_t:
param_header = ['coef', 'std err', 't', 'P>|t|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
else:
param_header = ['coef', 'std err', 'z', 'P>|z|',
'[' + str(alpha/2), str(1-alpha/2) + ']']
if skip_header:
param_header = None
_, xname = _getnames(results, yname=yname, xname=xname)
params_stubs = xname
exog_idx = lrange(len(xname))
params_data = lzip([forg(params[i], prec=4) for i in exog_idx],
[forg(std_err[i]) for i in exog_idx],
[forg(tvalues[i]) for i in exog_idx],
["%#6.3f" % (pvalues[i]) for i in exog_idx],
[forg(conf_int[i,0]) for i in exog_idx],
[forg(conf_int[i,1]) for i in exog_idx]
)
parameter_table = SimpleTable(params_data,
param_header,
params_stubs,
title = title,
txt_fmt = fmt_params #gen_fmt #fmt_2, #gen_fmt,
)
return parameter_table
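# [Editor's sketch -- not part of statsmodels] summary_params() also accepts a
# pre-computed (results, params, bse, tvalues, pvalues, conf_int) tuple, as the
# isinstance check above shows. The arrays below are hypothetical; because
# yname/xname are passed explicitly, the first tuple element is never accessed
# and None suffices for the illustration.
def _summary_params_tuple_sketch():
    params = np.array([1.5, -0.3])
    bse = np.array([0.2, 0.1])
    tvalues = params / bse
    pvalues = np.array([0.001, 0.012])
    conf_int = np.column_stack((params - 1.96 * bse, params + 1.96 * bse))
    restup = (None, params, bse, tvalues, pvalues, conf_int)
    return summary_params(restup, yname='y', xname=['x1', 'x2'],
                          alpha=.05, use_t=True)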
def summary_params_frame(results, yname=None, xname=None, alpha=.05,
use_t=True):
'''create a summary table for the parameters
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
skip_headers : bool
If false (default), then the header row is added. If true, then no
header row is added.
Returns
-------
params_table : SimpleTable instance
'''
#Parameters part of the summary table
#------------------------------------
#Note: this is not necessary since we standardized names, only t versus normal
if isinstance(results, tuple):
#for multivariate endog
#TODO: check whether I don't want to refactor this
#we need to give parameter alpha to conf_int
results, params, std_err, tvalues, pvalues, conf_int = results
else:
params = results.params
std_err = results.bse
tvalues = results.tvalues #is this sometimes called zvalues
pvalues = results.pvalues
conf_int = results.conf_int(alpha)
#Dictionary to store the header names for the parameter part of the
#summary table. look up by modeltype
alp = str((1-alpha)*100)+'%'
if use_t:
param_header = ['coef', 'std err', 't', 'P>|t|',
'Conf. Int. Low', 'Conf. Int. Upp.']
else:
param_header = ['coef', 'std err', 'z', 'P>|z|',
'Conf. Int. Low', 'Conf. Int. Upp.']
_, xname = _getnames(results, yname=yname, xname=xname)
#------------------
from pandas import DataFrame
table = np.column_stack((params, std_err, tvalues, pvalues, conf_int))
return DataFrame(table, columns=param_header, index=xname)
def summary_params_2d(result, extras=None, endog_names=None, exog_names=None,
title=None):
'''create summary table of regression parameters with several equations
This allows interleaving of parameters with bse and/or tvalues
Parameters
----------
result : result instance
the result instance with params and attributes in extras
extras : list of strings
additional attributes to add below a parameter row, e.g. bse or tvalues
endog_names : None or list of strings
names for rows of the parameter array (multivariate endog)
exog_names : None or list of strings
names for columns of the parameter array (exog)
alpha : float
level for confidence intervals, default 0.95
title : None or string
Returns
-------
tables : list of SimpleTable
        this contains a list of all separate subtables
table_all : SimpleTable
the merged table with results concatenated for each row of the parameter
array
'''
if endog_names is None:
#TODO: note the [1:] is specific to current MNLogit
endog_names = ['endog_%d' % i for i in
np.unique(result.model.endog)[1:]]
if exog_names is None:
exog_names = ['var%d' %i for i in range(len(result.params))]
#TODO: check formatting options with different values
#res_params = [['%10.4f'%item for item in row] for row in result.params]
res_params = [[forg(item, prec=4) for item in row] for row in result.params]
if extras: #not None or non-empty
#maybe this should be a simple triple loop instead of list comprehension?
#below_list = [[['%10s' % ('('+('%10.3f'%v).strip()+')')
extras_list = [[['%10s' % ('(' + forg(v, prec=3).strip() + ')')
for v in col]
for col in getattr(result, what)]
for what in extras
]
data = lzip(res_params, *extras_list)
data = [i for j in data for i in j] #flatten
stubs = lzip(endog_names, *[['']*len(endog_names)]*len(extras))
stubs = [i for j in stubs for i in j] #flatten
#return SimpleTable(data, headers=exog_names, stubs=stubs)
else:
data = res_params
stubs = endog_names
# return SimpleTable(data, headers=exog_names, stubs=stubs,
# data_fmts=['%10.4f'])
import copy
txt_fmt = copy.deepcopy(fmt_params)
txt_fmt.update(dict(data_fmts = ["%s"]*result.params.shape[1]))
return SimpleTable(data, headers=exog_names,
stubs=stubs,
title=title,
# data_fmts = ["%s"]),
txt_fmt = txt_fmt)
def summary_params_2dflat(result, endog_names=None, exog_names=None, alpha=0.05,
use_t=True, keep_headers=True, endog_cols=False):
#skip_headers2=True):
'''summary table for parameters that are 2d, e.g. multi-equation models
Parameters
----------
result : result instance
the result instance with params, bse, tvalues and conf_int
endog_names : None or list of strings
names for rows of the parameter array (multivariate endog)
exog_names : None or list of strings
names for columns of the parameter array (exog)
alpha : float
level for confidence intervals, default 0.95
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
keep_headers : bool
If true (default), then sub-tables keep their headers. If false, then
        only the first headers are kept, the other headers are blanked out
endog_cols : bool
If false (default) then params and other result statistics have
equations by rows. If true, then equations are assumed to be in columns.
Not implemented yet.
Returns
-------
tables : list of SimpleTable
        this contains a list of all separate subtables
table_all : SimpleTable
the merged table with results concatenated for each row of the parameter
array
'''
res = result
params = res.params
if params.ndim == 2: # we've got multiple equations
n_equ = params.shape[1]
if not len(endog_names) == params.shape[1]:
raise ValueError('endog_names has wrong length')
else:
if not len(endog_names) == len(params):
raise ValueError('endog_names has wrong length')
n_equ = 1
#VAR doesn't have conf_int
#params = res.params.T # this is a convention for multi-eq models
if not isinstance(endog_names, list):
#this might be specific to multinomial logit type, move?
if endog_names is None:
endog_basename = 'endog'
else:
endog_basename = endog_names
#TODO: note, the [1:] is specific to current MNLogit
endog_names = res.model.endog_names[1:]
#check if we have the right length of names
tables = []
for eq in range(n_equ):
restup = (res, res.params[:,eq], res.bse[:,eq], res.tvalues[:,eq],
res.pvalues[:,eq], res.conf_int(alpha)[eq])
#not used anymore in current version
# if skip_headers2:
# skiph = (row != 0)
# else:
# skiph = False
skiph = False
tble = summary_params(restup, yname=endog_names[eq],
xname=exog_names, alpha=alpha, use_t=use_t,
skip_header=skiph)
tables.append(tble)
#add titles, they will be moved to header lines in table_extend
for i in range(len(endog_names)):
tables[i].title = endog_names[i]
table_all = table_extend(tables, keep_headers=keep_headers)
return tables, table_all
def table_extend(tables, keep_headers=True):
'''extend a list of SimpleTables, adding titles to header of subtables
This function returns the merged table as a deepcopy, in contrast to the
SimpleTable extend method.
Parameters
----------
tables : list of SimpleTable instances
keep_headers : bool
        If true, then all headers are kept. If false, then the headers of
subtables are blanked out.
Returns
-------
table_all : SimpleTable
merged tables as a single SimpleTable instance
'''
from copy import deepcopy
for ii, t in enumerate(tables[:]): #[1:]:
t = deepcopy(t)
#move title to first cell of header
#TODO: check if we have multiline headers
if t[0].datatype == 'header':
t[0][0].data = t.title
t[0][0]._datatype = None
t[0][0].row = t[0][1].row
if not keep_headers and (ii > 0):
for c in t[0][1:]:
c.data = ''
#add separating line and extend tables
if ii == 0:
table_all = t
else:
r1 = table_all[-1]
r1.add_format('txt', row_dec_below='-')
table_all.extend(t)
table_all.title = None
return table_all
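# [Editor's sketch -- not part of statsmodels] table_extend() as used by
# summary_params_2dflat(): each subtable's title is moved into its header row
# and the tables are stacked. The two toy tables below are hypothetical but are
# built the same way summary_params() builds its tables (data, header, stubs,
# title).
def _table_extend_sketch():
    t1 = SimpleTable([['1.00', '0.10']], ['coef', 'std err'], ['x1'], title='eq_0')
    t2 = SimpleTable([['2.00', '0.20']], ['coef', 'std err'], ['x1'], title='eq_1')
    return table_extend([t1, t2], keep_headers=True)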
def summary_return(tables, return_fmt='text'):
######## Return Summary Tables ########
# join table parts then print
if return_fmt == 'text':
strdrop = lambda x: str(x).rsplit('\n',1)[0]
#convert to string drop last line
return '\n'.join(lmap(strdrop, tables[:-1]) + [str(tables[-1])])
elif return_fmt == 'tables':
return tables
elif return_fmt == 'csv':
return '\n'.join(map(lambda x: x.as_csv(), tables))
elif return_fmt == 'latex':
#TODO: insert \hline after updating SimpleTable
import copy
table = copy.deepcopy(tables[0])
del table[-1]
for part in tables[1:]:
table.extend(part)
return table.as_latex_tabular()
elif return_fmt == 'html':
return "\n".join(table.as_html() for table in tables)
else:
raise ValueError('available output formats are text, csv, latex, html')
class Summary(object):
'''class to hold tables for result summary presentation
Construction does not take any parameters. Tables and text can be added
with the `add_` methods.
Attributes
----------
tables : list of tables
Contains the list of SimpleTable instances, horizontally concatenated tables are not saved separately.
extra_txt : string
extra lines that are added to the text output, used for warnings and explanations.
'''
def __init__(self):
self.tables = []
self.extra_txt = None
def __str__(self):
return self.as_text()
def __repr__(self):
#return '<' + str(type(self)) + '>\n"""\n' + self.__str__() + '\n"""'
return str(type(self)) + '\n"""\n' + self.__str__() + '\n"""'
def _repr_html_(self):
'''Display as HTML in IPython notebook.'''
return self.as_html()
def add_table_2cols(self, res, title=None, gleft=None, gright=None,
yname=None, xname=None):
'''add a double table, 2 tables with one column merged horizontally
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
title : string or None
if None, then a default title is used.
gleft : list of tuples
elements for the left table, tuples are (name, value) pairs
If gleft is None, then a default table is created
gright : list of tuples or None
elements for the right table, tuples are (name, value) pairs
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
Returns
-------
None : tables are attached
'''
table = summary_top(res, title=title, gleft=gleft, gright=gright,
yname=yname, xname=xname)
self.tables.append(table)
def add_table_params(self, res, yname=None, xname=None, alpha=.05,
use_t=True):
'''create and add a table for the parameter estimates
Parameters
----------
res : results instance
some required information is directly taken from the result
instance
yname : string or None
optional name for the endogenous variable, default is "y"
xname : list of strings or None
optional names for the exogenous variables, default is "var_xx"
alpha : float
significance level for the confidence intervals
use_t : bool
indicator whether the p-values are based on the Student-t
distribution (if True) or on the normal distribution (if False)
Returns
-------
None : table is attached
'''
if res.params.ndim == 1:
table = summary_params(res, yname=yname, xname=xname, alpha=alpha,
use_t=use_t)
elif res.params.ndim == 2:
# _, table = summary_params_2dflat(res, yname=yname, xname=xname,
# alpha=alpha, use_t=use_t)
_, table = summary_params_2dflat(res, endog_names=yname,
exog_names=xname,
alpha=alpha, use_t=use_t)
else:
raise ValueError('params has to be 1d or 2d')
self.tables.append(table)
def add_extra_txt(self, etext):
'''add additional text that will be added at the end in text format
Parameters
----------
etext : string
string with lines that are added to the text output.
'''
self.extra_txt = '\n'.join(etext)
def as_text(self):
'''return tables as string
Returns
-------
txt : string
summary tables and extra text as one string
'''
txt = summary_return(self.tables, return_fmt='text')
if not self.extra_txt is None:
txt = txt + '\n\n' + self.extra_txt
return txt
def as_latex(self):
'''return tables as string
Returns
-------
latex : string
summary tables and extra text as string of Latex
Notes
-----
This currently merges tables with different number of columns.
It is recommended to use `as_latex_tabular` directly on the individual
tables.
'''
return summary_return(self.tables, return_fmt='latex')
def as_csv(self):
'''return tables as string
Returns
-------
csv : string
concatenated summary tables in comma delimited format
'''
return summary_return(self.tables, return_fmt='csv')
def as_html(self):
'''return tables as string
Returns
-------
html : string
concatenated summary tables in HTML format
'''
return summary_return(self.tables, return_fmt='html')
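# [Editor's sketch -- not part of statsmodels] Typical wiring of the Summary
# container for a fitted results instance `res` (e.g. the OLS fit built in the
# __main__ block below); the None values in gleft/gright are filled from the
# defaults defined in summary_top().
def _summary_sketch(res):
    smry = Summary()
    smry.add_table_2cols(res,
                         gleft=[('Dep. Variable:', None),
                                ('Model:', None),
                                ('No. Observations:', None)],
                         gright=[('Df Model:', None),
                                 ('Df Residuals:', None),
                                 ('Log-Likelihood:', None)])
    smry.add_table_params(res, use_t=True)
    return smry.as_text()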
if __name__ == "__main__":
import statsmodels.api as sm
data = sm.datasets.longley.load()
data.exog = sm.add_constant(data.exog)
res = sm.OLS(data.endog, data.exog).fit()
#summary(
| bsd-3-clause |
ChadFulton/statsmodels | statsmodels/datasets/cpunish/data.py | 2 | 2584 | """US Capital Punishment dataset."""
from statsmodels.datasets import utils as du
__docformat__ = 'restructuredtext'
COPYRIGHT = """Used with express permission from the original author,
who retains all rights."""
TITLE = __doc__
SOURCE = """
Jeff Gill's `Generalized Linear Models: A Unified Approach`
http://jgill.wustl.edu/research/books.html
"""
DESCRSHORT = """Number of state executions in 1997"""
DESCRLONG = """This data describes the number of times capital punishment is implemented
at the state level for the year 1997. The outcome variable is the number of
executions. There were executions in 17 states.
Included in the data are explanatory variables for median per capita income
in dollars, the percent of the population classified as living in poverty,
the percent of Black citizens in the population, the rate of violent
crimes per 100,000 residents for 1996, a dummy variable indicating
whether the state is in the South, and (an estimate of) the proportion
of the population with a college degree of some kind.
"""
NOTE = """::
Number of Observations - 17
Number of Variables - 7
Variable name definitions::
EXECUTIONS - Executions in 1996
INCOME - Median per capita income in 1996 dollars
PERPOVERTY - Percent of the population classified as living in poverty
PERBLACK - Percent of black citizens in the population
    VC100k96 - Rate of violent crimes per 100,000 residents for 1996
SOUTH - SOUTH == 1 indicates a state in the South
    DEGREE - An estimate of the proportion of the state population with a
college degree of some kind
State names are included in the data file, though not returned by load.
"""
def load_pandas():
"""
Load the cpunish data and return a Dataset class.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
data = _get_data()
return du.process_pandas(data, endog_idx=0)
def load(as_pandas=None):
"""
Load the cpunish data and return a Dataset class.
Parameters
----------
as_pandas : bool
Flag indicating whether to return pandas DataFrames and Series
or numpy recarrays and arrays. If True, returns pandas.
Returns
-------
Dataset instance:
See DATASET_PROPOSAL.txt for more information.
"""
return du.as_numpy_dataset(load_pandas(), as_pandas=as_pandas)
def _get_data():
data = du.load_csv(__file__, 'cpunish.csv')
data = data.iloc[:, 1:8].astype(float)
return data
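# [Editor's sketch -- not part of statsmodels] Minimal usage illustration,
# assuming the bundled cpunish.csv ships next to this module: load_pandas()
# returns the usual statsmodels Dataset wrapper with EXECUTIONS as endog and
# the remaining columns as exog.
def _cpunish_sketch():
    dataset = load_pandas()
    return dataset.endog_name, list(dataset.exog.columns)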
| bsd-3-clause |
codeforfrankfurt/PolBotCheck | polbotcheck/word_cluster.py | 1 | 3812 | import json
from sklearn.feature_extraction.text import TfidfVectorizer
import nltk
from nltk.corpus import stopwords
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import db
import os
DATASET_PATH = os.environ['HOME'] + '/nltk_data/corpora/twitter_samples/tweets.20150430-223406.json'
def calc_frequencies(words, words_n=50, lang='german'):
words = [word for word in words if len(word) > 1]
words = [word for word in words if not word.isnumeric()]
words = [word.lower() for word in words]
# words = [word for word in words if word not in all_stopwords]
# Stemming words seems to make matters worse, disabled
# stemmer = nltk.stem.snowball.SnowballStemmer(lang)
# words = [stemmer.stem(word) for word in words]
fdist = nltk.FreqDist(words)
return fdist.most_common(words_n)
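# [Editor's sketch -- not part of PolBotCheck] calc_frequencies() drops
# single-character and purely numeric tokens, lower-cases the rest and returns
# the most common words; the token list below is hypothetical.
def _calc_frequencies_sketch():
    tokens = ['Bot', 'bot', 'BOT', 'check', '42', 'a', 'check']
    # -> [('bot', 3), ('check', 2)]
    return calc_frequencies(tokens, words_n=2)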
def get_word_clouds(tweets, users, words_n=50, lang='english'):
default_stopwords = set(nltk.corpus.stopwords.words(lang))
stopwords_file = '../data/stopwords.txt'
custom_stopwords = set(open(stopwords_file, 'r').read().splitlines())
all_stopwords = default_stopwords | custom_stopwords
vectorizer = TfidfVectorizer(max_df=0.5, min_df=2, stop_words=list(all_stopwords))
X = vectorizer.fit_transform(tweets)
terms = vectorizer.get_feature_names()
word_cloud_per_person = {}
for doc in range(len(tweets)):
feature_index = X[doc, :].nonzero()[1]
tfidf_scores = zip(feature_index, [X[doc, x] for x in feature_index])
doc_terms = []
for word, score in [(terms[i], score) for (i, score) in tfidf_scores]:
doc_terms.append((word, score))
important_terms = [(word, score) for word, score in sorted(doc_terms, key=lambda x: x[1], reverse=True)][:words_n]
word_cloud_per_person[users[doc]] = important_terms
return word_cloud_per_person
def save_wordcloud_image(frequencies, filename):
wordcloud = WordCloud(width=1024, height=786, min_font_size=1).fit_words(frequencies)
fig = plt.figure()
fig.set_figwidth(12)
fig.set_figheight(16)
plt.imshow(wordcloud)
plt.axis("off")
plt.savefig(filename, facecolor='k', bbox_inches='tight')
    print('image created')
def load_example_data():
tweets = []
with open(DATASET_PATH) as f:
for line in f:
tweets.append(json.loads(line)['text'])
return tweets
def get_corpus_of_most_active_users(n_users=5):
tweets = []
texts = []
with open(DATASET_PATH) as f:
for line in f:
tweets.append(json.loads(line)['user']['screen_name'])
texts.append((json.loads(line)['user']['screen_name'], json.loads(line)['text']))
users = nltk.FreqDist(tweets).most_common(n_users)
dict = {}
for user, tweet in texts:
if user in dict:
dict[user] = " ".join([dict[user],tweet])
else:
dict[user] = tweet
corpus = [dict[name] for name, _ in users]
user_names = [name for name, _ in users]
return corpus, user_names
if __name__ == "__main__":
corpus, users = get_corpus_of_most_active_users()
word_cloud_per_person = get_word_clouds(corpus, users, words_n=100, lang='english')
for user in users:
topic_frequencies = word_cloud_per_person[user]
        print(user)
        print(topic_frequencies)
db.save_word_frequencies('test_user_seb', dict(topic_frequencies))
exit()
# save_wordcloud_image(dict(topic_frequencies), 'plots/word_clouds/' + user + '.png')
# This is an example how to save a word_cloud in the database
# user_in_db = 'malechanissen'
# db.save_word_frequencies(user_in_db, {'w3':10, 'w4':20})
# db.save_word_frequencies(user_in_db, dict(topic_frequencies))
# db.save_word_frequencies('test_user_seb', {'w3':10, 'w4':20})
| mit |
rdevon/cortex | tests/built_ins/models/test_routine.py | 1 | 3029 | '''Module for testing model routines.
'''
import torch.optim as optim
from cortex.plugins import ModelPlugin
def test_routine(model_class, arguments, data_class):
ModelPlugin._reset_class()
kwargs = {arguments['arg1']: 11, arguments['arg2']: 13}
data = data_class(11)
model = model_class(contract=dict(inputs=dict(A='test')))
model._data = data
model.kwargs.update(**kwargs)
model.build()
model.eval_step()
print('Training nets: ', model._training_nets)
assert 'net' in list(model._training_nets.values())[0]
params = list(model.nets.net.parameters())
op = optim.SGD(params, lr=0.0001)
model._optimizers = dict(net=op)
A = model.inputs('A')
model.routine(A)
model._reset_epoch()
model.train_step()
model.train_step()
model.train_step()
print('Results:', model._all_epoch_results)
print('Losses:', model._all_epoch_losses)
print('Times:', model._all_epoch_times)
assert len(list(model._all_epoch_results.values())[0]) == 3
assert len(list(model._all_epoch_losses.values())[0]) == 3
assert len(list(model._all_epoch_times.values())[0]) == 3
def test_routine_with_submodels(model_with_submodel):
model = model_with_submodel
model.build()
params = list(model.nets.net.parameters())
op = optim.SGD(params, lr=0.0001)
params2 = list(model.nets.net2.parameters())
op2 = optim.SGD(params2, lr=0.001)
model._optimizers = dict(net=op, net2=op2)
model.submodel._optimizers = dict(net=op, net2=op2)
assert model._get_training_nets() == []
model.train_step()
assert model._get_training_nets() == ['net', 'net2']
model.train_step()
model.train_step()
def test_routine_with_submodels_2(model_class_with_submodel_2, data_class):
ModelPlugin._reset_class()
kwargs = {'d': 11, 'c': 13}
data = data_class(11)
contract = dict(inputs=dict(B='test'))
sub_contract = dict(
kwargs=dict(a='d'),
nets=dict(net='net2'),
inputs=dict(A='test')
)
sub_contract2 = dict(
kwargs=dict(a='d'),
nets=dict(net='net3'),
inputs=dict(A='test')
)
model = model_class_with_submodel_2(sub_contract1=sub_contract,
sub_contract2=sub_contract2,
contract=contract)
model._data = data
model.submodel1._data = data
model.submodel2._data = data
model.kwargs.update(**kwargs)
model.build()
params = list(model.nets.net.parameters())
op = optim.SGD(params, lr=0.0001)
params2 = list(model.nets.net2.parameters())
op2 = optim.SGD(params2, lr=0.001)
params3 = list(model.nets.net3.parameters())
op3 = optim.SGD(params3, lr=0.001)
model._optimizers = dict(net=op, net2=op2, net3=op3)
model.submodel1._optimizers = dict(net=op, net2=op2, net3=op3)
model.submodel2._optimizers = dict(net=op, net2=op2, net3=op3)
model.train_step()
model.train_step()
model.train_step()
| bsd-3-clause |
kaniblu/pytorch-skipthoughts | src/torchst/train.py | 1 | 24927 | import os
import pickle
import shutil
import logging
import datetime
import multiprocessing.pool as mp
import torch
import torch.optim as O
import tqdm
from torch.autograd import Variable
from torch.nn.parallel import data_parallel
from torch.nn.utils import clip_grad_norm
from torchtextutils import Vocabulary
from torchtextutils import BatchPreprocessor
from torchtextutils import DirectoryReader
from torchtextutils import OmissionNoisifier
from torchtextutils import SwapNoisifier
from torchtextutils import create_generator_st
from yaap import ArgParser
from yaap import path
from configargparse import YAMLConfigFileParser
from .model import MultiContextSkipThoughts
from .model import compute_loss
from .wordembed import load_embeddings_mp
from .wordembed import load_embeddings
from .wordembed import load_fasttext_embeddings
from .wordembed import preinitialize_embeddings
def parse_args():
parser = ArgParser(allow_config=True,
config_file_parser_class=YAMLConfigFileParser)
parser.add("--name", type=str, default="main")
parser.add("--data-path", type=path, action="append", required=True,
help="Path to a sentence file or directory that contains "
"a set of sentence files where each line is a sentence, "
"in which tokens are separated by spaces.")
parser.add("--vocab-path", type=path, required=True)
parser.add("--save-dir", type=path, required=True)
parser.add("--gpus", type=int, action="append")
parser.add("--previews", type=int, default=10)
parser.add("--batch-first", action="store_true", default=True,
help="currently due to the limitation of DataParallel API,"
"it is impossible to operate without batch-first data")
parser.add("--visualizer", type=str, default=None,
choices=["visdom", "tensorboard"])
parser.add("--ckpt-name", type=str, default="model-e{epoch}-s{iter}-{loss}")
parser.add("-v", "--verbose", action="store_true", default=False)
group = parser.add_group("Word Embedding Options")
group.add("--wordembed-type", type=str, default="none",
choices=["glove", "fasttext", "none"])
group.add("--wordembed-path", type=path, default=None)
group.add("--fasttext-path", type=path, default=None,
help="Path to FastText binary.")
group.add("--wordembed-freeze", action="store_true", default=False)
group.add("--wordembed-processes", type=int, default=4)
group = parser.add_group("Training Options")
group.add("--epochs", type=int, default=3)
group.add("--batch-size", type=int, default=32)
group.add("--omit-prob", type=float, default=0.05)
group.add("--swap-prob", type=float, default=0.05)
group.add("--val-period", type=int, default=100)
group.add("--save-period", type=int, default=1000)
group.add("--max-len", type=int, default=30)
group = parser.add_group("Visdom Options")
group.add("--visdom-host", type=str, default="localhost")
group.add("--visdom-port", type=int, default=8097)
group.add("--visdom-buffer-size", type=int, default=10)
group = parser.add_group("Model Parameters")
group.add("--reverse-encoder", action="store_true", default=False)
group.add("--encoder-cell", type=str, default="lstm",
choices=["lstm", "gru", "sru"])
group.add("--decoder-cell", type=str, default="gru",
choices=["lstm", "gru", "sru"])
group.add("--conditional-decoding", action="store_true", default=False)
group.add("--before", type=int, default=1)
group.add("--after", type=int, default=1)
group.add("--predict-self", action="store_true", default=False)
group.add("--word-dim", type=int, default=100)
group.add("--hidden-dim", type=int, default=100)
group.add("--layers", type=int, default=2)
group.add("--encoder-direction", default="bi",
choices=["uni", "bi", "combine"])
group.add("--dropout-prob", type=float, default=0.05)
args = parser.parse_args()
return args
class DataGenerator(object):
def __init__(self, data_paths, vocab, omit_prob, swap_prob, batch_size,
max_len, n_before, n_after, predict_self=False,
shuffle_files=True, batch_first=True, pin_memory=True,
allow_residual=True):
self.data_paths = data_paths
self.vocab = vocab
self.omit_prob = omit_prob
self.swap_prob = swap_prob
self.batch_size = batch_size
self.max_len = max_len
self.n_before = n_before
self.n_after = n_after
self.predict_self = predict_self
self.shuffle_files = shuffle_files
self.batch_first = batch_first
self.pin_memory = pin_memory
self.allow_residual = allow_residual
def __iter__(self):
return self.generate()
def generate(self):
preprocessor = BatchPreprocessor(self.vocab)
line_reader = file_list_reader(self.data_paths,
shuffle_files=self.shuffle_files)
data_generator = create_generator_st(line_reader,
batch_first=self.batch_first,
batch_size=self.batch_size,
preprocessor=preprocessor,
pin_memory=self.pin_memory,
allow_residual=self.allow_residual,
max_length=self.max_len,
n_before=self.n_before,
n_after=self.n_after,
predict_self=self.predict_self)
noisifiers = []
if self.omit_prob > 0:
unk_idx = self.vocab[self.vocab.unk]
omitter = OmissionNoisifier(self.omit_prob, unk_idx)
noisifiers.append(omitter)
if self.swap_prob > 0:
swapper = SwapNoisifier(self.swap_prob)
noisifiers.append(swapper)
for in_data, in_lens, out_data, out_lens in data_generator:
for nosifier in noisifiers:
# in-place noisification
nosifier((in_data, in_lens))
yield in_data, in_lens, out_data, out_lens
class Trainer(object):
def __init__(self, model, gpu_devices, data_generator, n_epochs,
logger, save_dir, save_period, val_period, previews, ckpt_format,
batch_first=True):
self.model = model
self.gpu_devices = gpu_devices
self.data_generator = data_generator
self.n_epochs = n_epochs
self.logger = logger
self.save_dir = save_dir
self.save_period = save_period
self.val_period = val_period
self.previews = previews
self.ckpt_format = ckpt_format
self.batch_first = batch_first
self.legend = ["average"] + ["decoder_{}".format(i)
for i in range(self.model.n_decoders)]
if gpu_devices:
self.model = model.cuda()
@property
def is_cuda(self):
return len(self.gpu_devices) > 0
def prepare_batches(self, batch_data, chunks, **kwargs):
x, x_lens, ys, ys_lens = batch_data
batch_dim = 0 if self.batch_first else 1
x_list = x.chunk(chunks, 0)
x_lens_list = x_lens.chunk(chunks, 0)
ys_list = ys.chunk(chunks, batch_dim)
ys_lens_list = ys_lens.chunk(chunks, batch_dim)
inp_list = [x_list, x_lens_list, ys_list, ys_lens_list]
data_list = []
for inp in zip(*inp_list):
data = self.prepare_batch(inp, **kwargs)
data_list.append(data)
data_list = list(zip(*data_list))
ret_list = []
for data in data_list:
data = [d.unsqueeze(0) for d in data]
data = torch.cat(data)
ret_list.append(data)
return ret_list
def merge_batches(self, batch_data):
x, x_lens, ys_i, ys_t, ys_lens, xys_idx = batch_data
n_devices = len(self.gpu_devices)
sbatch_size = x.data.shape[1]
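        # xys_idx from each device indexes rows within that device's sub-batch;
        # shift each chunk by its sub-batch offset so the indices address the
        # merged (full) batch instead.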
xys_idx = xys_idx.chunk(n_devices)
xys_idx = [xy_idx + i * sbatch_size for i, xy_idx in enumerate(xys_idx)]
xys_idx = torch.cat(xys_idx)
if not self.batch_first:
ys_i = ys_i.transpose(0, 2, 1)
ys_t = ys_t.transpose(0, 2, 1)
ys_lens = ys_lens.transpose(0, 2, 1)
xys_idx = xys_idx.transpose(0, 2, 1)
data = [x, x_lens, ys_i, ys_t, ys_lens, xys_idx]
x, x_lens, ys_i, ys_t, ys_lens, xys_idx = [torch.cat(d) for d in data]
if not self.batch_first:
ys_i = ys_i.transpose(1, 0)
ys_t = ys_t.transpose(1, 0)
ys_lens = ys_lens.transpose(1, 0)
xys_idx = xys_idx.transpose(1, 0)
data = [x, x_lens, ys_i, ys_t, ys_lens, xys_idx]
data = [d.contiguous() for d in data]
return data
def prepare_batch(self, batch_data, volatile=False):
x, x_lens, ys, ys_lens = batch_data
batch_dim = 0 if self.batch_first else 1
context_dim = 1 if self.batch_first else 0
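        # Sort encoder inputs by length (descending) as packed-sequence processing
        # expects, and keep the reverse permutation so decoder targets can be
        # mapped back to their (re-ordered) encoder inputs.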
x_lens, x_idx = torch.sort(x_lens, 0, True)
_, x_ridx = torch.sort(x_idx)
ys_lens, ys_idx = torch.sort(ys_lens, batch_dim, True)
x_ridx_exp = x_ridx.unsqueeze(context_dim).expand_as(ys_idx)
xys_idx = torch.gather(x_ridx_exp, batch_dim, ys_idx)
x = x[x_idx]
ys = torch.gather(ys, batch_dim, ys_idx.unsqueeze(-1).expand_as(ys))
x = Variable(x, volatile=volatile)
x_lens = Variable(x_lens, volatile=volatile)
ys_i = Variable(ys[..., :-1], volatile=volatile).contiguous()
ys_t = Variable(ys[..., 1:], volatile=volatile).contiguous()
ys_lens = Variable(ys_lens - 1, volatile=volatile)
xys_idx = Variable(xys_idx, volatile=volatile)
if self.is_cuda:
x = x.cuda(async=True)
x_lens = x_lens.cuda(async=True)
ys_i = ys_i.cuda(async=True)
ys_t = ys_t.cuda(async=True)
ys_lens = ys_lens.cuda(async=True)
xys_idx = xys_idx.cuda(async=True)
return x, x_lens, ys_i, ys_t, ys_lens, xys_idx
def calculate_loss(self, data, dec_logits):
x, x_lens, ys_i, ys_t, ys_lens, xys_idx = data
if self.batch_first:
cdata = [ys_t, ys_lens, dec_logits]
cdata = [d.transpose(1, 0).contiguous() for d in cdata]
ys_t, ys_lens, dec_logits = cdata
losses_b = []
losses_s = []
for logits, y, lens in zip(dec_logits, ys_t, ys_lens):
loss_batch, loss_step = compute_loss(logits, y, lens)
losses_b.append(loss_batch)
losses_s.append(loss_step)
return losses_b, losses_s
def val_text(self, x_sents, yi_sents, yt_sents, o_sents):
text = ""
for x_sent, yi_sent, yt_sent, o_sent in \
zip(x_sents, yi_sents, yt_sents, o_sents):
text += "Encoder Input: {}\n".format(x_sent)
for i, (si, st, so) in enumerate(zip(yi_sent, yt_sent, o_sent)):
text += "Decoder_{} Input: {}\n".format(i, si)
text += "Decoder_{} Target: {}\n".format(i, st)
text += "Decoder_{} Output: {}\n".format(i, so)
return text
def val_sents(self, data, dec_logits):
vocab, previews = self.model.vocab, self.previews
x, x_lens, ys_i, ys_t, ys_lens, xys_idx = data
if self.batch_first:
cdata = [ys_i, ys_t, ys_lens, xys_idx, dec_logits]
cdata = [d.transpose(1, 0).contiguous() for d in cdata]
ys_i, ys_t, ys_lens, xys_idx, dec_logits = cdata
_, xys_ridx = torch.sort(xys_idx, 1)
xys_ridx_exp = xys_ridx.unsqueeze(-1).expand_as(ys_i)
ys_i = torch.gather(ys_i, 1, xys_ridx_exp)
ys_t = torch.gather(ys_t, 1, xys_ridx_exp)
dec_logits = [torch.index_select(logits, 0, xy_ridx)
for logits, xy_ridx in zip(dec_logits, xys_ridx)]
ys_lens = torch.gather(ys_lens, 1, xys_ridx)
x, x_lens = x[:previews], x_lens[:previews]
ys_i, ys_t = ys_i[:, :previews], ys_t[:, :previews]
dec_logits = torch.cat(
[logits[:previews].max(2)[1].squeeze(-1).unsqueeze(0)
for logits in dec_logits], 0)
ys_lens = ys_lens[:, :previews]
ys_i, ys_t = ys_i.transpose(1, 0), ys_t.transpose(1, 0)
        dec_logits, ys_lens = dec_logits.transpose(1, 0), ys_lens.transpose(1, 0)
x, x_lens = x.data.tolist(), x_lens.data.tolist()
ys_i, ys_t = ys_i.data.tolist(), ys_t.data.tolist()
dec_logits, ys_lens = dec_logits.data.tolist(), ys_lens.data.tolist()
def to_sent(data, length, vocab):
return " ".join(vocab.i2f[data[i]] for i in range(length))
def to_sents(data, lens, vocab):
return [to_sent(d, l, vocab) for d, l in zip(data, lens)]
x_sents = to_sents(x, x_lens, vocab)
yi_sents = [to_sents(yi, y_lens, vocab) for yi, y_lens in
zip(ys_i, ys_lens)]
yt_sents = [to_sents(yt, y_lens, vocab) for yt, y_lens in
zip(ys_t, ys_lens)]
o_sents = [to_sents(dec_logit, y_lens, vocab)
for dec_logit, y_lens in zip(dec_logits, ys_lens)]
return x_sents, yi_sents, yt_sents, o_sents
def forward(self, inputs):
return data_parallel(self.model, inputs,
device_ids=self.gpu_devices,
output_device=None,
dim=0,
module_kwargs=None)
def step(self, step, batch_data, volatile=True, title=None):
processed_data = self.prepare_batches(batch_data,
chunks=len(self.gpu_devices),
volatile=volatile)
x, x_lens, ys_i, ys_t, ys_lens, xys_idx = processed_data
inputs = (x, x_lens, ys_i, ys_lens, xys_idx)
dec_logits, h = self.forward(inputs)
merged_data = self.merge_batches(processed_data)
losses_batch, losses_step = self.calculate_loss(merged_data, dec_logits)
losses_step_val = [l.data[0] for l in losses_step]
loss_step = (sum(losses_step) / len(losses_step))
loss_batch = sum(losses_batch) / len(losses_batch)
plot_X = [step] * (self.model.n_decoders + 1)
plot_Y = [loss_step.data[0]] + losses_step_val
self.logger.add_loss(title, **{
t: p for t, p in zip(self.legend, plot_Y)
})
return merged_data, dec_logits, loss_batch, loss_step
def step_val(self, step, batch_data):
data, dec_logits, loss_b, loss_s = self.step(step, batch_data,
volatile=True,
title="Validation Loss")
sents = self.val_sents(data, dec_logits)
text = self.val_text(*sents)
self.logger.add_text("Validation Examples", text)
return loss_b, loss_s
def step_train(self, step, batch_data):
data, dec_logits, loss_b, loss_s = self.step(step, batch_data,
volatile=False,
title="Training Loss")
return loss_b, loss_s
def save(self, filename):
path = os.path.join(self.save_dir, filename)
torch.save(self.model.state_dict(), path)
self.logger.save(self.save_dir)
def train(self):
optimizer = O.Adam([p for p in self.model.parameters()
if p.requires_grad])
step = 0
t = tqdm.tqdm()
for epoch in range(self.n_epochs):
for data in self.data_generator:
step += 1
optimizer.zero_grad()
if step % self.val_period == 0:
loss_b, loss_s = self.step_val(step, data)
else:
loss_b, loss_s = self.step_train(step, data)
loss_b.backward()
clip_grad_norm(self.model.parameters(), 10)
optimizer.step()
loss_val = loss_s.data[0]
if step % self.save_period == 0:
filename = self.ckpt_format.format(
epoch="{:02d}".format(epoch),
step="{:07d}".format(step),
loss="{:.4f}".format(loss_val)
)
self.save(filename)
t.set_description("[{}|{}]: loss={:.4f}".format(
epoch, step, loss_val
))
t.update()
def init_viz(args, kwargs):
global viz
viz = Visdom(*args, **kwargs)
def viz_run(f_name, args, kwargs):
global viz
getattr(viz, f_name).__call__(*args, **kwargs)
def file_list_reader(dir_or_paths, shuffle_files=False):
if shuffle_files:
import random
random.shuffle(dir_or_paths)
for x in dir_or_paths:
if os.path.isfile(x):
with open(x, "r") as f:
for line in f:
yield line.strip()
else:
reader = DirectoryReader(x, shuffle_files=shuffle_files)
for line in reader:
yield line
def prod(*args):
x = 1
for a in args:
x *= a
return x
def count_parameters(model):
counts = 0
for param in model.parameters():
if param.requires_grad:
counts += prod(*param.size())
return counts
class DataParallelSkipThoughts(MultiContextSkipThoughts):
def __init__(self, *args, **kwargs):
super(DataParallelSkipThoughts, self).__init__(*args, **kwargs)
def forward(self, *inputs, **kwargs):
inputs = [d.squeeze(0) for d in inputs]
return super(DataParallelSkipThoughts, self).forward(*inputs, **kwargs)
class TrainLogger(object):
def __init__(self):
self.step = 0
def next(self):
s = self.step
self.step += 1
return s
def add_loss(self, prefix, **losses):
raise NotImplementedError()
def add_text(self, name, text):
raise NotImplementedError()
def save(self, save_dir):
raise NotImplementedError()
class TensorboardTrainLogger(TrainLogger):
def __init__(self, log_dir):
super(TensorboardTrainLogger, self).__init__()
self.log_dir = log_dir
self.writers = {}
def add_loss(self, prefix, **losses):
for name, value in losses.items():
if name not in self.writers:
dir = os.path.join(self.log_dir, name)
self.writers[name] = SummaryWriter(dir)
self.writers[name].add_scalar(prefix, value, self.next())
def add_text(self, name, text):
pass
def save(self, save_dir):
pass
class VisdomTrainLogger(TrainLogger):
def __init__(self, viz_pool):
super(VisdomTrainLogger, self).__init__()
self.viz_pool = viz_pool
def add_loss(self, name, **losses):
self.viz_pool.apply_async(viz_run, ("plot", tuple(), dict(
X=[self.next()] * len(losses),
Y=[list(losses.values())],
opts=dict(
legend=list(losses.keys()),
title=name
)
)))
def add_text(self, name, text):
self.viz_pool.apply_async(viz_run, ("code", tuple(), dict(
text=text,
opts=dict(
title=name
)
)))
def save(self, save_dir):
viz.save([save_dir])
class DummyTrainLogger(TrainLogger):
def add_loss(self, prefix, **losses):
pass
def add_text(self, name, text):
pass
def save(self, save_dir):
pass
def main():
args = parse_args()
if args.verbose:
loglvl = logging.INFO
else:
loglvl = logging.CRITICAL
logging.basicConfig(level=loglvl)
n_decoders = args.before + args.after + (1 if args.predict_self else 0)
assert os.path.exists(args.vocab_path)
logging.info("loading vocabulary...")
with open(args.vocab_path, "rb") as f:
vocab = pickle.load(f)
timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
save_basename = timestamp + "-{}".format(args.name)
save_dir = os.path.join(args.save_dir, save_basename)
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
logging.info("initializing model...")
model_cls = DataParallelSkipThoughts
model = model_cls(vocab, args.word_dim, args.hidden_dim,
reverse_encoder=args.reverse_encoder,
encoder_cell=args.encoder_cell,
decoder_cell=args.decoder_cell,
n_decoders=n_decoders,
n_layers=args.layers,
dropout_prob=args.dropout_prob,
batch_first=args.batch_first,
conditional_decoding=args.conditional_decoding,
encoder_direction=args.encoder_direction)
model.reset_parameters()
n_params = count_parameters(model)
logging.info("number of params: {}".format(n_params))
logging.info("loading word embeddings...")
assert args.wordembed_processes >= 1, \
"number of processes must be larger than or equal to 1."
if args.wordembed_processes > 1:
def embedding_loader(path, word_dim):
return load_embeddings_mp(path, word_dim,
processes=args.wordembed_processes)
else:
embedding_loader = load_embeddings
if args.wordembed_type == "glove":
embeddings = embedding_loader(args.wordembed_path, model.word_dim)
preinitialize_embeddings(model, vocab, embeddings)
elif args.wordembed_type == "fasttext":
fasttext_path = args.fasttext_path
assert fasttext_path is not None, \
"fasttext_path must specified when embed_type is fasttext."
embeddings = load_fasttext_embeddings(vocab.words,
fasttext_path,
args.wordembed_path)
preinitialize_embeddings(model, vocab, embeddings)
elif args.wordembed_type == "none":
pass
else:
raise ValueError("Unrecognized word embedding type: {}".format(
args.wordembed_type
))
if args.wordembed_freeze:
model.embeddings.weight.requires_grad = False
if args.visualizer is None:
logger = DummyTrainLogger()
elif args.visualizer == "tensorboard":
from tensorboard import SummaryWriter
logger = TensorboardTrainLogger(save_dir)
elif args.visualizer == "visdom":
from visdom_pooled import Visdom
viz_pool = mp.ThreadPool(1, initializer=init_viz, initargs=(tuple(), dict(
buffer_size=args.visdom_buffer_size,
server="http://{}".format(args.visdom_host),
port=args.visdom_port,
env=args.name,
name=timestamp
)))
logger = VisdomTrainLogger(viz_pool)
else:
raise ValueError("Unrecognized visualizer type: {}".format(args.visualizer))
logger.add_text("Arguments", str(args)[10:-1].replace(", ", "\n"))
config_path = os.path.join(save_dir, os.path.basename(args.config))
shutil.copy(args.config, config_path)
logging.info("preparing training environment...")
# Refer to torchtextutils.ContextDataGenerator
batch_size = args.batch_size + args.before + args.after
data_generator = DataGenerator(
data_paths=args.data_path,
vocab=vocab,
omit_prob=args.omit_prob,
swap_prob=args.swap_prob,
batch_size=batch_size,
max_len=args.max_len,
n_before=args.before,
n_after=args.after,
predict_self=args.predict_self,
shuffle_files=True,
batch_first=args.batch_first,
pin_memory=True,
allow_residual=False
)
trainer = Trainer(
model=model,
gpu_devices=args.gpus,
data_generator=data_generator,
n_epochs=args.epochs,
ckpt_format=args.ckpt_name,
logger=logger,
save_dir=save_dir,
save_period=args.save_period,
val_period=args.val_period,
previews=args.previews,
batch_first=args.batch_first
)
logging.info("training...")
trainer.train()
logging.info("done!")
if __name__ == '__main__':
main()
| mit |
deepgram/kur | kur/optimizer/sgd.py | 1 | 2603 | """
Copyright 2017 Deepgram
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from . import Optimizer, keras_clip, keras_wrap
logger = logging.getLogger(__name__)
###############################################################################
class SGD(Optimizer):
""" Stochastic gradient descent optimizer
"""
###########################################################################
def __init__(self, learning_rate=None, momentum=None, decay=None,
nesterov=None, *args, **kwargs):
""" Create a new Adam optimizer.
# Arguments
learning_rate: float. The learning rate to use.
			momentum: float. Momentum for parameter updates.
			decay: float. Learning rate decay applied over each update.
nesterov: bool. Whether or not to apply Nesterov momentum.
"""
super().__init__(*args, **kwargs)
self.learning_rate = learning_rate or 0.01
self.momentum = momentum or 0.0
self.decay = decay or 0.0
self.nesterov = nesterov or False
self.optimizer = None
###########################################################################
def get_optimizer(self, backend):
""" Returns a backend-specific instantiation of the optimizer.
"""
if backend.get_name() == 'keras':
import keras.optimizers as O # pylint: disable=import-error
self.optimizer = self.optimizer or O.SGD(
lr=self.learning_rate,
momentum=self.momentum,
decay=self.decay,
nesterov=self.nesterov,
**keras_clip(self)
)
return keras_wrap(self.optimizer)
elif backend.get_name() == 'pytorch':
import torch.optim as optim # pylint: disable=import-error
if self.nesterov:
logger.warning('The PyTorch backend does not use Nesterov '
'momentum. Ignoring it...')
if self.optimizer is None:
self.optimizer = lambda params: optim.SGD(
params,
lr=self.learning_rate,
momentum=self.momentum,
weight_decay=self.decay
)
return self.optimizer
else:
raise ValueError('Unsupported backend "{}" for optimizer "{}"'
.format(backend.get_name(), self.get_name()))
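# Illustrative usage note (not part of the original file): in Kur this optimizer
# is normally instantiated from the Kurfile's "train" section rather than directly.
# A rough sketch of the direct call path (names of the backend object are supplied
# by the framework, not by this module):
#     optimizer = SGD(learning_rate=0.01, momentum=0.9, nesterov=True)
#     backend_optimizer = optimizer.get_optimizer(backend)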
### EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF.EOF
| apache-2.0 |
kjchalup/dtit | fcit/fcit.py | 1 | 7474 | """ A fast conditional independence test.
This implementation uses the joblib library to parallelize test
statistic computation over all available cores. By default, num_perm=8
(instead of num_perm=10 in the non-parallel version) as 8 cores is a
common number on current architectures.
Reference:
Chalupka, Krzysztof and Perona, Pietro and Eberhardt, Frederick, 2017.
"""
import os
import time
import joblib
import numpy as np
from scipy.stats import ttest_1samp
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import ShuffleSplit
from sklearn.random_projection import GaussianRandomProjection
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error as mse
def interleave(x, z, seed=None):
""" Interleave x and z dimension-wise.
Args:
x (n_samples, x_dim) array.
z (n_samples, z_dim) array.
Returns
An array of shape (n_samples, x_dim + z_dim) in which
the columns of x and z are interleaved at random.
"""
state = np.random.get_state()
np.random.seed(seed or int(time.time()))
total_ids = np.random.permutation(x.shape[1]+z.shape[1])
np.random.set_state(state)
out = np.zeros([x.shape[0], x.shape[1] + z.shape[1]])
out[:, total_ids[:x.shape[1]]] = x
out[:, total_ids[x.shape[1]:]] = z
return out
def cv_besttree(x, y, z, cv_grid, logdim, verbose, prop_test):
""" Choose the best decision tree hyperparameters by
cross-validation. The hyperparameter to optimize is min_samples_split
(see sklearn's DecisionTreeRegressor).
Args:
x (n_samples, x_dim): Input data array.
y (n_samples, y_dim): Output data array.
z (n_samples, z_dim): Optional auxiliary input data.
cv_grid (list of floats): List of hyperparameter values to try.
logdim (bool): If True, set max_features to 'log2'.
verbose (bool): If True, print out extra info.
prop_test (float): Proportion of validation data to use.
Returns:
DecisionTreeRegressor with the best hyperparameter setting.
"""
xz_dim = x.shape[1] + z.shape[1]
max_features='log2' if (logdim and xz_dim > 10) else None
if cv_grid is None:
min_samples_split = 2
elif len(cv_grid) == 1:
min_samples_split = cv_grid[0]
else:
clf = DecisionTreeRegressor(max_features=max_features)
splitter = ShuffleSplit(n_splits=3, test_size=prop_test)
cv = GridSearchCV(estimator=clf, cv=splitter,
param_grid={'min_samples_split': cv_grid}, n_jobs=-1)
cv.fit(interleave(x, z), y)
min_samples_split = cv.best_params_['min_samples_split']
if verbose:
print('min_samples_split: {}.'.format(min_samples_split))
clf = DecisionTreeRegressor(max_features=max_features,
min_samples_split=min_samples_split)
return clf
def obtain_error(data_and_i):
"""
A function used for multithreaded computation of the fcit test statistic.
data['x']: First variable.
data['y']: Second variable.
data['z']: Conditioning variable.
data['data_permutation']: Permuted indices of the data.
    data['reshuffle']: Whether to randomly permute the rows of x (simulates the null).
data['n_test']: Number of test points.
data['clf']: Decision tree regressor.
"""
data, i = data_and_i
x = data['x']
y = data['y']
z = data['z']
if data['reshuffle']:
perm_ids = np.random.permutation(x.shape[0])
else:
perm_ids = np.arange(x.shape[0])
data_permutation = data['data_permutation'][i]
n_test = data['n_test']
clf = data['clf']
x_z = interleave(x[perm_ids], z, seed=i)
clf.fit(x_z[data_permutation][n_test:], y[data_permutation][n_test:])
return mse(y[data_permutation][:n_test],
clf.predict(x_z[data_permutation][:n_test]))
def test(x, y, z=None, num_perm=8, prop_test=.1,
discrete=(False, False), plot_return=False, verbose=False,
logdim=False, cv_grid=[2, 8, 64, 512, 1e-2, .2, .4], **kwargs):
""" Fast conditional independence test, based on decision-tree regression.
See Chalupka, Perona, Eberhardt 2017 [arXiv link coming].
Args:
x (n_samples, x_dim): First variable.
y (n_samples, y_dim): Second variable.
z (n_samples, z_dim): Conditioning variable. If z==None (default),
then performs an unconditional independence test.
num_perm: Number of data permutations to estimate
the p-value from marginal stats.
        prop_test (float): Proportion of data to evaluate test stat on.
discrete (bool, bool): Whether x or y are discrete.
plot_return (bool): If True, return statistics useful for plotting.
verbose (bool): Print out progress messages (or not).
logdim (bool): If True, set max_features='log2' in the decision tree.
        cv_grid (list): min_samples_split values to cross-validate when training
the decision tree regressor.
Returns:
p (float): The p-value for the null hypothesis
that x is independent of y.
"""
# Compute test set size.
n_samples = x.shape[0]
n_test = int(n_samples * prop_test)
if z is None:
z = np.empty([n_samples, 0])
if discrete[0] and not discrete[1]:
# If x xor y is discrete, use the continuous variable as input.
x, y = y, x
elif x.shape[1] < y.shape[1]:
# Otherwise, predict the variable with fewer dimensions.
x, y = y, x
# Normalize y to make the decision tree stopping criterion meaningful.
y = StandardScaler().fit_transform(y)
# Set up storage for true data and permuted data MSEs.
d0_stats = np.zeros(num_perm)
d1_stats = np.zeros(num_perm)
data_permutations = [
np.random.permutation(n_samples) for i in range(num_perm)]
# Compute mses for y = f(x, z), varying train-test splits.
clf = cv_besttree(x, y, z, cv_grid, logdim, verbose, prop_test=prop_test)
datadict = {
'x': x,
'y': y,
'z': z,
'data_permutation': data_permutations,
'n_test': n_test,
'reshuffle': False,
'clf': clf,
}
d1_stats = np.array(joblib.Parallel(n_jobs=-1, max_nbytes=100e6)(
joblib.delayed(obtain_error)((datadict, i)) for i in range(num_perm)))
# Compute mses for y = f(x, reshuffle(z)), varying train-test splits.
if z.shape[1] == 0:
x_indep_y = x[np.random.permutation(n_samples)]
else:
x_indep_y = np.empty([x.shape[0], 0])
clf = cv_besttree(x_indep_y, y, z, cv_grid, logdim,
verbose, prop_test=prop_test)
datadict['reshuffle'] = True
datadict['x'] = x_indep_y
d0_stats = np.array(joblib.Parallel(n_jobs=-1, max_nbytes=100e6)(
joblib.delayed(obtain_error)((datadict, i)) for i in range(num_perm)))
if verbose:
np.set_printoptions(precision=3)
print('D0 statistics: {}'.format(d0_stats))
print('D1 statistics: {}\n'.format(d1_stats))
# Compute the p-value (one-tailed t-test
# that mean of mse ratios equals 1).
t, p_value = ttest_1samp(d0_stats / d1_stats, 1)
if t < 0:
p_value = 1 - p_value / 2
else:
p_value = p_value / 2
if plot_return:
return (p_value, d0_stats, d1_stats)
else:
return p_value
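# --- Illustrative usage (not part of the original module) ---
# A minimal, self-contained sketch of how `test` might be called; the data below
# is synthetic and the variable names are placeholders. Here x and y are
# conditionally independent given z, so the p-value should typically be large.
if __name__ == '__main__':
    n = 1000
    z_demo = np.random.randn(n, 2)
    x_demo = z_demo[:, :1] + 0.1 * np.random.randn(n, 1)
    y_demo = z_demo[:, 1:] + 0.1 * np.random.randn(n, 1)
    print(test(x_demo, y_demo, z_demo))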
| mit |
lmcinnes/umap | umap/tests/test_umap_on_iris.py | 1 | 9572 | from umap import UMAP
from umap.umap_ import nearest_neighbors
from scipy import sparse
import numpy as np
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score
from sklearn.neighbors import KDTree
from scipy.spatial.distance import cdist, pdist, squareform
import pytest
import warnings
try:
# works for sklearn>=0.22
from sklearn.manifold import trustworthiness
except ImportError:
# this is to comply with requirements (scikit-learn>=0.20)
# More recent versions of sklearn have exposed trustworthiness
# in top level module API
# see: https://github.com/scikit-learn/scikit-learn/pull/15337
from sklearn.manifold.t_sne import trustworthiness
# ===================================================
# UMAP Test cases on IRIS Dataset
# ===================================================
# UMAP Trustworthiness on iris
# ----------------------------
def test_umap_trustworthiness_on_iris(iris, iris_model):
embedding = iris_model.embedding_
trust = trustworthiness(iris.data, embedding, n_neighbors=10)
assert (
trust >= 0.97
), "Insufficiently trustworthy embedding for" "iris dataset: {}".format(trust)
def test_initialized_umap_trustworthiness_on_iris(iris):
data = iris.data
embedding = UMAP(
n_neighbors=10,
min_dist=0.01,
init=data[:, 2:],
n_epochs=200,
random_state=42,
).fit_transform(data)
trust = trustworthiness(iris.data, embedding, n_neighbors=10)
assert (
trust >= 0.97
), "Insufficiently trustworthy embedding for" "iris dataset: {}".format(trust)
def test_umap_trustworthiness_on_sphere_iris(
iris,
):
data = iris.data
embedding = UMAP(
n_neighbors=10,
min_dist=0.01,
n_epochs=200,
random_state=42,
output_metric="haversine",
).fit_transform(data)
# Since trustworthiness doesn't support haversine, project onto
# a 3D embedding of the sphere and use cosine distance
r = 3
projected_embedding = np.vstack(
[
r * np.sin(embedding[:, 0]) * np.cos(embedding[:, 1]),
r * np.sin(embedding[:, 0]) * np.sin(embedding[:, 1]),
r * np.cos(embedding[:, 0]),
]
).T
trust = trustworthiness(
iris.data, projected_embedding, n_neighbors=10, metric="cosine"
)
assert (
trust >= 0.70
), "Insufficiently trustworthy spherical embedding for iris dataset: {}".format(
trust
)
# UMAP Transform on iris
# ----------------------
def test_umap_transform_on_iris(iris, iris_subset_model, iris_selection):
fitter = iris_subset_model
new_data = iris.data[~iris_selection]
embedding = fitter.transform(new_data)
trust = trustworthiness(new_data, embedding, n_neighbors=10)
assert (
trust >= 0.85
), "Insufficiently trustworthy transform for" "iris dataset: {}".format(trust)
def test_umap_transform_on_iris_w_pynndescent(iris, iris_selection):
data = iris.data[iris_selection]
fitter = UMAP(
n_neighbors=10,
min_dist=0.01,
n_epochs=100,
random_state=42,
force_approximation_algorithm=True,
).fit(data)
new_data = iris.data[~iris_selection]
embedding = fitter.transform(new_data)
trust = trustworthiness(new_data, embedding, n_neighbors=10)
assert (
trust >= 0.85
), "Insufficiently trustworthy transform for" "iris dataset: {}".format(trust)
def test_umap_transform_on_iris_modified_dtype(iris, iris_subset_model, iris_selection):
fitter = iris_subset_model
fitter.embedding_ = fitter.embedding_.astype(np.float64)
new_data = iris.data[~iris_selection]
embedding = fitter.transform(new_data)
trust = trustworthiness(new_data, embedding, n_neighbors=10)
assert (
trust >= 0.8
), "Insufficiently trustworthy transform for iris dataset: {}".format(trust)
def test_umap_sparse_transform_on_iris(iris, iris_selection):
data = sparse.csr_matrix(iris.data[iris_selection])
assert sparse.issparse(data)
fitter = UMAP(
n_neighbors=10,
min_dist=0.01,
random_state=42,
n_epochs=100,
# force_approximation_algorithm=True,
).fit(data)
new_data = sparse.csr_matrix(iris.data[~iris_selection])
assert sparse.issparse(new_data)
embedding = fitter.transform(new_data)
trust = trustworthiness(new_data, embedding, n_neighbors=10)
assert (
trust >= 0.80
), "Insufficiently trustworthy transform for" "iris dataset: {}".format(trust)
# UMAP precomputed metric transform on iris
# ----------------------
def test_precomputed_transform_on_iris(iris, iris_selection):
data = iris.data[iris_selection]
distance_matrix = squareform(pdist(data))
fitter = UMAP(
n_neighbors=10,
min_dist=0.01,
random_state=42,
n_epochs=100,
metric="precomputed",
).fit(distance_matrix)
new_data = iris.data[~iris_selection]
new_distance_matrix = cdist(new_data, data)
embedding = fitter.transform(new_distance_matrix)
trust = trustworthiness(new_data, embedding, n_neighbors=10)
assert (
trust >= 0.85
), "Insufficiently trustworthy transform for" "iris dataset: {}".format(trust)
# UMAP precomputed metric transform on iris with sparse distances
# ----------------------
def test_precomputed_sparse_transform_on_iris(iris, iris_selection):
data = iris.data[iris_selection]
distance_matrix = sparse.csr_matrix(squareform(pdist(data)))
fitter = UMAP(
n_neighbors=10,
min_dist=0.01,
random_state=42,
n_epochs=100,
metric="precomputed",
).fit(distance_matrix)
new_data = iris.data[~iris_selection]
new_distance_matrix = sparse.csr_matrix(cdist(new_data, data))
embedding = fitter.transform(new_distance_matrix)
trust = trustworthiness(new_data, embedding, n_neighbors=10)
assert (
trust >= 0.85
), "Insufficiently trustworthy transform for" "iris dataset: {}".format(trust)
# UMAP Clusterability on Iris
# ---------------------------
def test_umap_clusterability_on_supervised_iris(supervised_iris_model, iris):
embedding = supervised_iris_model.embedding_
clusters = KMeans(3).fit_predict(embedding)
assert adjusted_rand_score(clusters, iris.target) >= 0.95
# UMAP Inverse transform on Iris
# ------------------------------
def test_umap_inverse_transform_on_iris(iris, iris_model):
highd_tree = KDTree(iris.data)
fitter = iris_model
lowd_tree = KDTree(fitter.embedding_)
for i in range(1, 150, 20):
query_point = fitter.embedding_[i]
near_points = lowd_tree.query([query_point], k=5, return_distance=False)
centroid = np.mean(np.squeeze(fitter.embedding_[near_points]), axis=0)
highd_centroid = fitter.inverse_transform([centroid])
highd_near_points = highd_tree.query(
highd_centroid, k=10, return_distance=False
)
assert np.intersect1d(near_points, highd_near_points[0]).shape[0] >= 3
def test_precomputed_knn_on_iris(iris, iris_selection, iris_subset_model):
# this to compare two similarity graphs which should be nearly the same
def rms(a, b):
return np.sqrt(np.mean(np.square(a - b)))
data = iris.data[iris_selection]
new_data = iris.data[~iris_selection]
knn = nearest_neighbors(
data,
n_neighbors=10,
metric="euclidean",
metric_kwds=None,
angular=False,
random_state=42,
)
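    # nearest_neighbors returns (knn_indices, knn_dists, knn_search_index); passing
    # the full tuple as precomputed_knn lets UMAP skip its own neighbor search and
    # still support transforming new data later.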
# repeated UMAP arguments we don't want to mis-specify
umap_args = dict(
n_neighbors=iris_subset_model.n_neighbors,
random_state=iris_subset_model.random_state,
min_dist=iris_subset_model.min_dist,
)
# force_approximation_algorithm parameter is ignored when a precomputed knn is used
fitter_with_precomputed_knn = UMAP(
**umap_args,
precomputed_knn=knn,
force_approximation_algorithm=False,
).fit(data)
# embeddings and similarity graph are NOT the same due to choices of nearest
# neighbor in non-exact case: similarity graph is most stable for comparing output
# threshold for similarity in graph empirically chosen by comparing the iris subset
# model with force_approximation_algorithm=True and different random seeds
assert rms(fitter_with_precomputed_knn.graph_, iris_subset_model.graph_) < 0.005
with pytest.warns(Warning, match="transforming new data") as record:
fitter_ignoring_force_approx = UMAP(
**umap_args,
precomputed_knn=(knn[0], knn[1]),
).fit(data)
assert len(record) == 1
np.testing.assert_array_equal(
fitter_ignoring_force_approx.embedding_, fitter_with_precomputed_knn.embedding_
)
# #848 (continued): if you don't have a search index, attempting to transform
# will raise an error
with pytest.raises(NotImplementedError, match="search index"):
_ = fitter_ignoring_force_approx.transform(new_data)
# force_approximation_algorithm parameter is ignored
with pytest.warns(Warning, match="transforming new data") as record:
fitter_ignoring_force_approx_True = UMAP(
**umap_args,
precomputed_knn=(knn[0], knn[1]),
force_approximation_algorithm=True,
).fit(data)
assert len(record) == 1
np.testing.assert_array_equal(
fitter_ignoring_force_approx_True.embedding_, fitter_ignoring_force_approx.embedding_
)
| bsd-3-clause |
openai/triton | python/triton/ops/blocksparse/matmul.py | 1 | 15613 | import torch
import triton
import triton.language as tl
# ********************************************************
# --------------------------------------------------------
# Sparse = Dense x Dense (SDD)
# This operation uses super-blocking to make sure that
# it's done efficiently when small blocks can be grouped
# together
# --------------------------------------------------------
# ********************************************************
@triton.heuristics({
'EVEN_K': lambda nargs: nargs['K'] % nargs['TILE_K'] == 0,
})
@triton.jit
def _sdd_kernel(
A, B, C,
stride_za, stride_ha, stride_ma, stride_ak,
stride_zb, stride_hb, stride_bk, stride_nb,
stride_zc, stride_hc, stride_mc, stride_nc,
K, grid_offset, lut,
TILE_M: tl.constexpr, TILE_N: tl.constexpr, TILE_K: tl.constexpr,
BLOCK: tl.constexpr, EVEN_K: tl.constexpr
):
# ------------ #
# - Prologue - #
# ------------ #
block_id = tl.program_id(1) + grid_offset
lut += block_id * 3
# offsets
off_z = tl.program_id(2) # batch
off_h = tl.load(lut + 0) # head
# initialize pointers to A
start_am = tl.load(lut + 1)
offs_am = start_am * BLOCK + (tl.arange(0, TILE_M) % BLOCK)
offs_ak = tl.arange(0, TILE_K)
a_ptrs = A \
+ off_z * stride_za \
+ off_h * stride_ha \
+ offs_am[:, None] * stride_ma \
+ offs_ak[None, :] * stride_ak
# initialize pointers to B
start_bn = tl.load(lut + 2)
offs_bn = start_bn * BLOCK + (tl.arange(0, TILE_N) % BLOCK)
offs_bk = tl.arange(0, TILE_K)
b_ptrs = B \
+ off_z * stride_zb \
+ off_h * stride_hb \
+ offs_bn[None, :] * stride_nb \
+ offs_bk[:, None] * stride_bk
# ---------------- #
# Inner Loop #
# ---------------- #
acc = tl.zeros((TILE_M, TILE_N), dtype=tl.float32)
for k in range(K, 0, -TILE_K):
if EVEN_K:
a = tl.load(a_ptrs)
b = tl.load(b_ptrs)
else:
a = tl.load(a_ptrs, mask=offs_ak[None, :] < k, other=0.)
b = tl.load(b_ptrs, mask=offs_bk[:, None] < k, other=0.)
acc += tl.dot(a, b)
a_ptrs += TILE_K * stride_ak
b_ptrs += TILE_K * stride_bk
c = acc.to(C.dtype.element_ty)
# ---------------- #
# Epilogue #
# ---------------- #
offs_cm = tl.arange(0, TILE_M) % BLOCK
offs_cn = tl.arange(0, TILE_N) % BLOCK
pc = C \
+ off_z * stride_zc \
+ block_id * stride_hc \
+ offs_cm[:, None] * stride_mc \
+ offs_cn[None, :] * stride_nc
tl.store(pc, c, mask=True)
def sdd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, widths, out=None):
if a.stride(2) != 1 and a.stride(3) != 1:
a = a.contiguous()
if b.stride(2) != 1 and b.stride(3) != 1:
b = b.contiguous()
# (A * B)^T = B^T * A^T
if trans_c:
a, b = b, a
trans_a, trans_b = not trans_b, not trans_a
# shape constraints
a_dim = -2 if trans_a else -1
b_dim = -1 if trans_b else -2
Ka, Kb = a.shape[a_dim], b.shape[b_dim]
if Ka != Kb:
raise ValueError(f"Inner dimension mismatch (A: {Ka} vs B: {Kb})")
# allocate output
if out is None:
c = torch.empty((a.shape[0], lut.shape[0], block, block), dtype=a.dtype, device=a.device)
else:
assert out.shape == (a.shape[0], lut.shape[0], block, block)
c = out
grid = [1, c.shape[1], c.shape[0]]
_sdd_kernel[grid](
a, b, c,
a.stride(0), a.stride(1), a.stride(3 if trans_a else 2), a.stride(2 if trans_a else 3),
b.stride(0), b.stride(1), b.stride(3 if trans_b else 2), b.stride(2 if trans_b else 3),
c.stride(0), c.stride(1), c.stride(2), c.stride(3),
Ka, 0, lut,
TILE_M=block, TILE_N=block, TILE_K=32, BLOCK=block, num_stages=4,
num_warps=4,
)
return c
def sdd_lut(layout, block, device):
lut = layout.nonzero(as_tuple=False).to(device).int()
lut = lut.contiguous()
return lut, None
# -----------------------------
# Dense = Sparse x Dense (DSD)
# This operation uses a look-up table that contains pre-computed pointer increments
# in order to minimize computations in the inner loop of the matmul kernel.
# -----------------------------
@triton.jit
def _dsd_kernel(
A, B, C,
stride_az, stride_ha, stride_am, stride_ak,
stride_zb, stride_hb, stride_bk, stride_bn,
stride_zc, stride_hc, stride_cm, stride_cn,
DS0, DS1, lut,
TILE_M: tl.constexpr, TILE_N: tl.constexpr, TILE_K: tl.constexpr,
GROUP_SIZE_M: tl.constexpr, BLOCK: tl.constexpr
):
# ------------ #
# - Prologue - #
# ------------ #
pid_m = tl.program_id(0)
pid_n = tl.program_id(1)
num_pid_m = tl.num_programs(0)
num_pid_n = tl.num_programs(1)
pid_n, pid_m = tl.swizzle2d(pid_n, pid_m, num_pid_n, num_pid_m, GROUP_SIZE_M)
pidz = tl.program_id(2)
header = lut + pid_n * 4
offset = tl.load(header + 0)
K = tl.load(header + 1)
column = tl.load(header + 2)
off_h = tl.load(header + 3)
pinc = lut + offset
# initialize pointers to A (sparse)
block_id = tl.load(pinc + 1)
block_id = tl.multiple_of(block_id, 8) # compiler hint
offs_am = tl.arange(0, TILE_M)
offs_ak = tl.arange(0, TILE_K)
pa = A + pidz * stride_az \
+ block_id * stride_ha \
+ offs_am[:, None] * stride_am \
+ offs_ak[None, :] * stride_ak
# initialize pointers to B (dense)
offs_bn = pid_m * TILE_N + tl.arange(0, TILE_N)
offs_bn = tl.max_contiguous(tl.multiple_of(offs_bn % DS0, TILE_N), TILE_N)
start_bk = tl.load(pinc)
start_bk = tl.multiple_of(start_bk, 8) # compiler hint
offs_bk = start_bk + tl.arange(0, TILE_K)
pb = B + pidz * stride_zb \
+ off_h * stride_hb \
+ offs_bn[None, :] * stride_bn \
+ offs_bk[:, None] * stride_bk
# ---------------- #
# Inner Loop #
# ---------------- #
acc = tl.zeros((TILE_M, TILE_N), dtype=tl.float32)
pinc += 2
inc_a = tl.load(pinc + 1)
inc_a = tl.multiple_of(inc_a, 8)
inc_b = tl.load(pinc)
inc_b = tl.multiple_of(inc_b, 8)
for k in range(K, 0, -TILE_K):
a = tl.load(pa, mask=True)
b = tl.load(pb, mask=offs_bn[None, :] < DS0)
acc += tl.dot(a, b)
pa += inc_a
pb += inc_b * stride_bk
pinc += 2
inc_a = tl.load(pinc + 1)
inc_a = tl.multiple_of(inc_a, 8)
inc_b = tl.load(pinc)
inc_b = tl.multiple_of(inc_b, 8)
c = acc.to(C.dtype.element_ty)
# initialize pointers to C
offs_cm = column * TILE_M + tl.arange(0, TILE_M)
offs_cn = pid_m * TILE_N + tl.arange(0, TILE_N)
pc = C \
+ off_h * stride_hc \
+ pidz * stride_zc \
+ offs_cm[:, None] * stride_cm \
+ offs_cn[None, :] * stride_cn
tl.store(pc, c, mask=offs_cn[None, :] < DS0)
def dsd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, width, out=None):
if a.stride(2) != 1 and a.stride(3) != 1:
a = a.contiguous()
if b.stride(2) != 1 and b.stride(3) != 1:
b = b.contiguous()
# shapes / dtypes
AS1 = block * spdims[2 if trans_a else 1]
BS0 = b.size(0)
BS1 = b.size(1)
BS3 = b.size(2 if trans_b else 3)
dtype = a.dtype
# allocate output
CS0 = BS0
CS1 = BS1
CS2 = BS3 if trans_c else AS1
CS3 = AS1 if trans_c else BS3
if out is None:
c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)
else:
assert out.shape == (CS0, CS1, CS2, CS3)
c = out
# meta-parameter heuristics
TILE_N = 128
# compute output
grid = lambda meta: [triton.cdiv(BS3, meta['TILE_N']), width, BS0]
_dsd_kernel[grid](
a, b, c,
a.stride(0), a.stride(1), a.stride(3 if trans_a else 2), a.stride(2 if trans_a else 3),
b.stride(0), b.stride(1), b.stride(3 if trans_b else 2), b.stride(2 if trans_b else 3),
c.stride(0), c.stride(1), c.stride(3 if trans_c else 2), c.stride(2 if trans_c else 3),
BS3, AS1, lut,
TILE_M=block, TILE_N=TILE_N, TILE_K=min(block, 32), BLOCK=block, num_stages=4,
num_warps=4, GROUP_SIZE_M=4,
)
# exit()
return c
def dsd_lut(layout, block, step, trans, device):
"""
Generates the look-up table for incrementing pointers in the DSD/DDS matmul.
Example (BLOCK=32, STEP=16)
[[1, 0, 0, 1, 0],
[0, 1, 1, 0, 1],
[1, 0, 1, 0, 0]]
Then the offsets for A are
[0 , 16, 32, 48] <- row 0
\\----/ \\----/
col=0 col=3
[64, 80, 96, 112, 128, 144] <- row 1
\\----/ \\----/ \\------/
col=1 col=2 col=3
[160, 176, 192, 208]
which leads to increments table
[0, 16, 16, 16, || 64, 16, 16, 16, 16, 16, || 160, 16, 16, 16]
Because B is dense, the offsets are
[0, 16, 96, 112] <- row 0
[32, 48, 64, 80] <- row 1
[0, 16, 64, 80] <- row 2
"""
sizes = torch.sum(layout, 2 if trans else 1)
head_id, col_id = torch.ones_like(sizes).nonzero(as_tuple=True)
sizes = sizes.flatten()
segments = sizes * step
# pointer increments
if trans:
nnz = layout.nonzero(as_tuple=False)
else:
nnz = layout.transpose(1, 2).nonzero(as_tuple=False)
num_blocks = nnz.size(0)
offsets = torch.zeros_like(sizes)
offsets[1:] = torch.cumsum(sizes[:-1], dim=0)
offsets = torch.min(offsets, (num_blocks - 1) * torch.ones_like(offsets))
# -------------------------------
# dense input pointer increments
# -------------------------------
# Note that the inner loop matmul kernel may have a fixed step size (e.g., TILE_K)
# that is smaller than the block size, so we need to do a bit of extra work
# to handle this case
B_idx = nnz[:, 2] * block
B_incs = B_idx.clone()
B_incs[1:] -= B_idx[:-1]
div = block // step
B_incs = B_incs.view(-1, 1).repeat(1, div)
B_incs[:, 1:] = step
B_incs[:, 0] -= (div - 1) * step
# first increment for each reduction is actually the offset
B_incs[offsets[segments > 0], 0] = B_idx[offsets[segments > 0]]
B_incs = B_incs.view(-1)
# -------------------------------
# sparse input pointer increments
# -------------------------------
# same as above, except that the increments are in the sparse memory layout
if trans:
A_idx = torch.arange(num_blocks, device=layout.device)
else:
A_idx = torch.tensor([], dtype=torch.int64, device=layout.device)
current_offset = 0
for z in range(layout.size(0)):
layoutw = layout[z, :, :].clone().long()
msum = layoutw.sum()
layoutw[layoutw > 0] = 1 + torch.arange(msum, device=layout.device)
A_idx = torch.cat((A_idx, current_offset + layoutw.T[layoutw.T > 0] - 1))
current_offset += msum
A_incs = A_idx * block * block
A_incs[1:] -= A_idx[:-1] * block * block
A_incs = A_incs.view(-1, 1).repeat(1, div)
if trans:
A_incs[:, 1:] = step
A_incs[:, 0] -= (div - 1) * step
else:
A_incs[:, 1:] = step * block
A_incs[:, 0] -= (div - 1) * step * block
A_incs[offsets[segments > 0], 0] = A_idx[offsets[segments > 0]]
A_incs = A_incs.view(-1)
# create header
width = col_id.size(0)
offsets = offsets * 2 * div + 4 * width
segments = segments * div
header = torch.stack((offsets, segments, col_id, head_id), dim=1).view(-1).contiguous()
# create increments
incs = torch.stack((B_incs, A_incs), dim=1).view(-1).contiguous()
# pad by a factor 2*MAX_NUM_STAGES
# to accommodate pre-fetching inside the kernel
pad = torch.zeros(20, device=incs.device, dtype=incs.dtype)
incs = torch.cat((incs, pad))
# create lut
lut = torch.cat((header, incs))
lut = lut.type(torch.int32).to(device)
# create locks
return lut, width
# -----------------------------
# Dense = Dense x Sparse (DDS)
# -----------------------------
# AB = (B^T A^T)^T
def dds_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, width, out=None):
return dsd_matmul(b, a, not trans_b, not trans_a, not trans_c, spdims, block, lut, width, out=out)
##############
# MAIN API #
##############
class _matmul(torch.autograd.Function):
fn = {'sdd': sdd_matmul, 'dsd': dsd_matmul, 'dds': dds_matmul}
@staticmethod
def forward(
ctx, a, b, trans_a, trans_b, trans_c, mode, spdims, block,
c_lut, c_width, da_lut, da_width, db_lut, db_width, out
):
c = _matmul.fn[mode](a, b, trans_a, trans_b, trans_c, spdims, block, c_lut, c_width, out=out)
# save for backward
ctx.save_for_backward(a, b)
ctx.da_lut = da_lut
ctx.da_width = da_width
ctx.db_lut = db_lut
ctx.db_width = db_width
ctx.mode = mode
ctx.spdims = spdims
ctx.block = block
ctx.trans_a = trans_a
ctx.trans_b = trans_b
ctx.trans_c = trans_c
ctx.has_out = out is not None
return c
@staticmethod
def backward(ctx, dc):
# saved for backward
a, b = ctx.saved_tensors
da, db = None, None
mode = ctx.mode
# gradients w.r.t. a
if ctx.needs_input_grad[0]:
mode_da = mode[1] + mode[0] + mode[2]
da = _matmul.fn[mode_da](
dc, b, ctx.trans_c, not ctx.trans_b, ctx.trans_a, ctx.spdims, ctx.block, ctx.da_lut, ctx.da_width,
)
# gradients w.r.t. b
if ctx.needs_input_grad[1]:
mode_db = mode[2] + mode[1] + mode[0]
db = _matmul.fn[mode_db](
a, dc, not ctx.trans_a, ctx.trans_c, ctx.trans_b, ctx.spdims, ctx.block, ctx.db_lut, ctx.db_width,
)
dout = dc if ctx.has_out else None
return da, db, None, None, None,\
None, None, None, None,\
None, None, None, None, None, dout
class matmul:
def __init__(self, layout, block, mode, device, trans_a=False, trans_b=False, trans_c=False):
if mode not in ['sdd', 'dsd', 'dds']:
raise NotImplementedError('Supported modes are: sdd, dsd, dds')
self.block = block
self.mode = mode
self.trans_a = trans_a
self.trans_b = trans_b
self.trans_c = trans_c
self.layout = layout
self.spdims = layout.shape
step = min(block, 32)
if self.mode == 'sdd':
self.c_lut, self.c_width = sdd_lut(layout, block, device)
self.da_lut, self.da_width = dsd_lut(layout, block, step, True, device)
self.db_lut, self.db_width = dsd_lut(layout, block, step, False, device)
if self.mode == 'dsd':
self.c_lut, self.c_width = dsd_lut(layout, block, step, not self.trans_a, device)
self.da_lut, self.da_width = sdd_lut(layout, block, device)
self.db_lut, self.db_width = dsd_lut(layout, block, step, self.trans_a, device)
if self.mode == 'dds':
self.c_lut, self.c_width = dsd_lut(layout, block, step, self.trans_b, device)
self.da_lut, self.da_width = dsd_lut(layout, block, step, not self.trans_b, device)
self.db_lut, self.db_width = sdd_lut(layout, block, device)
def __call__(self, a, b, out=None):
c = _matmul.apply(
a, b, self.trans_a, self.trans_b, self.trans_c, self.mode, self.spdims, self.block,
self.c_lut, self.c_width,
self.da_lut, self.da_width,
self.db_lut, self.db_width,
out
)
return c
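# --- Illustrative usage (not part of the original file) ---
# A minimal sketch of driving the block-sparse matmul in 'sdd' mode; the shapes,
# block size and random layout below are made up for illustration, and a
# CUDA-capable GPU is required.
if __name__ == '__main__':
    Z, H, M, N, K, block = 1, 2, 256, 256, 256, 32
    layout = torch.randint(0, 2, (H, M // block, N // block), dtype=torch.int64)
    dot = matmul(layout, block, 'sdd', device='cuda')
    a = torch.randn(Z, H, M, K, dtype=torch.float16, device='cuda')
    b = torch.randn(Z, H, K, N, dtype=torch.float16, device='cuda')
    c = dot(a, b)  # one (block x block) output tile per nonzero entry in `layout`
    print(c.shape)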
| mit |
raticate/AS-RANK | 2_store_as_relationships.py | 1 | 5499 | import os, sys, MySQLdb
from datetime import datetime, timedelta
import time
import datetime as dt
db = MySQLdb.connect(host="localhost", user="root", passwd="", db="ASRank")
cur = db.cursor()
## download all the relationship if needed
link = """data.caida.org/datasets/as-relationships/"""
command = """wget --no-parent -r """ +link
print '\n download list of files :', command
os.system(command)
## Load the list of treated files :
list_treated_files = []
try:
with open('list_of_treated_files_rel.txt', 'r') as fg:
for line in fg:
line= str(line).strip()
if line not in list_treated_files:
list_treated_files.append(str(line).strip())
except:
with open('list_of_treated_files_rel.txt', 'a') as fk:
print
## Current date
A = str(datetime.now() + timedelta(days=-1))
table = A.split(' ')
date_info = table[0].split('-')
date_info_end = [int(date_info[0]), int(date_info[1]) ]
#print date_info_end
date_info_start = [1998, 01]
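# Walk month by month from January 1998 up to yesterday's month; CAIDA publishes
# one AS-relationship snapshot per month, named YYYYMM01.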
k_year = date_info_start[0]
k_month = date_info_start[1]
while (k_year <= date_info_end[0]) :
if k_month >9:
elmt = str(k_year) + str(k_month) + '01'
else:
elmt = str(k_year) + '0' + str(k_month) + '01'
if k_month == 12:
k_month = 1
k_year +=1
elif k_month<12:
k_month +=1
#output = ['.as-rel.txt.gz', '.ppdc-ases.txt.gz']
output = ['.as-rel.txt.gz', '.as-rel.txt.bz2']
for ext in output:
current = elmt + ext
if str(current).strip() not in list_treated_files:
print
current_timestamp = int(elmt)
print current_timestamp
#Select data from the as-relationship
sql_command = """select AS1, AS2, relation from ASRelationships where enddate is NULL and IPversion = 4;"""
cur.execute(sql_command)
stored_AS_relationships = cur.fetchall()
stored_AS_relationships_list = []
for link in stored_AS_relationships:
stored_AS_relationships_list.append(str(link[0]).strip() + '|' + str(link[1]).strip() + '|' + str(link[2]).strip())
#print stored_AS_relationships
print 'len_before_sup = ', len(stored_AS_relationships_list) #, stored_AS_relationships_list
#time.sleep(10)
print 'I am parsing ', current
file = 'data.caida.org/datasets/as-relationships/serial-1/' + current
print 'current_date =', current_timestamp
## dezip file
try:
if '.as-rel.txt.gz' in file and os.path.isfile(file):
command = 'gzip -d ' + file
check = file[:-3]
os.system(command)
elif '.as-rel.txt.bz2' in file and os.path.isfile(file) :
command = 'bunzip2 ' + file
check = file[:-4]
os.system(command)
except:
print 'no need to dezip'
if os.path.isfile(check):
with open (check, 'r') as fh:
for line1 in fh:
#print line1
tab = line1.split('|')
if len(tab) == 3:
test_vc = str(tab[0]).strip() + '|' + str(tab[1]).strip() + '|' + str(tab[2]).strip()
if test_vc not in stored_AS_relationships_list:
sql_command = """ INSERT IGNORE INTO ASRelationships (IPversion, AS1, AS2, relation, startdate) VALUES (%s, %s, %s, %s, %s); """
cur.execute(sql_command, (4, int(tab[0]), int(tab[1]), int(tab[2]), current_timestamp ))
db.commit()
elif test_vc in stored_AS_relationships_list:
print 'I found it so I suppressed ', test_vc
stored_AS_relationships_list.remove(test_vc)
print 'len_after_sup = ', len(stored_AS_relationships_list) #, stored_AS_relationships_list
for couple in stored_AS_relationships_list:
sql_command = """ UPDATE ASRelationships set enddate = %s where IPversion = %s and AS1 = %s and AS2 = %s and relation = %s and enddate is NULL; """
print couple
tab1 = couple.split('|')
print 'update for ', couple, tab1[0], tab1[1], tab1[2]
cur.execute(sql_command, (current_timestamp, 4, int(tab1[0]), int(tab1[1]), int(tab1[2])))
db.commit()
## after treatment put it into the list of treated file
with open('list_of_treated_files_rel.txt', 'a') as fh:
fh.write('%s \n' %(elmt+ext))
else:
print 'file ', check, ' not found in the folder; we pass '
else:
print 'do not treat ', elmt + ext
| mit |
irhete/predictive-monitoring-benchmark | experiments/optimize_params.py | 1 | 10764 | import EncoderFactory
from DatasetManager import DatasetManager
import BucketFactory
import pandas as pd
import numpy as np
from sklearn.metrics import roc_auc_score
from sklearn.pipeline import FeatureUnion, Pipeline
from sklearn.preprocessing import StandardScaler
import time
import os
import sys
from sys import argv
import pickle
from collections import defaultdict
from sklearn.ensemble import RandomForestClassifier
import xgboost as xgb
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from hyperopt import Trials, STATUS_OK, tpe, fmin, hp
import hyperopt
from hyperopt.pyll.base import scope
from hyperopt.pyll.stochastic import sample
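# Hyperopt objective: evaluates a single hyperparameter configuration with
# n_splits-fold cross-validation and returns the negative mean ROC AUC as the loss.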
def create_and_evaluate_model(args):
global trial_nr
trial_nr += 1
start = time.time()
score = 0
for cv_iter in range(n_splits):
dt_test_prefixes = dt_prefixes[cv_iter]
dt_train_prefixes = pd.DataFrame()
for cv_train_iter in range(n_splits):
if cv_train_iter != cv_iter:
dt_train_prefixes = pd.concat([dt_train_prefixes, dt_prefixes[cv_train_iter]], axis=0)
# Bucketing prefixes based on control flow
bucketer_args = {'encoding_method':bucket_encoding,
'case_id_col':dataset_manager.case_id_col,
'cat_cols':[dataset_manager.activity_col],
'num_cols':[],
'random_state':random_state}
if bucket_method == "cluster":
bucketer_args["n_clusters"] = args["n_clusters"]
bucketer = BucketFactory.get_bucketer(bucket_method, **bucketer_args)
bucket_assignments_train = bucketer.fit_predict(dt_train_prefixes)
bucket_assignments_test = bucketer.predict(dt_test_prefixes)
preds_all = []
test_y_all = []
if "prefix" in method_name:
scores = defaultdict(int)
for bucket in set(bucket_assignments_test):
relevant_train_cases_bucket = dataset_manager.get_indexes(dt_train_prefixes)[bucket_assignments_train == bucket]
relevant_test_cases_bucket = dataset_manager.get_indexes(dt_test_prefixes)[bucket_assignments_test == bucket]
dt_test_bucket = dataset_manager.get_relevant_data_by_indexes(dt_test_prefixes, relevant_test_cases_bucket)
test_y = dataset_manager.get_label_numeric(dt_test_bucket)
if len(relevant_train_cases_bucket) == 0:
preds = [class_ratios[cv_iter]] * len(relevant_test_cases_bucket)
else:
dt_train_bucket = dataset_manager.get_relevant_data_by_indexes(dt_train_prefixes, relevant_train_cases_bucket) # one row per event
train_y = dataset_manager.get_label_numeric(dt_train_bucket)
if len(set(train_y)) < 2:
preds = [train_y[0]] * len(relevant_test_cases_bucket)
else:
feature_combiner = FeatureUnion([(method, EncoderFactory.get_encoder(method, **cls_encoder_args)) for method in methods])
if cls_method == "rf":
cls = RandomForestClassifier(n_estimators=500,
max_features=args['max_features'],
random_state=random_state)
elif cls_method == "xgboost":
cls = xgb.XGBClassifier(objective='binary:logistic',
n_estimators=500,
learning_rate= args['learning_rate'],
subsample=args['subsample'],
max_depth=int(args['max_depth']),
colsample_bytree=args['colsample_bytree'],
min_child_weight=int(args['min_child_weight']),
seed=random_state)
elif cls_method == "logit":
cls = LogisticRegression(C=2**args['C'],
random_state=random_state)
elif cls_method == "svm":
cls = SVC(C=2**args['C'],
gamma=2**args['gamma'],
random_state=random_state)
if cls_method == "svm" or cls_method == "logit":
pipeline = Pipeline([('encoder', feature_combiner), ('scaler', StandardScaler()), ('cls', cls)])
else:
pipeline = Pipeline([('encoder', feature_combiner), ('cls', cls)])
pipeline.fit(dt_train_bucket, train_y)
if cls_method == "svm":
preds = pipeline.decision_function(dt_test_bucket)
else:
preds_pos_label_idx = np.where(cls.classes_ == 1)[0][0]
preds = pipeline.predict_proba(dt_test_bucket)[:,preds_pos_label_idx]
if "prefix" in method_name:
auc = 0.5
if len(set(test_y)) == 2:
auc = roc_auc_score(test_y, preds)
scores[bucket] += auc
preds_all.extend(preds)
test_y_all.extend(test_y)
score += roc_auc_score(test_y_all, preds_all)
if "prefix" in method_name:
for k, v in args.items():
for bucket, bucket_score in scores.items():
fout_all.write("%s;%s;%s;%s;%s;%s;%s;%s\n" % (trial_nr, dataset_name, cls_method, method_name, bucket, k, v, bucket_score / n_splits))
fout_all.write("%s;%s;%s;%s;%s;%s;%s;%s\n" % (trial_nr, dataset_name, cls_method, method_name, 0, "processing_time", time.time() - start, 0))
else:
for k, v in args.items():
fout_all.write("%s;%s;%s;%s;%s;%s;%s\n" % (trial_nr, dataset_name, cls_method, method_name, k, v, score / n_splits))
fout_all.write("%s;%s;%s;%s;%s;%s;%s\n" % (trial_nr, dataset_name, cls_method, method_name, "processing_time", time.time() - start, 0))
fout_all.flush()
return {'loss': -score / n_splits, 'status': STATUS_OK, 'model': cls}
dataset_ref = argv[1]
params_dir = argv[2]
n_iter = int(argv[3])
bucket_method = argv[4]
cls_encoding = argv[5]
cls_method = argv[6]
if bucket_method == "state":
bucket_encoding = "last"
else:
bucket_encoding = "agg"
method_name = "%s_%s"%(bucket_method, cls_encoding)
dataset_ref_to_datasets = {
"bpic2011": ["bpic2011_f%s"%formula for formula in range(1,5)],
"bpic2015": ["bpic2015_%s_f2"%(municipality) for municipality in range(1,6)],
"insurance": ["insurance_activity", "insurance_followup"],
"sepsis_cases": ["sepsis_cases_1", "sepsis_cases_2", "sepsis_cases_4"]
}
encoding_dict = {
"laststate": ["static", "last"],
"agg": ["static", "agg"],
"index": ["static", "index"],
"combined": ["static", "last", "agg"]
}
datasets = [dataset_ref] if dataset_ref not in dataset_ref_to_datasets else dataset_ref_to_datasets[dataset_ref]
methods = encoding_dict[cls_encoding]
train_ratio = 0.8
n_splits = 3
random_state = 22
# create results directory
if not os.path.exists(os.path.join(params_dir)):
os.makedirs(os.path.join(params_dir))
for dataset_name in datasets:
# read the data
dataset_manager = DatasetManager(dataset_name)
data = dataset_manager.read_dataset()
cls_encoder_args = {'case_id_col': dataset_manager.case_id_col,
'static_cat_cols': dataset_manager.static_cat_cols,
'static_num_cols': dataset_manager.static_num_cols,
'dynamic_cat_cols': dataset_manager.dynamic_cat_cols,
'dynamic_num_cols': dataset_manager.dynamic_num_cols,
'fillna': True}
# determine min and max (truncated) prefix lengths
min_prefix_length = 1
if "traffic_fines" in dataset_name:
max_prefix_length = 10
elif "bpic2017" in dataset_name:
max_prefix_length = min(20, dataset_manager.get_pos_case_length_quantile(data, 0.90))
else:
max_prefix_length = min(40, dataset_manager.get_pos_case_length_quantile(data, 0.90))
# split into training and test
train, _ = dataset_manager.split_data_strict(data, train_ratio, split="temporal")
# prepare chunks for CV
dt_prefixes = []
class_ratios = []
for train_chunk, test_chunk in dataset_manager.get_stratified_split_generator(train, n_splits=n_splits):
class_ratios.append(dataset_manager.get_class_ratio(train_chunk))
# generate data where each prefix is a separate instance
dt_prefixes.append(dataset_manager.generate_prefix_data(test_chunk, min_prefix_length, max_prefix_length))
del train
# set up search space
if cls_method == "rf":
space = {'max_features': hp.uniform('max_features', 0, 1)}
elif cls_method == "xgboost":
space = {'learning_rate': hp.uniform("learning_rate", 0, 1),
'subsample': hp.uniform("subsample", 0.5, 1),
'max_depth': scope.int(hp.quniform('max_depth', 4, 30, 1)),
'colsample_bytree': hp.uniform("colsample_bytree", 0.5, 1),
'min_child_weight': scope.int(hp.quniform('min_child_weight', 1, 6, 1))}
elif cls_method == "logit":
space = {'C': hp.uniform('C', -15, 15)}
elif cls_method == "svm":
space = {'C': hp.uniform('C', -15, 15),
'gamma': hp.uniform('gamma', -15, 15)}
if bucket_method == "cluster":
space['n_clusters'] = scope.int(hp.quniform('n_clusters', 2, 50, 1))
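    # note: for logit and svm the sampled C and gamma values are exponents; the
    # classifiers are built with 2**C and 2**gamma inside create_and_evaluate_model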
# optimize parameters
trial_nr = 1
trials = Trials()
fout_all = open(os.path.join(params_dir, "param_optim_all_trials_%s_%s_%s.csv" % (cls_method, dataset_name, method_name)), "w")
if "prefix" in method_name:
fout_all.write("%s;%s;%s;%s;%s;%s;%s;%s\n" % ("iter", "dataset", "cls", "method", "nr_events", "param", "value", "score"))
else:
fout_all.write("%s;%s;%s;%s;%s;%s;%s\n" % ("iter", "dataset", "cls", "method", "param", "value", "score"))
best = fmin(create_and_evaluate_model, space, algo=tpe.suggest, max_evals=n_iter, trials=trials)
fout_all.close()
# write the best parameters
best_params = hyperopt.space_eval(space, best)
outfile = os.path.join(params_dir, "optimal_params_%s_%s_%s.pickle" % (cls_method, dataset_name, method_name))
# write to file
with open(outfile, "wb") as fout:
pickle.dump(best_params, fout)
| apache-2.0 |
arabenjamin/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 257 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
"""
This produces a dataset 5 times bigger than the original one,
by moving the 8x8 images in X around by 1px to left, right, down, up
"""
direction_vectors = [
[[0, 1, 0],
[0, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[1, 0, 0],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 1],
[0, 0, 0]],
[[0, 0, 0],
[0, 0, 0],
[0, 1, 0]]]
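    # each 3x3 kernel above shifts the 8x8 image by one pixel in one of the four directions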
shift = lambda x, w: convolve(x.reshape((8, 8)), mode='constant',
weights=w).ravel()
X = np.concatenate([X] +
[np.apply_along_axis(shift, 1, X, vector)
for vector in direction_vectors])
Y = np.concatenate([Y for _ in range(5)], axis=0)
return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
X, Y = nudge_dataset(X, digits.target)
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001) # 0-1 scaling
X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
test_size=0.2,
random_state=0)
# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])
###############################################################################
# Training
# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0
# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)
# Training Logistic regression
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)
###############################################################################
# Evaluation
print()
print("Logistic regression using RBM features:\n%s\n" % (
metrics.classification_report(
Y_test,
classifier.predict(X_test))))
print("Logistic regression using raw pixel features:\n%s\n" % (
metrics.classification_report(
Y_test,
logistic_classifier.predict(X_test))))
###############################################################################
# Plotting
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
text-machine-lab/CliNER | code/DatasetCliner_experimental.py | 1 | 20008 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 27 12:55:40 2017
@author: elena
"""
import sklearn.preprocessing
import collections
import codecs
#import utils_nlp
import re
import time
#import token
import os
import pickle
import random
import numpy as np
import helper_dataset as hd
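# Convert parallel lists of tokenized sentences and tag sequences into the per-sentence
# token/label structures used by Dataset, updating the token, label and character counters.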
def lists_to_dataset_structure(sentences_tokens,sentence_tags,total_token_counter,token_count,label_count,character_count):
labels=[]
tokens=[]
new_label_sequence=[]
new_token_sequence=[]
features=""
feature_file_name=""
feature_vector_size=0
for idx,sentence in enumerate(sentences_tokens):
for token_idx,token_i in enumerate(sentence):
new_token_sequence.append(token_i)
new_label_sequence.append(sentence_tags[idx][token_idx])
token_count[token_i] += 1
label_count[sentence_tags[idx][token_idx]] += 1
if token_idx == len(sentence) - 1:
labels.append(new_label_sequence)
tokens.append(new_token_sequence)
new_token_sequence = []
new_label_sequence = []
# FEATURES ARE NOT SUPPORTED: Can be done if we are getting a third list that looks like [[f1,f2,f3],[f1,f2,f3]... for each token]
token_features=[]
features_as_array=np.array(token_features,dtype=np.dtype('int32'))
features_as_array=features_as_array.reshape((features_as_array.shape[0],1))
features_as_array=np.transpose(features_as_array)
features=""
feature_file_name=""
feature_vector_size=0
total_token_counter+=1
for character in token_i:
character_count[character] += 1
return labels, tokens, token_count, label_count, character_count,features,feature_file_name,feature_vector_size
class Dataset(object):
"""A class for handling data sets."""
def __init__(self, name='', verbose=False, debug=False):
self.name = name
self.verbose = verbose
self.debug = debug
def _parse_dataset(self, dataset_filepath, dataset_type, sentences_list=[],tags_list=[], Not_here=False):
token_count = collections.defaultdict(lambda: 0) #initialized by a function
label_count = collections.defaultdict(lambda: 0)
character_count = collections.defaultdict(lambda: 0)
longest_sentence=0
# Currently Not supported, features
#feature_file_name=os.getcwd()+os.sep+"test_cliner"+dataset_type+".hdf5"
# size_of_features=0
# Currentlt Not supported - features
# f = h5py.File(feature_file_name, "w")
# dset = f.create_dataset("word-features", (0, size_of_features), maxshape=(None, size_of_features),dtype=np.dtype('int32'), chunks=True) #44
#dt = h5py.special_dtype(vlen=np.dtype('int32'))
#sentence_words=f.create_dataset("sentences-words",(0,),dtype=dt,chunks=True,maxshape=(None,))
line_count =-1
sent_count=-1
total_token_counter=0
token_counter_offset_sent=0
sentence_counter=0
tokens=[]
labels=[]
features=[]
characters=[] # NOT USED (?)
#extract token features for agumentation
token_features=[]
token_lengths=[]
new_token_sequence=[]
new_label_sequence = []
#new_token_features_sequence=[]
#labels, tokens, token_count, label_count, character_count,features,feature_file_name,feature_vector_size
if Not_here==False:
labels, tokens, token_count, label_count, character_count,features,feature_file_name,feature_vector_size=lists_to_dataset_structure(sentences_list,tags_list,total_token_counter,token_count,label_count,character_count)
return labels, tokens, token_count, label_count, character_count,features,feature_file_name,feature_vector_size
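    # Map tokens, characters and labels to integer indices and build the padded
    # character arrays and one-hot (binarized) label vectors for each dataset split.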
def _convert_to_indices(self, dataset_types):
# Frank and Jennies Function
tokens = self.tokens
labels = self.labels
token_to_index = self.token_to_index
character_to_index = self.character_to_index
label_to_index = self.label_to_index
index_to_label = self.index_to_label
# Map tokens and labels to their indices
token_indices = {}
label_indices = {}
characters = {}
token_lengths = {}
character_indices = {}
character_indices_padded = {}
for dataset_type in dataset_types:
print (dataset_type)
token_indices[dataset_type] = []
characters[dataset_type] = []
character_indices[dataset_type] = []
token_lengths[dataset_type] = []
character_indices_padded[dataset_type] = []
for token_sequence in tokens[dataset_type]:
token_indices[dataset_type].append([token_to_index.get(token, self.UNK_TOKEN_INDEX) for token in token_sequence])
characters[dataset_type].append([list(token) for token in token_sequence])
character_indices[dataset_type].append([[character_to_index.get(character, random.randint(1, max(self.index_to_character.keys()))) for character in token] for token in token_sequence])
token_lengths[dataset_type].append([len(token) for token in token_sequence])
longest_token_length_in_sequence = max(token_lengths[dataset_type][-1])
character_indices_padded[dataset_type].append([hd.pad_list(temp_token_indices, longest_token_length_in_sequence, self.PADDING_CHARACTER_INDEX) for temp_token_indices in character_indices[dataset_type][-1]])
label_indices[dataset_type] = []
for label_sequence in labels[dataset_type]:
label_indices[dataset_type].append([label_to_index[label] for label in label_sequence])
label_binarizer = sklearn.preprocessing.LabelBinarizer()
label_binarizer.fit(range(max(index_to_label.keys()) + 1))
label_vector_indices = {}
for dataset_type in dataset_types:
label_vector_indices[dataset_type] = []
for label_indices_sequence in label_indices[dataset_type]:
label_vector_indices[dataset_type].append(label_binarizer.transform(label_indices_sequence))
return token_indices, label_indices, character_indices_padded, character_indices, token_lengths, characters, label_vector_indices
def update_dataset(self, dataset_filepaths, dataset_types, Datasets_tokens, Datasets_labels):
'''
dataset_filepaths : dictionary with keys 'train', 'valid', 'test', 'deploy'
Overwrites the data of type specified in dataset_types using the existing token_to_index, character_to_index, and label_to_index mappings.
'''
# def _parse_dataset(self, dataset_filepath, dataset_type, sentences_list=[],tags_list=[], Not_here=False):
for dataset_type in dataset_types:
print (dataset_type)
self.labels[dataset_type], self.tokens[dataset_type], _, _, _,_,_,_= self._parse_dataset("",dataset_type, Datasets_tokens[dataset_type],Datasets_labels[dataset_type])
token_indices, label_indices, character_indices_padded, character_indices, token_lengths, characters, label_vector_indices = self._convert_to_indices(dataset_types)
self.token_indices.update(token_indices)
self.label_indices.update(label_indices)
self.character_indices_padded.update(character_indices_padded)
self.character_indices.update(character_indices)
self.token_lengths.update(token_lengths)
self.characters.update(characters)
self.label_vector_indices.update(label_vector_indices)
def load_dataset(self,avaliable_datasets_sent,avaliable_datasets_labels, dataset_filepaths, parameters, token_to_vector=None,pretrained_dataset=None):
'''
dataset_filepaths : dictionary with keys 'train', 'valid', 'test', 'deploy'
'''
start_time = time.time()
print('Load dataset... \n')
if parameters['token_pretrained_embedding_filepath'] != '':
if token_to_vector==None:
token_to_vector = hd.load_pretrained_token_embeddings(parameters)
else:
token_to_vector = {}
all_tokens_in_pretraining_dataset = []
all_characters_in_pretraining_dataset = []
if parameters['use_pretrained_model']:
#temp_pretrained_dataset_adress="./models/NN_models/1235-4/dataset.pickle" #"./models/NN_models/1234-5/dataset.pickle"
if pretrained_dataset==None:
temp_pretrained_dataset_adress=parameters['model_folder']+os.sep+"dataset.pickle"
pretraining_dataset = pickle.load(open(temp_pretrained_dataset_adress, "rb"))
print ("Pre-loading Pre-trained dataset objects")
else:
pretraining_dataset=pretrained_dataset
print ("Pretrained dataset was pre-loaded")
all_tokens_in_pretraining_dataset = pretraining_dataset.index_to_token.values()
all_characters_in_pretraining_dataset = pretraining_dataset.index_to_character.values()
remap_to_unk_count_threshold = 1
self.UNK_TOKEN_INDEX = 0
self.PADDING_CHARACTER_INDEX = 0
self.tokens_mapped_to_unk = []
self.UNK = 'UNK'
self.unique_labels = []
labels = {}
tokens = {}
label_count = {}
token_count = {}
character_count = {}
features={}
features_file_names={}
feature_vector_size={}
#deploy
for dataset_type in ['train', 'valid', 'test','deploy']:
Not_here=False
if dataset_type not in avaliable_datasets_sent:
Not_here=True
#_parse_dataset(self, dataset_filepath,dataset_type,sentences_list="",tags_list="")
if Not_here==False:
labels[dataset_type], tokens[dataset_type], token_count[dataset_type], label_count[dataset_type], character_count[dataset_type], features[dataset_type], \
features_file_names[dataset_type],feature_vector_size[dataset_type] \
= self._parse_dataset("", dataset_type, sentences_list=avaliable_datasets_sent[dataset_type], tags_list=avaliable_datasets_labels[dataset_type])
if Not_here==True:
labels[dataset_type], tokens[dataset_type], token_count[dataset_type], label_count[dataset_type], character_count[dataset_type], features[dataset_type], \
features_file_names[dataset_type],feature_vector_size[dataset_type] \
= self._parse_dataset("", dataset_type, sentences_list=[], tags_list=[]) #
token_count['all'] = {}
for token in list(token_count['train'].keys()) + list(token_count['valid'].keys()) + list(token_count['test'].keys()) + list(token_count['deploy'].keys()):
token_count['all'][token] = token_count['train'][token] + token_count['valid'][token] + token_count['test'][token] + token_count['deploy'][token]
if parameters['load_all_pretrained_token_embeddings']:
for token in token_to_vector:
if token not in token_count['all']:
token_count['all'][token] = -1
token_count['train'][token] = -1
for token in all_tokens_in_pretraining_dataset:
if token not in token_count['all']:
token_count['all'][token] = -1
token_count['train'][token] = -1
character_count['all'] = {}
for character in list(character_count['train'].keys()) + list(character_count['valid'].keys()) + list(character_count['test'].keys()) + list(character_count['deploy'].keys()):
character_count['all'][character] = character_count['train'][character] + character_count['valid'][character] + character_count['test'][character] + character_count['deploy'][character]
for character in all_characters_in_pretraining_dataset:
if character not in character_count['all']:
character_count['all'][character] = -1
character_count['train'][character] = -1
label_count['all'] = {}
for character in list(label_count['train'].keys()) + list(label_count['valid'].keys()) + list(label_count['test'].keys()) + list(label_count['deploy'].keys()):
label_count['all'][character] = label_count['train'][character] + label_count['valid'][character] + label_count['test'][character] + label_count['deploy'][character]
token_count['all'] = hd.order_dictionary(token_count['all'], 'value_key', reverse = True)
label_count['all'] = hd.order_dictionary(label_count['all'], 'key', reverse = False)
character_count['all'] = hd.order_dictionary(character_count['all'], 'value', reverse = True)
if self.verbose: print('character_count[\'all\']: {0}'.format(character_count['all']))
token_to_index = {}
token_to_index[self.UNK] = self.UNK_TOKEN_INDEX
iteration_number = 0
number_of_unknown_tokens = 0
if self.verbose: print("parameters['remap_unknown_tokens_to_unk']: {0}".format(parameters['remap_unknown_tokens_to_unk']))
if self.verbose: print("len(token_count['train'].keys()): {0}".format(len(token_count['train'].keys())))
for token, count in token_count['all'].items():
if iteration_number == self.UNK_TOKEN_INDEX: iteration_number += 1
if parameters['remap_unknown_tokens_to_unk'] == 1 and \
(token_count['train'][token] == 0 or \
parameters['load_only_pretrained_token_embeddings']) and \
not hd.is_token_in_pretrained_embeddings(token, token_to_vector, parameters) and \
token not in all_tokens_in_pretraining_dataset:
token_to_index[token] = self.UNK_TOKEN_INDEX
number_of_unknown_tokens += 1
self.tokens_mapped_to_unk.append(token)
else:
token_to_index[token] = iteration_number
iteration_number += 1
infrequent_token_indices = []
for token, count in token_count['train'].items():
if 0 < count <= remap_to_unk_count_threshold:
infrequent_token_indices.append(token_to_index[token])
#if self.verbose: print("len(token_count['train']): {0}".format(len(token_count['train'])))
# if self.verbose: print("len(infrequent_token_indices): {0}".format(len(infrequent_token_indices)))
# Ensure that both B- and I- versions exist for each label
labels_without_bio = set()
for label in label_count['all'].keys():
new_label = hd.remove_bio_from_label_name(label)
labels_without_bio.add(new_label)
for label in labels_without_bio:
if label == 'O':
continue
if parameters['tagging_format'] == 'bioes':
prefixes = ['B-', 'I-', 'E-', 'S-']
else:
prefixes = ['B-', 'I-']
for prefix in prefixes:
l = prefix + label
if l not in label_count['all']:
label_count['all'][l] = 0
label_count['all'] = hd.order_dictionary(label_count['all'], 'key', reverse = False)
if parameters['use_pretrained_model']:
print ("USE_PRETRAINED_MODEL ACTIVE")
self.unique_labels = sorted(list(pretraining_dataset.label_to_index.keys()))
# Make sure labels are compatible with the pretraining dataset.
for label in label_count['all']:
if label not in pretraining_dataset.label_to_index:
raise AssertionError("The label {0} does not exist in the pretraining dataset. ".format(label) +
"Please ensure that only the following labels exist in the dataset: {0}".format(', '.join(self.unique_labels)))
label_to_index = pretraining_dataset.label_to_index.copy()
else:
label_to_index = {}
iteration_number = 0
for label, count in label_count['all'].items():
label_to_index[label] = iteration_number
iteration_number += 1
self.unique_labels.append(label)
character_to_index = {}
iteration_number = 0
for character, count in character_count['all'].items():
if iteration_number == self.PADDING_CHARACTER_INDEX: iteration_number += 1
character_to_index[character] = iteration_number
iteration_number += 1
token_to_index = hd.order_dictionary(token_to_index, 'value', reverse = False)
if self.verbose: print('token_to_index: {0}'.format(token_to_index))
index_to_token = hd.reverse_dictionary(token_to_index)
if parameters['remap_unknown_tokens_to_unk'] == 1: index_to_token[self.UNK_TOKEN_INDEX] = self.UNK
if self.verbose: print('index_to_token: {0}'.format(index_to_token))
label_to_index = hd.order_dictionary(label_to_index, 'value', reverse = False)
index_to_label = hd.reverse_dictionary(label_to_index)
character_to_index = hd.order_dictionary(character_to_index, 'value', reverse = False)
index_to_character = hd.reverse_dictionary(character_to_index)
self.token_to_index = token_to_index
self.index_to_token = index_to_token
self.index_to_character = index_to_character
self.character_to_index = character_to_index
self.index_to_label = index_to_label
self.label_to_index = label_to_index
self.tokens = tokens
self.labels = labels
dataset_types=['train','test','valid','deploy']
token_indices, label_indices, character_indices_padded, character_indices, token_lengths, characters, label_vector_indices = self._convert_to_indices(dataset_types)
self.token_indices = token_indices
self.label_indices = label_indices
self.character_indices_padded = character_indices_padded
self.character_indices = character_indices
self.token_lengths = token_lengths
self.characters = characters
self.label_vector_indices = label_vector_indices
self.number_of_classes = max(self.index_to_label.keys()) + 1
self.vocabulary_size = max(self.index_to_token.keys()) + 1
self.alphabet_size = max(self.index_to_character.keys()) + 1
# unique_labels_of_interest is used to compute F1-scores.
self.unique_labels_of_interest = list(self.unique_labels)
self.unique_labels_of_interest.remove('O')
self.unique_label_indices_of_interest = []
for lab in self.unique_labels_of_interest:
self.unique_label_indices_of_interest.append(label_to_index[lab])
self.infrequent_token_indices = infrequent_token_indices
elapsed_time = time.time() - start_time
print('done ({0:.2f} seconds)'.format(elapsed_time))
self.feature_vector_size=0
return token_to_vector
| apache-2.0 |
text-machine-lab/CliNER | code/model.py | 1 | 27656 | ######################################################################
# CliNER - model.py #
# #
# Willie Boag #
# #
# Purpose: Define the model for clinical concept extraction. #
######################################################################
import sys
from sklearn.feature_extraction import DictVectorizer
import os
import random
import math
import io
import numpy as np
from time import localtime, strftime
from collections import defaultdict
from notes.documents import labels as tag2id, id2tag
from tools import flatten, save_list_structure, reconstruct_list
from tools import print_str, print_vec, print_files, write
cliner_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
tmp_dir = os.path.join(cliner_dir, 'data', 'tmp')
class ClinerModel:
def log(self, out, model_file=None):
'''
ClinerModel::log()
Log training information of model.
@param out. Either a filename or file channel to output the log string.
@param model_file. A path to optionally identify where the model was saved.
@return None
'''
if not self._log:
log = self.__log_str(model_file)
else:
log = self._log
# depending on whether it is already opened as a channel
if isinstance(out,type(sys.stdout)):
write(out, '%s\n' % log)
else:
with open(out, 'a') as f:
write(f, '%s\n' % log)
def __log_str_NEURAL(self,model_file=None):
""
def __log_str(self, model_file=None):
'''
ClinerModel::__log_str()
Build a string of information about training for the model's log file.
@param model_file. A path to optionally identify where the model was saved.
@return A string of the model's training information
'''
assert self._is_trained, 'ClinerModel not trained'
with io.StringIO() as f:
write(f, u'\n')
write(f, '-'*40)
write(f, u'\n\n')
if model_file:
write(f, 'model : %s\n' % os.path.abspath(model_file))
write(f, u'\n')
if self._use_lstm:
write(f, u'modeltype: LSTM\n')
else:
write(f, u'modeltype: CRF\n')
if 'hyperparams' in self._score:
for name,value in self._score['hyperparams'].items():
write(f, u'\t%-10s: %s\n' % (name,value))
write(f, u'\n')
print_str(f, 'features', self._features)
write(f, u'\n')
write(f, u'\n')
write(f, 'training began: %s\n' % self._time_train_begin)
write(f, 'training ended: %s\n' % self._time_train_end)
write(f, u'\n')
write(f, u'scores\n')
print_vec(f, 'train precision', self._score['train']['precision'])
print_vec(f, 'train recall ', self._score['train']['recall' ])
print_vec(f, 'train f1 ', self._score['train']['f1' ])
write(f, self._score['train']['conf'])
if 'dev' in self._score:
print_vec(f, u'dev precision ', self._score['dev']['precision'])
print_vec(f, u'dev recall ', self._score['dev']['recall' ])
print_vec(f, u'dev f1 ', self._score['dev']['f1' ])
write(f, self._score['dev']['conf'])
if 'test' in self._score:
print_vec(f, u'test precision ', self._score['test']['precision'])
print_vec(f, u'test recall ', self._score['test']['recall' ])
print_vec(f, u'test f1 ', self._score['test']['f1' ])
write(f, self._score['test']['conf'])
if 'history' in self._score:
for label,vec in self._score['history'].items():
print_vec(f, '%-16s'%label, vec)
write(f, u'\n')
if self._training_files:
write(f, u'\n')
write(f, u'Training Files\n')
if len(self._training_files) < 200:
print_files(f, self._training_files)
else:
write(f, '\t%d files\n'%len(self._training_files))
write(f, u'\n')
write(f, u'-'*40)
write(f, u'\n\n')
# get output as full string
contents = f.getvalue()
return contents
def __init__(self, use_lstm):
"""
ClinerModel::__init__()
Instantiate a ClinerModel object.
@param use_lstm. Bool indicating whether to train a CRF or LSTM.
"""
self._use_lstm = use_lstm
self._is_trained = False
self._clf = "latin1"
self._vocab = None
self._training_files = None
self._log = None
self._text_feats = None
# Import the tools for either CRF or LSTM
if use_lstm:
# NEW
import DatasetCliner_experimental as Exp
import tensorflow as tf
import entity_lstm as entity_model
import training_predict_LSTM
import pickle
import copy
import helper_dataset as hd
import shutil
self._pretrained_dataset=None
self._pretrained_wordvectors=None
self._current_model=None
self._parameters=None
def train(self, train_notes, val=[], test=[]):
"""
ClinerModel::train()
Purpose: Train a Machine Learning model on annotated data
@param notes. A list of Note objects (containing text and annotations)
@return None
"""
# Extract formatted data
train_sents = flatten([n.getTokenizedSentences() for n in train_notes])
train_labels = flatten([n.getTokenLabels() for n in train_notes])
if test:
test_sents = flatten([n.getTokenizedSentences() for n in test])
test_labels = flatten([n.getTokenLabels() for n in test])
else:
test_sents = []
test_labels = []
if val:
print ("VAL")
val_sents = flatten([n.getTokenizedSentences() for n in val])
val_labels = flatten([n.getTokenLabels() for n in val])
self.train_fit(train_sents,train_labels,val_sents=val_sents,val_labels=val_labels,test_sents=test_sents,test_labels=test_labels)
else:
print ("NO DEV")
self.train_fit(train_sents, train_labels, dev_split=0.1,
test_sents=test_sents, test_labels=test_labels)
self._train_files = [ n.getName() for n in train_notes+val ]
def train_fit(self, train_sents, train_labels, val_sents=None, val_labels=None,
test_sents=None, test_labels=None, dev_split=None):
"""
ClinerModel::train_fit()
Purpose: Train clinical concept extraction model using annotated data.
@param train_sents. A list of sentences, where each sentence is tokenized into words.
@param train_labels. Parallel to 'train_sents', 7-way labels for concept spans.
@param val_sents. Validation data. Same format as tokenized_sents
@param val_labels. Validation data. Same format as iob_nested_labels
@param dev_split A real number from 0 to 1
"""
# metadata
self._time_train_begin = strftime("%Y-%m-%d %H:%M:%S", localtime())
# train classifier
if self._use_lstm==False:
voc, clf, dev_score, enabled_features = generic_train('all',
train_sents ,
train_labels ,
self._use_lstm ,
val_sents=val_sents ,
val_labels=val_labels ,
test_sents=test_sents ,
test_labels=test_labels ,
dev_split=dev_split )
self._is_trained = True
self._vocab = voc
self._clf = clf
self._score = dev_score
self._features = enabled_features
# metadata
self._time_train_end = strftime("%Y-%m-%d %H:%M:%S", localtime())
else:
print ("IN ERROR CHECK")
print (dev_split)
parameters,dataset,best = generic_train('all',
train_sents ,
train_labels ,
self._use_lstm ,
val_sents=val_sents ,
val_labels=val_labels ,
test_sents=test_sents ,
test_labels=test_labels ,
dev_split=dev_split )
self._is_trained = True
self.pretrained_dataset=dataset
self.parameters=parameters
self._score=best
self._time_train_end = strftime("%Y-%m-%d %H:%M:%S", localtime())
print ("BEST EPOCH")
print (best)
#self._vocab = voc
#self._clf = clf
#self._score = dev_score
#self._features = enabled_features
# metadata
#self._time_train_end = strftime("%Y-%m-%d %H:%M:%S", localtime())
def predict_classes_from_document(self, document):
"""
ClinerModel::predict_classes_from_documents()
Predict concept annotations for a given document
@param note. A Document object (containing text and annotations)
@return List of predictions
"""
# Extract formatted data
tokenized_sents = document.getTokenizedSentences()
return self.predict_classes(tokenized_sents)
def predict_classes(self, tokenized_sents):
"""
ClinerModel::predict_classes()
Predict concept annotations for unlabeled, tokenized sentences
@param tokenized_sents. A list of sentences, where each sentence is tokenized
into words
@return List of predictions
"""
hyperparams = {}
# Predict labels for prose
if self._use_lstm:
            if self.parameters == None:
                hyperparams['parameters'] = hd.load_parameters_from_file("LSTM_parameters.txt")
            else:
                hyperparams['parameters'] = self.parameters
            if self._pretrained_dataset == None:
                temp_pretrained_dataset = os.path.join(hyperparams['parameters']['model_folder'],
                                                       "dataset.pickle")
                hyperparams['pretrained_dataset'] = pickle.load(open(temp_pretrained_dataset, 'rb'))
            else:
                hyperparams['pretrained_dataset'] = self._pretrained_dataset
vectorized_pred = generic_predict('all' ,
tokenized_sents ,
vocab = self._vocab ,
clf = self._clf ,
use_lstm = self._use_lstm,
hyperparams = hyperparams)
#pretrained_dataset=self._pretrained_dataset,
#tokens_to_vec=self._pretrained_wordvector,
#current_model=self._current_model,
#parameters=self.parameters)
#self._current_model=model
if self._use_lstm:
iob_pred = vectorized_pred
else:
iob_pred = [ [id2tag[p] for p in seq] for seq in vectorized_pred ]
return iob_pred
############################################################################
### Lowest-level (interfaces to ML modules) ###
############################################################################
def generic_train(p_or_n, train_sents, train_labels, use_lstm, val_sents=None, val_labels=None, test_sents=None, test_labels=None, dev_split=None):
'''
generic_train()
Train a model that works for both prose and nonprose
@param p_or_n. A string that indicates "prose", "nonprose", or "all"
@param train_sents. A list of sentences; each sentence is tokenized into words
@param train_labels. Parallel to `train_sents`, 7-way labels for concept spans
@param use_lstm Bool indicating whether to train CRF or LSTM.
@param val_sents. Validation data. Same format as train_sents
@param val_labels. Validation data. Same format as train_labels
@param dev_split. A real number from 0 to 1
'''
# Must have data to train on:
if len(train_sents) == 0:
raise Exception('Training must have %s training examples' % p_or_n)
# if you should split the data into train/dev yourself
if (not val_sents) and (dev_split > 0.0) and (len(train_sents)>10):
p = int(dev_split*100)
sys.stdout.write('\tCreating %d/%d train/dev split\n' % (100-p,p))
perm = list(range(len(train_sents)))
random.shuffle(perm)
train_sents = [ train_sents[i] for i in perm ]
train_labels = [ train_labels[i] for i in perm ]
ind = int(dev_split*len(train_sents))
val_sents = train_sents[:ind ]
train_sents = train_sents[ ind:]
val_labels = train_labels[:ind ]
train_labels = train_labels[ ind:]
else:
sys.stdout.write('\tUsing existing validation data\n')
sys.stdout.write('\tvectorizing words %s\n' % p_or_n)
if use_lstm:
print ("TESTING NEW DATSET OBJECT")
dataset = Exp.Dataset()
parameters=hd.load_parameters_from_file("LSTM_parameters.txt")
parameters['use_pretrained_model']=False
Datasets_tokens={}
Datasets_labels={}
Datasets_tokens['train']=train_sents
Datasets_labels['train']=train_labels
if val_sents!=None:
Datasets_tokens['valid']=val_sents
Datasets_labels['valid']=val_labels
if test_sents!=None:
Datasets_tokens['test']=test_sents
Datasets_labels['test']=test_labels
dataset.load_dataset(Datasets_tokens,Datasets_labels,"",parameters)
pickle.dump(dataset, open(os.path.join(parameters['model_folder'], 'dataset.pickle'), 'wb'))
print (Datasets_tokens['valid'][0])
print (Datasets_tokens['test'][0])
parameters['Feature_vector_length']=dataset.feature_vector_size
parameters['use_features_before_final_lstm']=False
parameters['learning_rate']=0.005
sess = tf.Session()
number_of_sent=list(range(len(dataset.token_indices['train'])))
with sess.as_default():
model=entity_model.EntityLSTM(dataset,parameters)
sess.run(tf.global_variables_initializer())
model.load_pretrained_token_embeddings(sess, dataset,parameters)
epoch_number = -1
transition_params_trained = np.random.rand(5+2,5+2)
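            # randomly initialised CRF transition parameters; replaced by the values
            # returned from train_step at every training iteration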
values={}
values["best"]=0
f1_dictionary={}
f1_dictionary['best']=0
model_saver = tf.train.Saver(max_to_keep=100)
print ("START TRAINING")
            eval_dir = os.path.join(tmp_dir, 'cliner_eval_%d' % random.randint(0,256) + os.sep)
parameters['conll_like_result_folder']=eval_dir
test_temp = os.path.join(parameters['conll_like_result_folder'], 'test/')
train_temp = os.path.join(parameters['conll_like_result_folder'], 'train/')
valid_temp = os.path.join(parameters['conll_like_result_folder'], 'valid/')
os.mkdir(parameters['conll_like_result_folder'])
os.mkdir(test_temp)
os.mkdir(train_temp)
os.mkdir(valid_temp)
while epoch_number<90:
average_loss_per_phrase=0
accuracy_per_phase=0
step = 0
epoch_number += 1
if epoch_number != 0:
sequence_numbers=list(range(len(dataset.token_indices['train'])))
random.shuffle(sequence_numbers)
for sequence_number in sequence_numbers:
loss,accuracy,transition_params_trained=training_predict_LSTM.train_step(sess, dataset, sequence_number, model)
average_loss_per_phrase+=loss
accuracy_per_phase+=accuracy
step += 1
if step % 10 == 0:
print('Training {0:.2f}% done\n'.format(step/len(sequence_numbers)*100))
model_saver.save(sess, os.path.join(parameters['model_folder'], 'model_{0:05d}.ckpt'.format(epoch_number)))
total_loss=average_loss_per_phrase
total_accuracy=accuracy_per_phase
average_loss_per_phrase=average_loss_per_phrase/len(number_of_sent)
accuracy_per_phase=accuracy_per_phase/len(number_of_sent)
if epoch_number>0:
""
f1,predictions=training_predict_LSTM.prediction_step(sess,dataset,"test",model,epoch_number,parameters['conll_like_result_folder'],transition_params_trained)
f1_train,_=training_predict_LSTM.prediction_step(sess,dataset,"train", model,epoch_number,parameters['conll_like_result_folder'],transition_params_trained)
f1_valid,_=training_predict_LSTM.prediction_step(sess,dataset,"valid", model,epoch_number,parameters['conll_like_result_folder'],transition_params_trained)
correctly_predicted_tokens=training_predict_LSTM.compute_train_accuracy(parameters['conll_like_result_folder']+"valid"+os.sep+"epoche_"+str(epoch_number)+".txt")
if f1_dictionary['best']<float(f1_valid):
f1_dictionary['epoche']=epoch_number
f1_dictionary['best']=float(f1_valid)
if values["best"]<correctly_predicted_tokens:
values["epoche"]=epoch_number
values["best"]=correctly_predicted_tokens
#print ("Number of correctly predicted tokens -test "+str(correctly_predicted_tokens))
print ("NEW EPOCHE"+" "+str(epoch_number))
print ("Current F1 on train"+" "+str(f1_train))
print ("Current F1 on valid"+" "+str(f1_valid))
print ("Current F1 on test"+" "+str(f1))
print ("Current F1 best (validation): ")
print (f1_dictionary)
shutil.rmtree(parameters['conll_like_result_folder'])
return parameters, dataset,f1_dictionary['best']
else:
########
# CRF
########
from feature_extraction.features import extract_features
# vectorize tokenized sentences
text_features = extract_features(train_sents)
# type(text_features): <type 'list'>
# Collect list of feature types
enabled_features = set()
for sf in text_features:
for wf in sf:
for (feature_type,instance),value in wf.items():
if feature_type.startswith('prev'):
feature_type = 'PREV*'
if feature_type.startswith('next'):
feature_type = 'NEXT*'
enabled_features.add(feature_type)
enabled_features = sorted(enabled_features)
# Vectorize features
vocab = DictVectorizer()
flat_X_feats = vocab.fit_transform( flatten(text_features) )
X_feats = reconstruct_list(flat_X_feats, save_list_structure(text_features))
# vectorize IOB labels
Y_labels = [ [tag2id[y] for y in y_seq] for y_seq in train_labels ]
assert len(X_feats) == len(Y_labels)
for i in range(len(X_feats)):
assert X_feats[i].shape[0] == len(Y_labels[i])
# if there is specified validation data, then vectorize it
if val_sents:
# vectorize validation X
val_text_features = extract_features(val_sents)
flat_val_X_feats = vocab.transform( flatten(val_text_features) )
val_X = reconstruct_list(flat_val_X_feats,
save_list_structure(val_text_features))
# vectorize validation Y
val_Y = [ [tag2id[y] for y in y_seq] for y_seq in val_labels ]
# if there is specified test data, then vectorize it
if test_sents:
# vectorize test X
test_text_features = extract_features(test_sents)
flat_test_X_feats = vocab.transform( flatten(test_text_features) )
test_X = reconstruct_list(flat_test_X_feats,
save_list_structure(test_text_features))
# vectorize test Y
test_Y = [ [tag2id[y] for y in y_seq] for y_seq in test_labels ]
else:
test_X = None
test_Y = None
sys.stdout.write('\ttraining classifiers %s\n' % p_or_n)
if use_lstm:
# train using lstm
clf, dev_score = keras_ml.train(X_seq_ids, Y_labels, tag2id, len(vocab),
val_X_ids=val_X, val_Y_ids=val_Y,
test_X_ids=test_X, test_Y_ids=test_Y)
else:
# train using crf
from machine_learning import crf
clf, dev_score = crf.train(X_feats, Y_labels, val_X=val_X, val_Y=val_Y,
test_X=test_X, test_Y=test_Y)
return vocab, clf, dev_score, enabled_features
#def generic_predict(p_or_n, tokenized_sents, vocab, clf, use_lstm, pretrained_dataset=None,tokens_to_vec=None, current_model=None, parameters=None):
def generic_predict(p_or_n, tokenized_sents, vocab, clf, use_lstm, hyperparams):
'''
generic_predict()
Train a model that works for both prose and nonprose
@param p_or_n. A string that indicates "prose", "nonprose", or "all"
@param tokenized_sents. A list of sentences, where each sentence is tokenized
into words
@param vocab. A dictionary mapping word tokens to numeric indices.
@param clf. An encoding of the trained keras model.
@param use_lstm. Bool indicating whether clf is a CRF or LSTM.
'''
# use_lstm=self._use_lstm
    if use_lstm:
        # recover the LSTM-specific objects from the hyperparams dictionary filled
        # in by ClinerModel.predict_classes(); a 'tokens_to_vec' entry is optional
        # and defaults to None, as with the old keyword arguments
        parameters = hyperparams.get('parameters')
        pretrained_dataset = hyperparams.get('pretrained_dataset')
        tokens_to_vec = hyperparams.get('tokens_to_vec')
        #parameters=hd.load_parameters_from_file("LSTM_parameters.txt")
        parameters['use_pretrained_model']=True
        #model_folder="./models/NN_models"
predictions=[]
sys.stdout.write('\n use_lstm \n')
dataset = Exp.Dataset()
fictional_labels= copy.deepcopy(tokenized_sents)
for idx,x in enumerate(fictional_labels):
for val_id,value in enumerate(x):
fictional_labels[idx][val_id]='O'
Datasets_tokens={}
Datasets_labels={}
Datasets_tokens['deploy']=tokenized_sents
Datasets_labels['deploy']=fictional_labels
token_to_vector=dataset.load_dataset(Datasets_tokens, Datasets_labels, "", parameters,token_to_vector=tokens_to_vec, pretrained_dataset=pretrained_dataset)
print (dataset.token_indices.keys())
parameters['Feature_vector_length']=dataset.feature_vector_size
parameters['use_features_before_final_lstm']=False
dataset.update_dataset("", ['deploy'],Datasets_tokens,Datasets_labels)
del Datasets_tokens
del Datasets_labels
#model=current_model
model=entity_model.EntityLSTM(dataset,parameters)
os.mkdir(parameters['conll_like_result_folder'])
test_temp = os.path.join(parameters['conll_like_result_folder'], 'test/')
train_temp = os.path.join(parameters['conll_like_result_folder'], 'train/')
valid_temp = os.path.join(parameters['conll_like_result_folder'], 'valid/')
os.mkdir(test_temp)
os.mkdir(train_temp)
os.mkdir(valid_temp)
sess = tf.Session()
with sess.as_default():
#model=entity_model.EntityLSTM(dataset,parameters)
transition_params_trained=model.restore_from_pretrained_model(parameters, dataset, sess, token_to_vector=token_to_vector,pretrained_dataset=pretrained_dataset)
del token_to_vector
predictions=training_predict_LSTM.prediction_step(sess,dataset,"deploy",model,0,parameters['conll_like_result_folder'],transition_params_trained)
sess.close()
tf.reset_default_graph()
shutil.rmtree(parameters['conll_like_result_folder'])
        return predictions
# If nothing to predict, skip actual prediction
if len(tokenized_sents) == 0:
sys.stdout.write('\tnothing to predict %s\n' % p_or_n)
return []
sys.stdout.write('\tvectorizing words %s\n' % p_or_n)
if use_lstm:
print('todo: incorporate lstm')
# vectorize tokenized sentences
#X = []
#for sent in tokenized_sents:
# id_seq = []
# for w in sent:
# if w in vocab:
# id_seq.append(vocab[w])
# else:
# id_seq.append(vocab['oov'])
# X.append(id_seq)
else:
from feature_extraction.features import extract_features
# vectorize validation X
text_features = extract_features(tokenized_sents)
flat_X_feats = vocab.transform( flatten(text_features) )
X = reconstruct_list(flat_X_feats, save_list_structure(text_features))
sys.stdout.write('\tpredicting labels %s\n' % p_or_n)
# Predict labels
if use_lstm:
print ("TEST_PREDICT")
exit()
else:
from machine_learning import crf
predictions = crf.predict(clf, X)
# Format labels from output
return predictions
| apache-2.0 |
mlassnig/pilot2 | pilot/scripts/stageout.py | 1 | 14721 | #do not use: #!/usr/bin/env python3
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Paul Nilsson, [email protected], 2020
import argparse
import os
import re
from pilot.api.data import StageOutClient
from pilot.common.errorcodes import ErrorCodes
from pilot.common.exception import PilotException
from pilot.info import InfoService, FileSpec, infosys
from pilot.util.config import config
from pilot.util.filehandling import establish_logging, write_json
from pilot.util.tracereport import TraceReport
import logging
errors = ErrorCodes()
# error codes
GENERAL_ERROR = 1
NO_QUEUENAME = 2
NO_SCOPES = 3
NO_LFNS = 4
NO_EVENTTYPE = 5
NO_LOCALSITE = 6
NO_REMOTESITE = 7
NO_PRODUSERID = 8
NO_JOBID = 9
NO_TASKID = 10
NO_JOBDEFINITIONID = 11
NO_DDMENDPOINTS = 12
NO_DATASETS = 13
NO_GUIDS = 14
TRANSFER_ERROR = 15
def get_args():
"""
Return the args from the arg parser.
:return: args (arg parser object).
"""
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-d',
dest='debug',
action='store_true',
default=False,
help='Enable debug mode for logging messages')
arg_parser.add_argument('-q',
dest='queuename',
required=True,
                            help='Queue name (e.g., AGLT2_TEST-condor)')
arg_parser.add_argument('-w',
dest='workdir',
required=False,
default=os.getcwd(),
help='Working directory')
arg_parser.add_argument('--scopes',
dest='scopes',
required=True,
help='List of Rucio scopes (e.g., mc16_13TeV,mc16_13TeV')
arg_parser.add_argument('--lfns',
dest='lfns',
required=True,
help='LFN list (e.g., filename1,filename2')
arg_parser.add_argument('--eventtype',
dest='eventtype',
required=True,
help='Event type')
arg_parser.add_argument('--ddmendpoints',
dest='ddmendpoints',
required=True,
help='DDM endpoint')
arg_parser.add_argument('--datasets',
dest='datasets',
required=True,
help='Dataset')
arg_parser.add_argument('--guids',
dest='guids',
required=True,
help='GUIDs')
arg_parser.add_argument('--localsite',
dest='localsite',
required=True,
help='Local site')
arg_parser.add_argument('--remotesite',
dest='remotesite',
required=True,
help='Remote site')
arg_parser.add_argument('--produserid',
dest='produserid',
required=True,
help='produserid')
arg_parser.add_argument('--jobid',
dest='jobid',
required=True,
help='PanDA job id')
arg_parser.add_argument('--taskid',
dest='taskid',
required=True,
help='PanDA task id')
arg_parser.add_argument('--jobdefinitionid',
dest='jobdefinitionid',
required=True,
help='Job definition id')
arg_parser.add_argument('--eventservicemerge',
dest='eventservicemerge',
type=str2bool,
default=False,
help='Event service merge boolean')
arg_parser.add_argument('--usepcache',
dest='usepcache',
type=str2bool,
default=False,
help='pcache boolean from queuedata')
arg_parser.add_argument('--no-pilot-log',
dest='nopilotlog',
action='store_true',
default=False,
help='Do not write the pilot log to file')
arg_parser.add_argument('--outputdir',
dest='outputdir',
required=False,
default='',
help='Output files directory')
arg_parser.add_argument('--catchall',
dest='catchall',
required=False,
default='',
help='PQ catchall field')
return arg_parser.parse_args()
def str2bool(v):
""" Helper function to convert string to bool """
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def verify_args():
"""
Make sure required arguments are set, and if they are not then set them.
(deprecated)
:return:
"""
if not args.workdir:
args.workdir = os.getcwd()
if not args.queuename:
message('queue name not set, cannot initialize InfoService')
return NO_QUEUENAME
if not args.scopes:
message('scopes not set')
return NO_SCOPES
if not args.lfns:
message('LFNs not set')
return NO_LFNS
if not args.eventtype:
message('No event type provided')
return NO_EVENTTYPE
if not args.localsite:
message('No local site provided')
return NO_LOCALSITE
if not args.remotesite:
message('No remote site provided')
return NO_REMOTESITE
if not args.produserid:
message('No produserid provided')
return NO_PRODUSERID
if not args.jobid:
message('No jobid provided')
return NO_JOBID
if not args.ddmendpoints:
message('No ddmendpoint provided')
return NO_DDMENDPOINTS
if not args.datasets:
message('No dataset provided')
return NO_DATASETS
if not args.guids:
message('No GUIDs provided')
return NO_GUIDS
if not args.taskid:
message('No taskid provided')
return NO_TASKID
if not args.jobdefinitionid:
message('No jobdefinitionid provided')
return NO_JOBDEFINITIONID
return 0
def message(msg):
print(msg) if not logger else logger.info(msg)
def get_file_lists(lfns, scopes, ddmendpoints, datasets, guids):
return lfns.split(','), scopes.split(','), ddmendpoints.split(','), datasets.split(','), guids.split(',')
class Job:
"""
A minimal implementation of the Pilot Job class with data members necessary for the trace report only.
"""
produserid = ""
jobid = ""
taskid = ""
jobdefinitionid = ""
def __init__(self, produserid="", jobid="", taskid="", jobdefinitionid=""):
self.produserid = produserid.replace('%20', ' ')
self.jobid = jobid
self.taskid = taskid
self.jobdefinitionid = jobdefinitionid
def add_to_dictionary(dictionary, key, value1, value2, value3, value4, value5, value6):
"""
Add key: [value1, value2, value3, value4, value5, value6] to dictionary.
In practice; lfn: [status, status_code, surl, turl, checksum, fsize].
:param dictionary: dictionary to be updated.
:param key: lfn key to be added (string).
:param value1: status to be added to list belonging to key (string).
:param value2: status_code to be added to list belonging to key (string).
:param value3: surl to be added to list belonging to key (string).
:param value4: turl to be added to list belonging to key (string).
:param value5: checksum to be added to list belonging to key (string).
:param value6: fsize to be added to list belonging to key (string).
:return: updated dictionary.
"""
dictionary[key] = [value1, value2, value3, value4, value5, value6]
return dictionary
def extract_error_info(err):
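    """
    Extract the error code and diagnostics message from a formatted error string.
    :param err: error string (string).
    :return: error code, error message.
    """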
error_code = 0
error_message = ""
_code = re.search(r'error code: (\d+)', err)
if _code:
error_code = _code.group(1)
_msg = re.search('details: (.+)', err)
if _msg:
error_message = _msg.group(1)
error_message = error_message.replace('[PilotException(', '').strip()
return error_code, error_message
if __name__ == '__main__':
"""
    Main function of the stage-out script.
"""
# get the args from the arg parser
args = get_args()
args.debug = True
args.nopilotlog = False
establish_logging(debug=args.debug, nopilotlog=args.nopilotlog, filename=config.Pilot.stageoutlog)
logger = logging.getLogger(__name__)
#ret = verify_args()
#if ret:
# exit(ret)
# get the file info
lfns, scopes, ddmendpoints, datasets, guids = get_file_lists(args.lfns, args.scopes, args.ddmendpoints, args.datasets, args.guids)
if len(lfns) != len(scopes) or len(lfns) != len(ddmendpoints) or len(lfns) != len(datasets) or len(lfns) != len(guids):
message('file lists not same length: len(lfns)=%d, len(scopes)=%d, len(ddmendpoints)=%d, len(datasets)=%d, len(guids)=%d' %
(len(lfns), len(scopes), len(ddmendpoints), len(datasets), len(guids)))
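    # note: a mismatch is only reported; the transfer loop below zips the lists,
    # so entries beyond the shortest list would be silently dropped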
# generate the trace report
trace_report = TraceReport(pq=os.environ.get('PILOT_SITENAME', ''), localSite=args.localsite,
remoteSite=args.remotesite, dataset="", eventType=args.eventtype)
job = Job(produserid=args.produserid, jobid=args.jobid, taskid=args.taskid, jobdefinitionid=args.jobdefinitionid)
trace_report.init(job)
try:
infoservice = InfoService()
infoservice.init(args.queuename, infosys.confinfo, infosys.extinfo)
infosys.init(args.queuename) # is this correct? otherwise infosys.queuedata doesn't get set
except Exception as e:
message(e)
# perform stage-out (single transfers)
err = ""
errcode = 0
xfiles = None
activity = 'pw'
client = StageOutClient(infoservice, logger=logger, trace_report=trace_report)
kwargs = dict(workdir=args.workdir, cwd=args.workdir, usecontainer=False, job=job, output_dir=args.outputdir,
catchall=args.catchall) # , mode='stage-out')
xfiles = []
for lfn, scope, dataset, ddmendpoint, guid in list(zip(lfns, scopes, datasets, ddmendpoints, guids)):
files = [{'scope': scope, 'lfn': lfn, 'workdir': args.workdir, 'dataset': dataset, 'ddmendpoint': ddmendpoint,
'ddmendpoint_alt': None}]
# do not abbreviate the following two lines as otherwise the content of xfiles will be a list of generator objects
_xfiles = [FileSpec(type='output', **f) for f in files]
xfiles += _xfiles
# prod analy unification: use destination preferences from PanDA server for unified queues
if infoservice.queuedata.type != 'unified':
client.prepare_destinations(xfiles,
activity) ## FIX ME LATER: split activities: for astorages and for copytools (to unify with ES workflow)
try:
r = client.transfer(xfiles, activity=activity, **kwargs)
except PilotException as error:
import traceback
error_msg = traceback.format_exc()
logger.error(error_msg)
err = errors.format_diagnostics(error.get_error_code(), error_msg)
except Exception as error:
err = str(error)
errcode = -1
message(err)
# for lfn, scope, dataset, ddmendpoint, guid in list(zip(lfns, scopes, datasets, ddmendpoints, guids)):
# try:
# files = [{'scope': scope, 'lfn': lfn, 'workdir': args.workdir, 'dataset': dataset, 'ddmendpoint': ddmendpoint, 'ddmendpoint_alt': None}]
# xfiles = [FileSpec(type='output', **f) for f in files]
#
# # prod analy unification: use destination preferences from PanDA server for unified queues
# if infoservice.queuedata.type != 'unified':
# client.prepare_destinations(xfiles,
# activity) ## FIX ME LATER: split activities: for astorages and for copytools (to unify with ES workflow)
#
# r = client.transfer(xfiles, activity=activity, **kwargs)
# except PilotException as error:
# import traceback
# error_msg = traceback.format_exc()
# logger.error(error_msg)
# err = errors.format_diagnostics(error.get_error_code(), error_msg)
# except Exception as error:
# err = str(error)
# errcode = -1
# message(err)
# put file statuses in a dictionary to be written to file
file_dictionary = {} # { 'error': [error_diag, -1], 'lfn1': [status, status_code], 'lfn2':.., .. }
if xfiles:
message('stageout script summary of transferred files:')
for fspec in xfiles:
add_to_dictionary(file_dictionary, fspec.lfn, fspec.status, fspec.status_code,
fspec.surl, fspec.turl, fspec.checksum.get('adler32'), fspec.filesize)
status = fspec.status if fspec.status else "(not transferred)"
message(" -- lfn=%s, status_code=%s, status=%s, surl=%s, turl=%s, checksum=%s, filesize=%s" %
(fspec.lfn, fspec.status_code, status, fspec.surl, fspec.turl, fspec.checksum.get('adler32'), fspec.filesize))
# add error info, if any
if err:
errcode, err = extract_error_info(err)
add_to_dictionary(file_dictionary, 'error', err, errcode, None, None, None, None)
path = os.path.join(args.workdir, config.Container.stageout_status_dictionary)
if os.path.exists(path):
path += '.log'
_status = write_json(path, file_dictionary)
if err:
message("containerised file transfers failed: %s" % err)
exit(TRANSFER_ERROR)
message("wrote %s" % path)
message("containerised file transfers finished")
exit(0)
| apache-2.0 |
DonBeo/scikit-learn | sklearn/cross_decomposition/tests/test_pls.py | 1 | 10168 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_
from nose.tools import assert_equal
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
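    # check_ortho asserts that M.T.dot(M) is (numerically) diagonal, i.e. that
    # the columns of M are mutually orthogonal (not necessarily unit norm).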
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
assert_array_almost_equal(pls_ca.x_rotations_, x_rotations)
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
assert_array_almost_equal(pls_ca.y_rotations_, y_rotations)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
    # The results were checked against the R-packages plspm, mixOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
assert_array_almost_equal(pls_2.x_weights_, x_weights)
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
assert_array_almost_equal(pls_2.x_loadings_, x_loadings)
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_weights_, y_weights)
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_loadings_, y_loadings)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
assert_array_almost_equal(pls_ca.x_loadings_, x_loadings)
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
assert_array_almost_equal(pls_ca.y_loadings_, y_loadings)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
    # Let's check that PLSSVD doesn't return all possible components but just
    # the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
# Ensure 1d Y is correctly interpreted
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_scale():
d = load_linnerud()
X = d.data
Y = d.target
# causes X[:, -1].std() to be zero
X[:, -1] = 1.0
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
clf.fit(X, Y)
| bsd-3-clause |
nyu-dl/dl4mt-cdec | subword_base/train_wmt15_deen_bpe2bpe_both_adam.py | 1 | 3244 | import os
from collections import OrderedDict
from nmt import train
from subword_base_both import *
layers = {'ff': ('param_init_fflayer', 'fflayer'),
'fff': ('param_init_ffflayer', 'ffflayer'),
'gru': ('param_init_gru', 'gru_layer'),
'two_layer_gru_decoder_both': ('param_init_two_layer_gru_decoder_both',
'two_layer_gru_decoder_both'),
}
def main(job_id, params):
re_load = False
save_file_name = 'bpe2bpe_two_layer_gru_decoder_both_adam'
source_dataset = params['train_data_path'] + params['source_dataset']
target_dataset = params['train_data_path'] + params['target_dataset']
valid_source_dataset = params['dev_data_path'] + params['valid_source_dataset']
valid_target_dataset = params['dev_data_path'] + params['valid_target_dataset']
source_dictionary = params['train_data_path'] + params['source_dictionary']
target_dictionary = params['train_data_path'] + params['target_dictionary']
print params, params['save_path'], save_file_name
validerr = train(
max_epochs=int(params['max_epochs']),
patience=int(params['patience']),
dim_word=int(params['dim_word']),
dim_word_src=int(params['dim_word_src']),
save_path=params['save_path'],
save_file_name=save_file_name,
re_load=re_load,
enc_dim=int(params['enc_dim']),
dec_dim=int(params['dec_dim']),
n_words=int(params['n_words']),
n_words_src=int(params['n_words_src']),
decay_c=float(params['decay_c']),
lrate=float(params['learning_rate']),
optimizer=params['optimizer'],
maxlen=int(params['maxlen']),
maxlen_trg=int(params['maxlen_trg']),
maxlen_sample=int(params['maxlen_sample']),
batch_size=int(params['batch_size']),
valid_batch_size=int(params['valid_batch_size']),
sort_size=int(params['sort_size']),
validFreq=int(params['validFreq']),
dispFreq=int(params['dispFreq']),
saveFreq=int(params['saveFreq']),
sampleFreq=int(params['sampleFreq']),
clip_c=int(params['clip_c']),
datasets=[source_dataset, target_dataset],
valid_datasets=[valid_source_dataset, valid_target_dataset],
dictionaries=[source_dictionary, target_dictionary],
use_dropout=int(params['use_dropout']),
source_word_level=int(params['source_word_level']),
target_word_level=int(params['target_word_level']),
layers=layers,
save_every_saveFreq=1,
use_bpe=1,
init_params=init_params,
build_model=build_model,
build_sampler=build_sampler,
gen_sample=gen_sample
)
return validerr
if __name__ == '__main__':
import sys, time
if len(sys.argv) > 1:
config_file_name = sys.argv[-1]
else:
config_file_name = 'wmt15_deen_bpe2bpe_adam.txt'
f = open(config_file_name, 'r')
lines = f.readlines()
params = OrderedDict()
for line in lines:
line = line.split('\n')[0]
param_list = line.split(' ')
param_name = param_list[0]
param_value = param_list[1]
params[param_name] = param_value
main(0, params)
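# The configuration file is expected to contain one "<param_name> <param_value>"
# pair per line; the names below are illustrative (they must match the keys read
# in main(), e.g. train_data_path, dim_word, optimizer) and the values are not
# the project's official defaults:
#
#     train_data_path /data/wmt15/train/
#     dim_word 512
#     optimizer adam
#
# Every value is read as a string here and cast to int/float inside main().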
| bsd-3-clause |
mckinziebrandon/DeepChatModels | data/_dataset.py | 1 | 12596 | """ABC for datasets. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import numpy as np
import tensorflow as tf
from utils import io_utils
from abc import ABCMeta, abstractmethod, abstractproperty
from chatbot.globals import DEFAULT_FULL_CONFIG
DEFAULT_PARAMS = DEFAULT_FULL_CONFIG['dataset_params']
class DatasetABC(metaclass=ABCMeta):
@abstractmethod
def convert_to_tf_records(self, *args):
"""If not found in data dir, will create tfrecords data
files from text files.
"""
pass
@abstractmethod
def train_generator(self, batch_size):
"""Returns a generator function for batches of batch_size
train data.
"""
pass
@abstractmethod
def valid_generator(self, batch_size):
"""Returns a generator function for batches of batch_size
validation data.
"""
pass
@abstractproperty
def word_to_idx(self):
"""Return dictionary map from str -> int. """
pass
@abstractproperty
def idx_to_word(self):
"""Return dictionary map from int -> str. """
pass
@abstractproperty
def name(self):
"""Returns name of the dataset as a string."""
pass
@abstractproperty
def max_seq_len(self):
"""Return the maximum allowed sentence length."""
pass
class Dataset(DatasetABC):
def __init__(self, dataset_params):
"""Implements the general of subset of operations that all
dataset subclasses can use.
Args:
dataset_params: dictionary of configuration parameters.
See DEFAULT_FULL_CONFIG at top of file for supported keys.
"""
self.__dict__['__params'] = Dataset.fill_params(dataset_params)
# We query io_utils to ensure all data files are organized properly,
# and io_utils returns the paths to files of interest.
id_paths, vocab_path, vocab_size = io_utils.prepare_data(
data_dir=self.data_dir,
vocab_size=self.vocab_size,
optimize=dataset_params.get('optimize_params'),
config_path=dataset_params.get('config_path'))
if vocab_size != self.vocab_size:
self.log.info("Updating vocab size from %d to %d",
self.vocab_size, vocab_size)
self.vocab_size = vocab_size
# Also update the input dict, in case it is used later/elsewhere.
dataset_params['vocab_size'] = self.vocab_size
self.paths = dict()
self.paths = {
**id_paths,
'vocab': vocab_path,
'train_tfrecords': None,
'valid_tfrecords': None}
self._word_to_idx, self._idx_to_word = io_utils.get_vocab_dicts(
vocab_path)
# Create tfrecords file if not located in data_dir.
self.convert_to_tf_records('train')
self.convert_to_tf_records('valid')
def convert_to_tf_records(self, prefix='train'):
"""If can't find tfrecords 'prefix' files, creates them.
Args:
prefix: 'train' or 'valid'. Determines which tfrecords to build.
"""
from_path = self.paths['from_'+prefix]
to_path = self.paths['to_'+prefix]
tfrecords_fname = (prefix
+ 'voc%d_seq%d' % (self.vocab_size, self.max_seq_len)
+ '.tfrecords')
output_path = os.path.join(self.data_dir, tfrecords_fname)
if os.path.isfile(output_path):
self.log.info('Using tfrecords file %s' % output_path)
self.paths[prefix + '_tfrecords'] = output_path
return
def get_sequence_example(encoder_line, decoder_line):
space_needed = max(len(encoder_line.split()), len(decoder_line.split()))
if space_needed > self.max_seq_len:
return None
example = tf.train.SequenceExample()
encoder_list = [int(x) for x in encoder_line.split()]
decoder_list = [io_utils.GO_ID] \
+ [int(x) for x in decoder_line.split()] \
+ [io_utils.EOS_ID]
# Why tensorflow . . . why . . .
example.context.feature['encoder_sequence_length'].int64_list.value.append(
len(encoder_list))
example.context.feature['decoder_sequence_length'].int64_list.value.append(
len(decoder_list))
encoder_sequence = example.feature_lists.feature_list['encoder_sequence']
decoder_sequence = example.feature_lists.feature_list['decoder_sequence']
for e in encoder_list:
encoder_sequence.feature.add().int64_list.value.append(e)
for d in decoder_list:
decoder_sequence.feature.add().int64_list.value.append(d)
return example
with tf.gfile.GFile(from_path, mode="r") as encoder_file:
with tf.gfile.GFile(to_path, mode="r") as decoder_file:
with tf.python_io.TFRecordWriter(output_path) as writer:
encoder_line = encoder_file.readline()
decoder_line = decoder_file.readline()
while encoder_line and decoder_line:
sequence_example = get_sequence_example(
encoder_line,
decoder_line)
if sequence_example is not None:
writer.write(sequence_example.SerializeToString())
encoder_line = encoder_file.readline()
decoder_line = decoder_file.readline()
self.log.info("Converted text files %s and %s into tfrecords file %s" \
% (os.path.basename(from_path),
os.path.basename(to_path),
os.path.basename(output_path)))
self.paths[prefix + '_tfrecords'] = output_path
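    # Sketch of how a consumer could decode the records written above (the
    # TF1-style parsing calls are an assumption, not used by this class):
    #
    #     context_features = {
    #         'encoder_sequence_length': tf.FixedLenFeature([], tf.int64),
    #         'decoder_sequence_length': tf.FixedLenFeature([], tf.int64)}
    #     sequence_features = {
    #         'encoder_sequence': tf.FixedLenSequenceFeature([], tf.int64),
    #         'decoder_sequence': tf.FixedLenSequenceFeature([], tf.int64)}
    #     context, sequences = tf.parse_single_sequence_example(
    #         serialized_record, context_features, sequence_features)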
def sentence_generator(self, prefix='from'):
"""Yields (as words) single sentences from training data,
for testing purposes.
"""
self.log.info("Generating sentences from %s", self.paths[prefix+'_train'])
with tf.gfile.GFile(self.paths[prefix+'_train'], mode="r") as f:
sentence = self.as_words(
list(map(int, f.readline().strip().lower().split())))
while sentence:
yield sentence
sentence = self.as_words(
list(map(int, f.readline().strip().lower().split())))
def pairs_generator(self, num_generate=None):
in_sentences = self.sentence_generator('from')
in_sentences = [s for s in in_sentences]
out_sentences = self.sentence_generator('to')
out_sentences = [s for s in out_sentences]
if num_generate is None:
num_generate = len(in_sentences)
count = 0
for in_sent, out_sent in zip(in_sentences, out_sentences):
yield in_sent, out_sent
count += 1
if count >= num_generate:
break
def train_generator(self, batch_size):
"""[Note: not needed by DynamicBot since InputPipeline]"""
return self._generator(
self.paths['from_train'],
self.paths['to_train'],
batch_size)
def valid_generator(self, batch_size):
"""[Note: not needed by DynamicBot since InputPipeline]"""
return self._generator(
self.paths['from_valid'],
self.paths['to_valid'],
batch_size)
def _generator(self, from_path, to_path, batch_size):
"""(Used by BucketModels only). Returns a generator function that
reads data from file, and yields shuffled batches.
Args:
from_path: full path to file for encoder inputs.
to_path: full path to file for decoder inputs.
batch_size: number of samples to yield at once.
"""
def longest_sentence(enc_list, dec_list):
max_enc_len = max([len(s) for s in enc_list])
max_dec_len = max([len(s) for s in dec_list])
return max(max_enc_len, max_dec_len)
def padded_batch(encoder_tokens, decoder_tokens):
max_sent_len = longest_sentence(encoder_tokens, decoder_tokens)
encoder_batch = np.array(
[s + [io_utils.PAD_ID] * (max_sent_len - len(s))
for s in encoder_tokens])[:, ::-1]
decoder_batch = np.array(
[s + [io_utils.PAD_ID] * (max_sent_len - len(s))
for s in decoder_tokens])
return encoder_batch, decoder_batch
encoder_tokens = []
decoder_tokens = []
with tf.gfile.GFile(from_path, mode="r") as source_file:
with tf.gfile.GFile(to_path, mode="r") as target_file:
source, target = source_file.readline(), target_file.readline()
while source and target:
# Skip sentence pairs that are too long for specifications.
space_needed = max(len(source.split()), len(target.split()))
if space_needed > self.max_seq_len:
source, target = source_file.readline(), target_file.readline()
continue
# Reformat token strings to token lists.
# Note: GO_ID is prepended by the chat bot, since it
# determines whether or not it's responsible for responding.
encoder_tokens.append([int(x) for x in source.split()])
decoder_tokens.append(
[int(x) for x in target.split()] + [io_utils.EOS_ID])
# Have we collected batch_size number of sentences?
# If so, pad & yield.
assert len(encoder_tokens) == len(decoder_tokens)
if len(encoder_tokens) == batch_size:
yield padded_batch(encoder_tokens, decoder_tokens)
encoder_tokens = []
decoder_tokens = []
source, target = source_file.readline(), target_file.readline()
# Don't forget to yield the 'leftovers'!
assert len(encoder_tokens) == len(decoder_tokens)
assert len(encoder_tokens) <= batch_size
if len(encoder_tokens) > 0:
yield padded_batch(encoder_tokens, decoder_tokens)
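    # Sketch of what padded_batch() yields (io_utils.PAD_ID assumed to be 0):
    #     encoder_tokens = [[4, 5], [6]] is padded to [[4, 5], [6, 0]] and then
    #     reversed per row by [:, ::-1] into [[5, 4], [0, 6]]; decoder rows are
    #     only padded, never reversed.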
@property
def word_to_idx(self):
"""Return dictionary map from str -> int. """
return self._word_to_idx
@property
def idx_to_word(self):
"""Return dictionary map from int -> str. """
return self._idx_to_word
def as_words(self, sentence):
"""Convert list of integer tokens to a single sentence string."""
words = []
for token in sentence:
word = self.idx_to_word[token]
try:
word = tf.compat.as_str(word)
except UnicodeDecodeError:
logging.error("UnicodeDecodeError on (token, word): "
"(%r, %r)", token, word)
word = str(word)
words.append(word)
words = " ".join(words)
#words = " ".join([tf.compat.as_str(self.idx_to_word[i]) for i in sentence])
words = words.replace(' , ', ', ').replace(' .', '.').replace(' !', '!')
words = words.replace(" ' ", "'").replace(" ?", "?")
if len(words) < 2:
return words
return words[0].upper() + words[1:]
@property
def name(self):
"""Returns name of the dataset as a string."""
return self._name
@property
def train_size(self):
        raise NotImplementedError
@property
def valid_size(self):
        raise NotImplementedError
@property
def max_seq_len(self):
return self._max_seq_len
@staticmethod
def fill_params(dataset_params):
"""Assigns default values from DEFAULT_FULL_CONFIG
for keys not in dataset_params."""
if 'data_dir' not in dataset_params:
raise ValueError('data directory not found in dataset_params.')
return {**DEFAULT_PARAMS, **dataset_params}
def __getattr__(self, name):
if name not in self.__dict__['__params']:
raise AttributeError(name)
else:
return self.__dict__['__params'][name]
| mit |
CodingCat/mxnet | example/kaggle-ndsb1/gen_img_list.py | 42 | 7000 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import csv
import os
import sys
import random
import numpy as np
import argparse
parser = argparse.ArgumentParser(description='generate train/test image list files form input directory. If training it will also split into tr and va sets.')
parser.add_argument('--image-folder', type=str, default="data/train/",
help='the input data directory')
parser.add_argument('--out-folder', type=str, default="data/",
help='the output folder')
parser.add_argument('--out-file', type=str, default="train.lst",
help='the output lst file')
parser.add_argument('--train', action='store_true',
help='if we are generating training list and hence we have to loop over subdirectories')
## These options are only used if we are doing training lst
parser.add_argument('--percent-val', type=float, default=0.25,
help='the percentage of training list to use as validation')
parser.add_argument('--stratified', action='store_true',
help='if True it will split train lst into tr and va sets using stratified sampling')
args = parser.parse_args()
random.seed(888)
fo_name=os.path.join(args.out_folder+args.out_file)
fo = csv.writer(open(fo_name, "w"), delimiter='\t', lineterminator='\n')
if args.train:
tr_fo_name=os.path.join(args.out_folder+"tr.lst")
va_fo_name=os.path.join(args.out_folder+"va.lst")
tr_fo = csv.writer(open(tr_fo_name, "w"), delimiter='\t', lineterminator='\n')
va_fo = csv.writer(open(va_fo_name, "w"), delimiter='\t', lineterminator='\n')
#check sampleSubmission.csv from kaggle website to view submission format
head = "acantharia_protist_big_center,acantharia_protist_halo,acantharia_protist,amphipods,appendicularian_fritillaridae,appendicularian_s_shape,appendicularian_slight_curve,appendicularian_straight,artifacts_edge,artifacts,chaetognath_non_sagitta,chaetognath_other,chaetognath_sagitta,chordate_type1,copepod_calanoid_eggs,copepod_calanoid_eucalanus,copepod_calanoid_flatheads,copepod_calanoid_frillyAntennae,copepod_calanoid_large_side_antennatucked,copepod_calanoid_large,copepod_calanoid_octomoms,copepod_calanoid_small_longantennae,copepod_calanoid,copepod_cyclopoid_copilia,copepod_cyclopoid_oithona_eggs,copepod_cyclopoid_oithona,copepod_other,crustacean_other,ctenophore_cestid,ctenophore_cydippid_no_tentacles,ctenophore_cydippid_tentacles,ctenophore_lobate,decapods,detritus_blob,detritus_filamentous,detritus_other,diatom_chain_string,diatom_chain_tube,echinoderm_larva_pluteus_brittlestar,echinoderm_larva_pluteus_early,echinoderm_larva_pluteus_typeC,echinoderm_larva_pluteus_urchin,echinoderm_larva_seastar_bipinnaria,echinoderm_larva_seastar_brachiolaria,echinoderm_seacucumber_auricularia_larva,echinopluteus,ephyra,euphausiids_young,euphausiids,fecal_pellet,fish_larvae_deep_body,fish_larvae_leptocephali,fish_larvae_medium_body,fish_larvae_myctophids,fish_larvae_thin_body,fish_larvae_very_thin_body,heteropod,hydromedusae_aglaura,hydromedusae_bell_and_tentacles,hydromedusae_h15,hydromedusae_haliscera_small_sideview,hydromedusae_haliscera,hydromedusae_liriope,hydromedusae_narco_dark,hydromedusae_narco_young,hydromedusae_narcomedusae,hydromedusae_other,hydromedusae_partial_dark,hydromedusae_shapeA_sideview_small,hydromedusae_shapeA,hydromedusae_shapeB,hydromedusae_sideview_big,hydromedusae_solmaris,hydromedusae_solmundella,hydromedusae_typeD_bell_and_tentacles,hydromedusae_typeD,hydromedusae_typeE,hydromedusae_typeF,invertebrate_larvae_other_A,invertebrate_larvae_other_B,jellies_tentacles,polychaete,protist_dark_center,protist_fuzzy_olive,protist_noctiluca,protist_other,protist_star,pteropod_butterfly,pteropod_theco_dev_seq,pteropod_triangle,radiolarian_chain,radiolarian_colony,shrimp_caridean,shrimp_sergestidae,shrimp_zoea,shrimp-like_other,siphonophore_calycophoran_abylidae,siphonophore_calycophoran_rocketship_adult,siphonophore_calycophoran_rocketship_young,siphonophore_calycophoran_sphaeronectes_stem,siphonophore_calycophoran_sphaeronectes_young,siphonophore_calycophoran_sphaeronectes,siphonophore_other_parts,siphonophore_partial,siphonophore_physonect_young,siphonophore_physonect,stomatopod,tornaria_acorn_worm_larvae,trichodesmium_bowtie,trichodesmium_multiple,trichodesmium_puff,trichodesmium_tuft,trochophore_larvae,tunicate_doliolid_nurse,tunicate_doliolid,tunicate_partial,tunicate_salp_chains,tunicate_salp,unknown_blobs_and_smudges,unknown_sticks,unknown_unclassified".split(',')
# make image list
img_lst = []
cnt = 0
if args.train:
for i in xrange(len(head)):
path = args.image_folder + head[i]
lst = os.listdir(args.image_folder + head[i])
for img in lst:
img_lst.append((cnt, i, path + '/' + img))
cnt += 1
else:
lst = os.listdir(args.image_folder)
for img in lst:
img_lst.append((cnt, 0, args.image_folder + img))
cnt += 1
# shuffle
random.shuffle(img_lst)
#write
for item in img_lst:
fo.writerow(item)
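# Each row of the generated .lst file is tab-separated as <index>\t<label>\t<path>,
# e.g. (hypothetical file names):
#     0    12    data/train/chaetognath_other/12345.jpg
#     1    0     data/train/acantharia_protist_big_center/67890.jpg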
## If training, split into train and validation lists (tr.lst and va.lst)
## Optional stratified sampling
if args.train:
img_lst=np.array(img_lst)
if args.stratified:
from sklearn.cross_validation import StratifiedShuffleSplit
## Stratified sampling to generate train and validation sets
labels_train=img_lst[:,1]
# unique_train, counts_train = np.unique(labels_train, return_counts=True) # To have a look at the frecuency distribution
sss = StratifiedShuffleSplit(labels_train, 1, test_size=args.percent_val, random_state=0)
for tr_idx, va_idx in sss:
print("Train subset has ", len(tr_idx), " cases. Validation subset has ", len(va_idx), "cases")
else:
(nRows, nCols) = img_lst.shape
splitat=int(round(nRows*(1-args.percent_val),0))
tr_idx=range(0,splitat)
va_idx=range(splitat,nRows)
print("Train subset has ", len(tr_idx), " cases. Validation subset has ", len(va_idx), "cases")
tr_lst=img_lst[tr_idx,:].tolist()
va_lst=img_lst[va_idx,:].tolist()
for item in tr_lst:
tr_fo.writerow(item)
for item in va_lst:
va_fo.writerow(item)
| apache-2.0 |
h-mayorquin/mnist_dl_ann_project | code/utils.py | 37 | 5101 | """ This file contains different utility functions that are not connected
in any way to the networks presented in the tutorials, but rather help in
processing the outputs into a more understandable way.
For example ``tile_raster_images`` helps in generating an easy-to-grasp
image from a set of samples or weights.
"""
import numpy
def scale_to_unit_interval(ndar, eps=1e-8):
""" Scales all values in the ndarray ndar to be between 0 and 1 """
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
"""
Transform an array with one flattened image per row, into an array in
which images are reshaped and layed out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
:type X: a 2-D ndarray or a tuple of 4 channels, elements of which can
be 2-D ndarrays or None;
:param X: a 2-D array in which every row is a flattened image.
:type img_shape: tuple; (height, width)
:param img_shape: the original shape of each image
:type tile_shape: tuple; (rows, cols)
:param tile_shape: the number of images to tile (rows, cols)
:param output_pixel_vals: if output should be pixel values (i.e. int8
values) or floats
:param scale_rows_to_unit_interval: if the values need to be scaled before
being plotted to [0,1] or not
:returns: array suitable for viewing as an image.
(See:`Image.fromarray`.)
:rtype: a 2-d array with same dtype as X.
"""
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0,0]
# out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -
# tile_spacing[1]
out_shape = [
(ishp + tsp) * tshp - tsp
for ishp, tshp, tsp in zip(img_shape, tile_shape, tile_spacing)
]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output numpy ndarray to store the image
if output_pixel_vals:
out_array = numpy.zeros((out_shape[0], out_shape[1], 4),
dtype='uint8')
else:
out_array = numpy.zeros((out_shape[0], out_shape[1], 4),
dtype=X.dtype)
#colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in xrange(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
dt = out_array.dtype
if output_pixel_vals:
dt = 'uint8'
out_array[:, :, i] = numpy.zeros(
out_shape,
dtype=dt
) + channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
out_array[:, :, i] = tile_raster_images(
X[i], img_shape, tile_shape, tile_spacing,
scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
dt = X.dtype
if output_pixel_vals:
dt = 'uint8'
out_array = numpy.zeros(out_shape, dtype=dt)
for tile_row in xrange(tile_shape[0]):
for tile_col in xrange(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
this_x = X[tile_row * tile_shape[1] + tile_col]
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = scale_to_unit_interval(
this_x.reshape(img_shape))
else:
this_img = this_x.reshape(img_shape)
# add the slice to the corresponding position in the
# output array
c = 1
if output_pixel_vals:
c = 255
out_array[
tile_row * (H + Hs): tile_row * (H + Hs) + H,
tile_col * (W + Ws): tile_col * (W + Ws) + W
] = this_img * c
return out_array
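# Minimal usage sketch (not part of the original tutorial code): tile 100 random
# 28x28 "images" into a single mosaic array that could be handed to Image.fromarray.
def _example_tile_random_images():
    rng = numpy.random.RandomState(0)
    X = rng.rand(100, 28 * 28)  # one flattened image per row
    # 10x10 grid of 28x28 tiles with 1-pixel spacing -> a 289x289 uint8 array
    return tile_raster_images(X, img_shape=(28, 28), tile_shape=(10, 10),
                              tile_spacing=(1, 1))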
| bsd-2-clause |
DonBeo/scikit-learn | sklearn/utils/setup.py | 294 | 2884 | import os
from os.path import join
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
import numpy
from numpy.distutils.misc_util import Configuration
config = Configuration('utils', parent_package, top_path)
config.add_subpackage('sparsetools')
cblas_libs, blas_info = get_blas_info()
cblas_compile_args = blas_info.pop('extra_compile_args', [])
cblas_includes = [join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])]
libraries = []
if os.name == 'posix':
libraries.append('m')
cblas_libs.append('m')
config.add_extension('sparsefuncs_fast', sources=['sparsefuncs_fast.c'],
libraries=libraries)
config.add_extension('arrayfuncs',
sources=['arrayfuncs.c'],
depends=[join('src', 'cholesky_delete.h')],
libraries=cblas_libs,
include_dirs=cblas_includes,
extra_compile_args=cblas_compile_args,
**blas_info
)
config.add_extension(
'murmurhash',
sources=['murmurhash.c', join('src', 'MurmurHash3.cpp')],
include_dirs=['src'])
config.add_extension('lgamma',
sources=['lgamma.c', join('src', 'gamma.c')],
include_dirs=['src'],
libraries=libraries)
config.add_extension('graph_shortest_path',
sources=['graph_shortest_path.c'],
include_dirs=[numpy.get_include()])
config.add_extension('fast_dict',
sources=['fast_dict.cpp'],
language="c++",
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension('seq_dataset',
sources=['seq_dataset.c'],
include_dirs=[numpy.get_include()])
config.add_extension('weight_vector',
sources=['weight_vector.c'],
include_dirs=cblas_includes,
libraries=cblas_libs,
**blas_info)
config.add_extension("_random",
sources=["_random.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
config.add_extension("_logistic_sigmoid",
sources=["_logistic_sigmoid.c"],
include_dirs=[numpy.get_include()],
libraries=libraries)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
tensorflow/tensorflow-experimental_link_static_libraries_once | tensorflow/python/framework/config.py | 3 | 45411 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for configuring TensorFlow execution."""
from typing import Union
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.util import _pywrap_determinism
from tensorflow.python.util import _pywrap_tensor_float_32_execution
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
@tf_export('config.experimental.tensor_float_32_execution_enabled')
def tensor_float_32_execution_enabled():
"""Returns whether TensorFloat-32 is enabled.
By default, TensorFloat-32 is enabled, but this can be changed with
`tf.config.experimental.enable_tensor_float_32_execution`.
Returns:
True if TensorFloat-32 is enabled (the default) and False otherwise
"""
return _pywrap_tensor_float_32_execution.is_enabled()
@tf_export('config.experimental.enable_tensor_float_32_execution')
def enable_tensor_float_32_execution(enabled):
"""Enable or disable the use of TensorFloat-32 on supported hardware.
[TensorFloat-32](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format),
or TF32 for short, is a math mode for NVIDIA Ampere GPUs. TensorFloat-32
execution causes certain float32 ops, such as matrix multiplications and
convolutions, to run much faster on Ampere GPUs but with reduced precision.
This reduced precision should not impact convergence of deep learning models
in practice.
TensorFloat-32 is enabled by default. TensorFloat-32 is only supported on
Ampere GPUs, so all other hardware will use the full float32 precision
regardless of whether TensorFloat-32 is enabled or not. If you want to use the
full float32 precision on Ampere, you can disable TensorFloat-32 execution
with this function. For example:
```python
x = tf.fill((2, 2), 1.0001)
y = tf.fill((2, 2), 1.)
# TensorFloat-32 is enabled, so matmul is run with reduced precision
print(tf.linalg.matmul(x, y)) # [[2., 2.], [2., 2.]]
tf.config.experimental.enable_tensor_float_32_execution(False)
# Matmul is run with full precision
print(tf.linalg.matmul(x, y)) # [[2.0002, 2.0002], [2.0002, 2.0002]]
```
To check whether TensorFloat-32 execution is currently enabled, use
`tf.config.experimental.tensor_float_32_execution_enabled`.
If TensorFloat-32 is enabled, float32 inputs of supported ops, such as
`tf.linalg.matmul`, will be rounded from 23 bits of precision to 10 bits of
precision in most cases. This allows the ops to execute much faster by
utilizing the GPU's tensor cores. TensorFloat-32 has the same dynamic range as
float32, meaning it is no more likely to underflow or overflow than float32.
Ops still use float32 accumulation when TensorFloat-32 is enabled. Enabling or
disabling TensorFloat-32 only affects Ampere GPUs and subsequent GPUs that
support TensorFloat-32.
Note TensorFloat-32 is not always used in supported ops, as only inputs of
certain shapes are supported. Support for more input shapes and more ops may
be added in the future. As a result, precision of float32 ops may decrease in
minor versions of TensorFlow.
TensorFloat-32 is also used for some complex64 ops. Currently, TensorFloat-32
is used in fewer cases for complex64 as it is for float32.
Args:
enabled: Bool indicating whether to enable TensorFloat-32 execution.
"""
_pywrap_tensor_float_32_execution.enable(enabled)
@tf_export('config.threading.get_intra_op_parallelism_threads')
def get_intra_op_parallelism_threads():
"""Get number of threads used within an individual op for parallelism.
Certain operations like matrix multiplication and reductions can utilize
parallel threads for speed ups. A value of 0 means the system picks an
appropriate number.
Returns:
Number of parallel threads
"""
return context.context().intra_op_parallelism_threads
@tf_export('config.threading.set_intra_op_parallelism_threads')
def set_intra_op_parallelism_threads(num_threads):
"""Set number of threads used within an individual op for parallelism.
Certain operations like matrix multiplication and reductions can utilize
parallel threads for speed ups. A value of 0 means the system picks an
appropriate number.
Args:
num_threads: Number of parallel threads
"""
context.context().intra_op_parallelism_threads = num_threads
@tf_export('config.threading.get_inter_op_parallelism_threads')
def get_inter_op_parallelism_threads():
"""Get number of threads used for parallelism between independent operations.
Determines the number of threads used by independent non-blocking operations.
0 means the system picks an appropriate number.
Returns:
Number of parallel threads
"""
return context.context().inter_op_parallelism_threads
@tf_export('config.threading.set_inter_op_parallelism_threads')
def set_inter_op_parallelism_threads(num_threads):
"""Set number of threads used for parallelism between independent operations.
Determines the number of threads used by independent non-blocking operations.
0 means the system picks an appropriate number.
Args:
num_threads: Number of parallel threads
"""
context.context().inter_op_parallelism_threads = num_threads
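# Usage sketch for the two threading knobs above (values are illustrative; both
# calls typically have to happen before TensorFlow executes any op, since the
# settings cannot be changed once the runtime is initialized):
#
#   tf.config.threading.set_intra_op_parallelism_threads(4)
#   tf.config.threading.set_inter_op_parallelism_threads(2)
#   assert tf.config.threading.get_intra_op_parallelism_threads() == 4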
@tf_export('config.optimizer.get_jit')
def get_optimizer_jit() -> str:
"""Returns JIT compilation configuration for code inside `tf.function`.
Possible return values:
-`"autoclustering"` if
[autoclustering](https://www.tensorflow.org/xla#auto-clustering) is enabled
- `""` when no default compilation is applied.
"""
if context.context().optimizer_jit:
return 'autoclustering'
return ''
@tf_export('config.optimizer.set_jit')
@deprecation.deprecated_arg_values(
None,
'`True` setting is deprecated, use `autoclustering` instead.',
warn_once=True,
jit_config=True)
def set_optimizer_jit(enabled: Union[bool, str]):
"""Configure JIT compilation.
Note: compilation is only applied to code that is compiled into a
graph (in TF2 that's only a code inside `tf.function`).
Args:
enabled: JIT compilation configuration.
Possible values:
- `"autoclustering"` (`True` is a deprecated alias): perform
[autoclustering](https://www.tensorflow.org/xla#auto-clustering)
(automatically identify and compile clusters of nodes) on all graphs
using
[XLA](https://www.tensorflow.org/xla).
- `False`: do not automatically compile any graphs.
"""
autoclustering_enabled = enabled in (True, 'autoclustering')
context.context().optimizer_jit = autoclustering_enabled
@tf_export('config.optimizer.get_experimental_options')
def get_optimizer_experimental_options():
"""Get experimental optimizer options.
Refer to tf.config.optimizer.set_experimental_options for a list of current
options.
Note that optimizations are only applied in graph mode, (within tf.function).
In addition, as these are experimental options, the list is subject to change.
Returns:
Dictionary of configured experimental optimizer options
"""
return context.context().get_optimizer_experimental_options()
@tf_export('config.optimizer.set_experimental_options')
def set_optimizer_experimental_options(options):
"""Set experimental optimizer options.
Note that optimizations are only applied in graph mode, (within tf.function).
In addition, as these are experimental options, the list is subject to change.
Args:
options: Dictionary of experimental optimizer options to configure.
Valid keys:
- layout_optimizer: Optimize tensor layouts e.g. This will try to use NCHW
layout on GPU which is faster.
- constant_folding: Fold constants Statically infer the value of tensors
when possible, and materialize the result using constants.
- shape_optimization: Simplify computations made on shapes.
- remapping: Remap subgraphs onto more efficient implementations.
- arithmetic_optimization: Simplify arithmetic ops with common
sub-expression elimination and arithmetic simplification.
- dependency_optimization: Control dependency optimizations. Remove
redundant control dependencies, which may enable other optimization.
This optimizer is also essential for pruning Identity and NoOp nodes.
- loop_optimization: Loop optimizations.
- function_optimization: Function optimizations and inlining.
- debug_stripper: Strips debug-related nodes from the graph.
- disable_model_pruning: Disable removal of unnecessary ops from the graph
- scoped_allocator_optimization: Try to allocate some independent Op
outputs contiguously in order to merge or eliminate downstream Ops.
- pin_to_host_optimization: Force small ops onto the CPU.
- implementation_selector: Enable the swap of kernel implementations based
on the device placement.
- auto_mixed_precision: Change certain float32 ops to float16 on Volta
GPUs and above. Without the use of loss scaling, this can cause
numerical underflow (see
`keras.mixed_precision.experimental.LossScaleOptimizer`).
- disable_meta_optimizer: Disable the entire meta optimizer.
- min_graph_nodes: The minimum number of nodes in a graph to optimizer.
For smaller graphs, optimization is skipped.
"""
context.context().set_optimizer_experimental_options(options)
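# Usage sketch (keys are a subset of those documented above, values illustrative):
#
#   tf.config.optimizer.set_experimental_options(
#       {'layout_optimizer': True, 'constant_folding': False})
#   opts = tf.config.optimizer.get_experimental_options()
#   # opts now includes the two values just set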
@tf_export('config.get_soft_device_placement')
def get_soft_device_placement():
"""Return status of soft device placement flag.
If enabled, an op will be placed on CPU if any of the following are true
1. there's no GPU implementation for the OP
2. no GPU devices are known or registered
3. need to co-locate with reftype input(s) which are from CPU
If disabled, the placement is strict and CPU fallback is not allowed.
An error is raised when an Op cannot be placed onto its intended device.
Returns:
A boolean indicating if soft placement is enabled.
"""
return context.context().soft_device_placement
@tf_export('config.set_soft_device_placement')
def set_soft_device_placement(enabled):
"""Enable or disable soft device placement.
If enabled, an op will be placed on CPU if any of the following are true
1. there's no GPU implementation for the OP
2. no GPU devices are known or registered
3. need to co-locate with reftype input(s) which are from CPU
Note: by default soft device placement is enabled when running in eager mode
(for convenience) and disabled in graph mode (for performance).
Args:
enabled: A boolean indicating whether to enable soft placement.
"""
context.context().soft_device_placement = enabled
@tf_export('config.experimental.get_device_policy')
def get_device_policy():
"""Gets the current device policy.
The device policy controls how operations requiring inputs on a specific
device (e.g., on GPU:0) handle inputs on a different device (e.g. GPU:1).
This function only gets the device policy for the current thread. Any
subsequently started thread will again use the default policy.
Returns:
Current thread device policy
"""
device_policy = context.context().device_policy
if device_policy == context.DEVICE_PLACEMENT_SILENT:
return 'silent'
elif device_policy == context.DEVICE_PLACEMENT_SILENT_FOR_INT32:
return 'silent_for_int32'
elif device_policy == context.DEVICE_PLACEMENT_WARN:
return 'warn'
elif device_policy == context.DEVICE_PLACEMENT_EXPLICIT:
return 'explicit'
else:
# pylint: disable-next=no-value-for-parameter
raise errors.InternalError(
f'Got an invalid device policy: {device_policy!r}.')
@tf_export('config.experimental.set_device_policy')
def set_device_policy(device_policy):
"""Sets the current thread device policy.
The device policy controls how operations requiring inputs on a specific
device (e.g., on GPU:0) handle inputs on a different device (e.g. GPU:1).
When using the default, an appropriate policy will be picked automatically.
The default policy may change over time.
This function only sets the device policy for the current thread. Any
subsequently started thread will again use the default policy.
Args:
device_policy: A device policy.
Valid values:
- None: Switch to a system default.
- 'warn': Copies the tensors which are not on the right device and logs a
warning.
- 'explicit': Raises an error if the placement is not as required.
- 'silent': Silently copies the tensors. Note that this may hide
performance problems as there is no notification provided when
operations are blocked on the tensor being copied between devices.
- 'silent_for_int32': silently copies `int32` tensors, raising errors on
the other ones.
Raises:
ValueError: If an invalid `device_policy` is passed.
"""
if device_policy == 'silent':
context.context().device_policy = context.DEVICE_PLACEMENT_SILENT
elif device_policy == 'silent_for_int32':
context.context().device_policy = context.DEVICE_PLACEMENT_SILENT_FOR_INT32
elif device_policy == 'warn':
context.context().device_policy = context.DEVICE_PLACEMENT_WARN
elif device_policy == 'explicit':
context.context().device_policy = context.DEVICE_PLACEMENT_EXPLICIT
elif device_policy is None:
context.context().device_policy = None
else:
raise ValueError(
f'Invalid argument `device_policy`: {device_policy!r}. Please refer to '
'https://www.tensorflow.org/api_docs/python/tf/config/experimental/set_device_policy '
'for valid `device_policy` arguments.')
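# Usage sketch (affects only the calling thread, as described above):
#
#   tf.config.experimental.set_device_policy('silent_for_int32')
#   assert tf.config.experimental.get_device_policy() == 'silent_for_int32'
#   tf.config.experimental.set_device_policy(None)  # back to the system default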
@tf_export('config.experimental.get_synchronous_execution')
def get_synchronous_execution():
"""Gets whether operations are executed synchronously or asynchronously.
TensorFlow can execute operations synchronously or asynchronously. If
asynchronous execution is enabled, operations may return "non-ready" handles.
Returns:
Current thread execution mode
"""
return context.context().execution_mode == context.SYNC
@tf_export('config.experimental.set_synchronous_execution')
def set_synchronous_execution(enable):
"""Specifies whether operations are executed synchronously or asynchronously.
TensorFlow can execute operations synchronously or asynchronously. If
asynchronous execution is enabled, operations may return "non-ready" handles.
When `enable` is set to None, an appropriate value will be picked
automatically. The value picked may change between TensorFlow releases.
Args:
enable: Whether operations should be dispatched synchronously.
Valid values:
- None: sets the system default.
- True: executes each operation synchronously.
- False: executes each operation asynchronously.
"""
if enable is None:
context.context().execution_mode = None
elif enable:
context.context().execution_mode = context.SYNC
else:
context.context().execution_mode = context.ASYNC
@tf_export('config.list_physical_devices',
'config.experimental.list_physical_devices')
@deprecation.deprecated_endpoints('config.experimental.list_physical_devices')
def list_physical_devices(device_type=None):
"""Return a list of physical devices visible to the host runtime.
Physical devices are hardware devices present on the host machine. By default
all discovered CPU and GPU devices are considered visible.
This API allows querying the physical hardware resources prior to runtime
initialization. Thus, giving an opportunity to call any additional
configuration APIs. This is in contrast to `tf.config.list_logical_devices`,
which triggers runtime initialization in order to list the configured devices.
The following example lists the number of visible GPUs on the host.
>>> physical_devices = tf.config.list_physical_devices('GPU')
>>> print("Num GPUs:", len(physical_devices))
Num GPUs: ...
However, the number of GPUs available to the runtime may change during runtime
initialization due to marking certain devices as not visible or configuring
multiple logical devices.
Args:
device_type: (optional string) Only include devices matching this device
type. For example "CPU" or "GPU".
Returns:
List of discovered `tf.config.PhysicalDevice` objects
"""
return context.context().list_physical_devices(device_type)
@tf_export('config.list_logical_devices',
'config.experimental.list_logical_devices')
@deprecation.deprecated_endpoints('config.experimental.list_logical_devices')
def list_logical_devices(device_type=None):
"""Return a list of logical devices created by runtime.
Logical devices may correspond to physical devices or remote devices in the
cluster. Operations and tensors may be placed on these devices by using the
`name` of the `tf.config.LogicalDevice`.
Calling `tf.config.list_logical_devices` triggers the runtime to configure any
`tf.config.PhysicalDevice` visible to the runtime, thereby preventing
further configuration. To avoid runtime initialization, call
`tf.config.list_physical_devices` instead.
For example:
>>> logical_devices = tf.config.list_logical_devices('GPU')
>>> if len(logical_devices) > 0:
... # Allocate on GPU:0
... with tf.device(logical_devices[0].name):
... one = tf.constant(1)
... # Allocate on GPU:1
... with tf.device(logical_devices[1].name):
... two = tf.constant(2)
Args:
device_type: (optional string) Only include devices matching this device
type. For example "CPU" or "GPU".
Returns:
List of initialized `LogicalDevice`s
"""
return context.context().list_logical_devices(device_type=device_type)
@tf_export('config.get_visible_devices',
'config.experimental.get_visible_devices')
@deprecation.deprecated_endpoints('config.experimental.get_visible_devices')
def get_visible_devices(device_type=None):
"""Get the list of visible physical devices.
Returns the list of `PhysicalDevice`s currently marked as visible to the
runtime. A visible device will have at least one `LogicalDevice` associated
with it once the runtime is initialized.
The following example verifies all visible GPUs have been disabled:
>>> physical_devices = tf.config.list_physical_devices('GPU')
>>> try:
... # Disable all GPUS
... tf.config.set_visible_devices([], 'GPU')
... visible_devices = tf.config.get_visible_devices()
... for device in visible_devices:
... assert device.device_type != 'GPU'
... except:
... # Invalid device or cannot modify virtual devices once initialized.
... pass
Args:
device_type: (optional string) Only include devices matching this device
type. For example "CPU" or "GPU".
Returns:
List of visible `PhysicalDevice`s
"""
return context.context().get_visible_devices(device_type)
@tf_export('config.set_visible_devices',
'config.experimental.set_visible_devices')
@deprecation.deprecated_endpoints('config.experimental.set_visible_devices')
def set_visible_devices(devices, device_type=None):
"""Set the list of visible devices.
Specifies which `PhysicalDevice` objects are visible to the runtime.
TensorFlow will only allocate memory and place operations on visible
physical devices, as otherwise no `LogicalDevice` will be created on them.
By default all discovered devices are marked as visible.
The following example demonstrates disabling the first GPU on the machine.
>>> physical_devices = tf.config.list_physical_devices('GPU')
>>> try:
... # Disable first GPU
... tf.config.set_visible_devices(physical_devices[1:], 'GPU')
... logical_devices = tf.config.list_logical_devices('GPU')
... # Logical device was not created for first GPU
... assert len(logical_devices) == len(physical_devices) - 1
... except:
... # Invalid device or cannot modify virtual devices once initialized.
... pass
Args:
devices: List of `PhysicalDevice`s to make visible
device_type: (optional) Only configure devices matching this device type.
For example "CPU" or "GPU". Other devices will be left unaltered.
Raises:
ValueError: If argument validation fails.
RuntimeError: Runtime is already initialized.
"""
context.context().set_visible_devices(devices, device_type)
# TODO(b/188089869): Redesign memory stats related APIs before move them out of
# experimental.
@tf_export('config.experimental.get_memory_info')
def get_memory_info(device):
"""Get memory info for the chosen device, as a dict.
This function returns a dict containing information about the device's memory
usage. For example:
>>> if tf.config.list_physical_devices('GPU'):
... # Returns a dict in the form {'current': <current mem usage>,
... # 'peak': <peak mem usage>}
... tf.config.experimental.get_memory_info('GPU:0')
Currently returns the following keys:
- `'current'`: The current memory used by the device, in bytes.
- `'peak'`: The peak memory used by the device across the run of the
program, in bytes. Can be reset with
`tf.config.experimental.reset_memory_stats`.
More keys may be added in the future, including device-specific keys.
Currently only supports GPU and TPU. If called on a CPU device, an exception
will be raised.
For GPUs, TensorFlow will allocate all the memory by default, unless changed
with `tf.config.experimental.set_memory_growth`. The dict specifies only the
current and peak memory that TensorFlow is actually using, not the memory that
TensorFlow has allocated on the GPU.
Args:
device: Device string to get the memory information for, e.g. `"GPU:0"`,
`"TPU:0"`. See https://www.tensorflow.org/api_docs/python/tf/device for
specifying device strings.
Returns:
A dict with keys `'current'` and `'peak'`, specifying the current and peak
memory usage respectively.
Raises:
ValueError: No device found with the device name, like '"nonexistent"'.
ValueError: Invalid device name, like '"GPU"', '"CPU:GPU"', '"CPU:"'.
ValueError: Multiple devices matched with the device name.
ValueError: Memory statistics not tracked, like '"CPU:0"'.
"""
return context.context().get_memory_info(device)
# TODO(b/188089869): Redesign memory stats related APIs before move them out of
# experimental.
# TODO(b/189498350): Unify the behavior on CPU, GPU and TPU.
@tf_export('config.experimental.reset_memory_stats')
def reset_memory_stats(device):
"""Resets the tracked memory stats for the chosen device.
This function sets the tracked peak memory for a device to the device's
current memory usage. This allows you to measure the peak memory usage for a
specific part of your program. For example:
>>> if tf.config.list_physical_devices('GPU'):
... # Sets the peak memory to the current memory.
... tf.config.experimental.reset_memory_stats('GPU:0')
... # Creates the first peak memory usage.
... x1 = tf.ones(1000 * 1000, dtype=tf.float64)
... del x1 # Frees the memory referenced by `x1`.
... peak1 = tf.config.experimental.get_memory_info('GPU:0')['peak']
... # Sets the peak memory to the current memory again.
... tf.config.experimental.reset_memory_stats('GPU:0')
... # Creates the second peak memory usage.
... x2 = tf.ones(1000 * 1000, dtype=tf.float32)
... del x2
... peak2 = tf.config.experimental.get_memory_info('GPU:0')['peak']
... assert peak2 < peak1 # tf.float32 consumes less memory than tf.float64.
Currently only supports GPU and TPU. If called on a CPU device, an exception
will be raised.
Args:
device: Device string to reset the memory stats, e.g. `"GPU:0"`, `"TPU:0"`.
See https://www.tensorflow.org/api_docs/python/tf/device for specifying
device strings.
Raises:
ValueError: No device found with the device name, like '"nonexistent"'.
ValueError: Invalid device name, like '"GPU"', '"CPU:GPU"', '"CPU:"'.
ValueError: Multiple devices matched with the device name.
ValueError: Memory statistics not tracked or clearing memory statistics not
supported, like '"CPU:0"'.
"""
context.context().reset_memory_stats(device)
@deprecation.deprecated(
None,
"Use tf.config.experimental.get_memory_info(device)['current'] instead.")
@tf_export('config.experimental.get_memory_usage')
def get_memory_usage(device):
"""Get the current memory usage, in bytes, for the chosen device.
This function is deprecated in favor of
`tf.config.experimental.get_memory_info`. Calling this function is equivalent
to calling `tf.config.experimental.get_memory_info(device)['current']`.
See https://www.tensorflow.org/api_docs/python/tf/device for specifying device
strings.
For example:
>>> gpu_devices = tf.config.list_physical_devices('GPU')
>>> if gpu_devices:
... tf.config.experimental.get_memory_usage('GPU:0')
Does not work for CPU.
For GPUs, TensorFlow will allocate all the memory by default, unless changed
with `tf.config.experimental.set_memory_growth`. This function only returns
the memory that TensorFlow is actually using, not the memory that TensorFlow
has allocated on the GPU.
Args:
device: Device string to get the bytes in use for, e.g. `"GPU:0"`
Returns:
Total memory usage in bytes.
Raises:
ValueError: Non-existent or CPU device specified.
"""
return get_memory_info(device)['current']
@tf_export('config.experimental.get_memory_growth')
def get_memory_growth(device):
"""Get if memory growth is enabled for a `PhysicalDevice`.
If memory growth is enabled for a `PhysicalDevice`, the runtime initialization
will not allocate all memory on the device.
For example:
>>> physical_devices = tf.config.list_physical_devices('GPU')
>>> try:
... tf.config.experimental.set_memory_growth(physical_devices[0], True)
... assert tf.config.experimental.get_memory_growth(physical_devices[0])
... except:
... # Invalid device or cannot modify virtual devices once initialized.
... pass
Args:
device: `PhysicalDevice` to query
Returns:
A boolean indicating the memory growth setting for the `PhysicalDevice`.
Raises:
ValueError: Invalid `PhysicalDevice` specified.
"""
return context.context().get_memory_growth(device)
@tf_export('config.experimental.set_memory_growth')
def set_memory_growth(device, enable):
"""Set if memory growth should be enabled for a `PhysicalDevice`.
If memory growth is enabled for a `PhysicalDevice`, the runtime initialization
will not allocate all memory on the device. Memory growth cannot be configured
on a `PhysicalDevice` with virtual devices configured.
For example:
>>> physical_devices = tf.config.list_physical_devices('GPU')
>>> try:
... tf.config.experimental.set_memory_growth(physical_devices[0], True)
... except:
... # Invalid device or cannot modify virtual devices once initialized.
... pass
Args:
device: `PhysicalDevice` to configure
enable: (Boolean) Whether to enable or disable memory growth
Raises:
ValueError: Invalid `PhysicalDevice` specified.
RuntimeError: Runtime is already initialized.
"""
context.context().set_memory_growth(device, enable)
@tf_export('config.experimental.get_device_details')
def get_device_details(device):
"""Returns details about a physical devices.
This API takes in a `tf.config.PhysicalDevice` returned by
`tf.config.list_physical_devices`. It returns a dict with string keys
containing various details about the device. Each key is only supported by a
subset of devices, so you should not assume the returned dict will have any
particular key.
>>> gpu_devices = tf.config.list_physical_devices('GPU')
>>> if gpu_devices:
... details = tf.config.experimental.get_device_details(gpu_devices[0])
... details.get('device_name', 'Unknown GPU')
Currently, details are only returned for GPUs. This function returns an
empty dict if passed a non-GPU device.
The returned dict may have the following keys:
* `'device_name'`: A human-readable name of the device as a string, e.g.
"Titan V". Unlike `tf.config.PhysicalDevice.name`, this will be the same for
multiple devices if each device is the same model. Currently only available
for GPUs.
* `'compute_capability'`: The
[compute capability](https://developer.nvidia.com/cuda-gpus) of the device
as a tuple of two ints, in the form `(major_version, minor_version)`. Only
available for NVIDIA GPUs.
Note: This is similar to `tf.sysconfig.get_build_info` in that both functions
can return information relating to GPUs. However, this function returns
run-time information about a specific device (such as a GPU's compute
capability), while `tf.sysconfig.get_build_info` returns compile-time
information about how TensorFlow was built (such as what version of CUDA
TensorFlow was built for).
Args:
device: A `tf.config.PhysicalDevice` returned by
`tf.config.list_physical_devices` or `tf.config.get_visible_devices`.
Returns:
A dict with string keys.
"""
return context.context().get_device_details(device)
@tf_export('config.get_logical_device_configuration',
'config.experimental.get_virtual_device_configuration')
@deprecation.deprecated_endpoints(
'config.experimental.get_virtual_device_configuration')
def get_logical_device_configuration(device):
"""Get the virtual device configuration for a `tf.config.PhysicalDevice`.
Returns the list of `tf.config.LogicalDeviceConfiguration`
objects previously configured by a call to
`tf.config.set_logical_device_configuration`.
For example:
>>> physical_devices = tf.config.list_physical_devices('CPU')
>>> assert len(physical_devices) == 1, "No CPUs found"
>>> configs = tf.config.get_logical_device_configuration(
... physical_devices[0])
>>> try:
... assert configs is None
... tf.config.set_logical_device_configuration(
... physical_devices[0],
... [tf.config.LogicalDeviceConfiguration(),
... tf.config.LogicalDeviceConfiguration()])
... configs = tf.config.get_logical_device_configuration(
... physical_devices[0])
... assert len(configs) == 2
... except:
... # Cannot modify virtual devices once initialized.
... pass
Args:
device: `PhysicalDevice` to query
Returns:
List of `tf.config.LogicalDeviceConfiguration` objects or
`None` if no virtual device configuration has been set for this physical
device.
"""
return context.context().get_logical_device_configuration(device)
@tf_export('config.set_logical_device_configuration',
'config.experimental.set_virtual_device_configuration')
@deprecation.deprecated_endpoints(
'config.experimental.set_virtual_device_configuration')
def set_logical_device_configuration(device, logical_devices):
"""Set the logical device configuration for a `tf.config.PhysicalDevice`.
A visible `tf.config.PhysicalDevice` will by default have a single
`tf.config.LogicalDevice` associated with it once the runtime is initialized.
Specifying a list of `tf.config.LogicalDeviceConfiguration` objects allows
multiple devices to be created on the same `tf.config.PhysicalDevice`.
Logical device configurations can be modified by calling this function as
long as the runtime is uninitialized. After the runtime is initialized
calling this function raises a RuntimeError.
The following example splits the CPU into 2 logical devices:
>>> physical_devices = tf.config.list_physical_devices('CPU')
>>> assert len(physical_devices) == 1, "No CPUs found"
>>> # Specify 2 virtual CPUs. Note currently memory limit is not supported.
>>> try:
... tf.config.set_logical_device_configuration(
... physical_devices[0],
... [tf.config.LogicalDeviceConfiguration(),
... tf.config.LogicalDeviceConfiguration()])
... logical_devices = tf.config.list_logical_devices('CPU')
... assert len(logical_devices) == 2
...
... tf.config.set_logical_device_configuration(
... physical_devices[0],
... [tf.config.LogicalDeviceConfiguration(),
... tf.config.LogicalDeviceConfiguration(),
... tf.config.LogicalDeviceConfiguration(),
... tf.config.LogicalDeviceConfiguration()])
... except:
... # Cannot modify logical devices once initialized.
... pass
The following example splits the GPU into 2 logical devices with 100 MB each:
>>> physical_devices = tf.config.list_physical_devices('GPU')
>>> try:
... tf.config.set_logical_device_configuration(
... physical_devices[0],
... [tf.config.LogicalDeviceConfiguration(memory_limit=100),
... tf.config.LogicalDeviceConfiguration(memory_limit=100)])
...
... logical_devices = tf.config.list_logical_devices('GPU')
... assert len(logical_devices) == len(physical_devices) + 1
...
... tf.config.set_logical_device_configuration(
... physical_devices[0],
... [tf.config.LogicalDeviceConfiguration(memory_limit=10),
... tf.config.LogicalDeviceConfiguration(memory_limit=10)])
... except:
... # Invalid device or cannot modify logical devices once initialized.
... pass
Args:
device: The `PhysicalDevice` to configure.
logical_devices: (optional) List of `tf.config.LogicalDeviceConfiguration`
objects to allocate for the specified `PhysicalDevice`. If None, the
default configuration will be used.
Raises:
ValueError: If argument validation fails.
RuntimeError: Runtime is already initialized.
"""
context.context().set_logical_device_configuration(device, logical_devices)
@tf_export('config.experimental.enable_mlir_bridge')
def enable_mlir_bridge():
"""Enables experimental MLIR-Based TensorFlow Compiler Bridge.
DO NOT USE, DEV AND TESTING ONLY AT THE MOMENT.
NOTE: MLIR-Based TensorFlow Compiler is under active development and has
missing features, please refrain from using. This API exists for development
and testing only.
TensorFlow Compiler Bridge (TF Bridge) is responsible for translating parts
of the TensorFlow graph into a form that can be accepted as an input by a
backend compiler such as XLA.
"""
context.context().enable_mlir_bridge = True
@tf_export('config.experimental.enable_mlir_graph_optimization')
def enable_mlir_graph_optimization():
"""Enables experimental MLIR-Based TensorFlow Compiler Optimizations.
DO NOT USE, DEV AND TESTING ONLY AT THE MOMENT.
NOTE: MLIR-Based TensorFlow Compiler is under active development and has
missing features, please refrain from using. This API exists for development
and testing only.
TensorFlow Compiler Optimizations are responsible for general graph-level
optimizations that, in the current stack, are mostly done by Grappler graph
optimizers.
"""
context.context().enable_mlir_graph_optimization = True
@tf_export('config.experimental.disable_mlir_bridge')
def disable_mlir_bridge():
"""Disables experimental MLIR-Based TensorFlow Compiler Bridge."""
context.context().enable_mlir_bridge = False
@tf_export('config.experimental.disable_mlir_graph_optimization')
def disable_mlir_graph_optimization():
"""Disables experimental MLIR-Based TensorFlow Compiler Optimizations."""
context.context().enable_mlir_graph_optimization = False
@tf_export('config.experimental.enable_op_determinism', v1=[])
def enable_op_determinism():
"""Configures TensorFlow ops to run deterministically.
When op determinism is enabled, TensorFlow ops will be deterministic. This
means that if an op is run multiple times with the same inputs on the same
hardware, it will have the exact same outputs each time. This is useful for
debugging models. Note that determinism in general comes at the expense of
lower performance and so your model may run slower when op determinism is
enabled.
If you want your TensorFlow program to run deterministically, put the
following code near the start of your program.
```python
tf.keras.utils.set_random_seed(1)
tf.config.experimental.enable_op_determinism()
```
Calling `tf.keras.utils.set_random_seed` sets the Python seed, the NumPy seed,
and the TensorFlow seed. Setting these seeds is necessary to ensure any random
numbers your program generates are also deterministic.
By default, op determinism is not enabled, so ops might return different
results when run with the same inputs. These differences are often caused by
the use of asynchronous threads within the op nondeterministically changing
the order in which floating-point numbers are added. Most of these cases of
nondeterminism occur on GPUs, which have thousands of hardware threads that
are used to run ops. Enabling determinism directs such ops to use a different
algorithm, one that does not use threads in a nondeterministic way.
Another potential source of nondeterminism is `tf.data` based data processing.
Typically, this can introduce nondeterminism due to the use of parallelism in
methods such as `Dataset.map` producing inputs or running stateful ops in a
nondeterministic order. Enabling determinism will remove such sources of
nondeterminism.
Enabling determinism will likely make your model or your `tf.data` data
processing slower. For example, `Dataset.map` can become several orders of
magnitude slower when the map function has random ops or other stateful ops.
See the “Determinism and tf.data” section below for more details. In future
TensorFlow releases, we plan on improving the performance of determinism,
especially for common scenarios such as `Dataset.map`.
Certain ops will raise an `UnimplementedError` because they do not yet have a
deterministic implementation. Additionally, due to bugs, some ops might be
nondeterministic and not raise an `UnimplementedError`. If you encounter such
ops, please [file an issue](https://github.com/tensorflow/tensorflow/issues).
An example of enabling determinism follows. The
`tf.nn.softmax_cross_entropy_with_logits` op is run multiple times and the
output is shown to be the same each time. This example would likely fail when
run on a GPU if determinism were not enabled, because
`tf.nn.softmax_cross_entropy_with_logits` uses a nondeterministic algorithm on
GPUs by default.
```python
labels = tf.random.normal((1, 10000))
logits = tf.random.normal((1, 10000))
output = tf.nn.softmax_cross_entropy_with_logits(labels=labels,
logits=logits)
for _ in range(5):
output2 = tf.nn.softmax_cross_entropy_with_logits(labels=labels,
logits=logits)
tf.debugging.assert_equal(output, output2)
```
## Writing deterministic models
You can make your models deterministic by enabling op determinism. This
means that you can train a model and finish each run with exactly the same
trainable variables. This also means that the inferences of your
previously-trained model will be exactly the same on each run. Typically,
models can be made deterministic by simply setting the seeds and enabling
op determinism, as in the example above. However, to guarantee that your
model operates deterministically, you must meet all the following
requirements:
* Call `tf.config.experimental.enable_op_determinism()`, as mentioned above.
* Reproducibly reset any pseudorandom number generators (PRNGs) you’re using,
such as by setting the seeds for the default PRNGs in TensorFlow, Python,
and NumPy, as mentioned above. Note that certain newer NumPy classes like
  `numpy.random.default_rng` ignore the global NumPy seed, so a seed must be
explicitly passed to such classes, if used.
* Use the same hardware configuration in every run.
* Use the same software environment in every run (OS, checkpoints, version of
CUDA and TensorFlow, environmental variables, etc). Note that determinism is
not guaranteed across different versions of TensorFlow.
* Do not use constructs outside TensorFlow that are nondeterministic, such as
reading from `/dev/random` or using multiple threads/processes in ways that
influence TensorFlow’s behavior.
* Ensure your input pipeline is deterministic. If you use `tf.data`, this is
done automatically (at the expense of performance). See "Determinism and
tf.data" below for more information.
* Do not use `tf.compat.v1.Session` and
`tf.distribute.experimental.ParameterServerStrategy`, which can introduce
nondeterminism. Besides ops (including `tf.data` ops), these are the only
known potential sources of nondeterminism within TensorFlow, (if you
find more, please file an issue). Note that `tf.compat.v1.Session` is
required to use the TF1 API, so determinism cannot be guaranteed when using
the TF1 API.
* Do not use nondeterministic custom ops.
## Additional details on determinism
For stateful ops to be deterministic, the state of the system must be the same
every time the op is run. For example the output of `tf.Variable.sparse_read`
(obviously) depends on both the variable value and the `indices` function
parameter. When determinism is enabled, the side effects of stateful ops are
deterministic.
TensorFlow’s random ops, such as `tf.random.normal`, will raise a
`RuntimeError` if determinism is enabled and a seed has not been set. However,
attempting to generate nondeterministic random numbers using Python or NumPy
will not raise such errors. Make sure you remember to set the Python and NumPy
seeds. Calling `tf.keras.utils.set_random_seed` is an easy way to set all
three seeds.
Note that latency, memory consumption, throughput, and other performance
characteristics are *not* made deterministic by enabling op determinism.
Only op outputs and side effects are made deterministic. Additionally, a model
may nondeterministically raise a `tf.errors.ResourceExhaustedError` from a
lack of memory due to the fact that memory consumption is nondeterministic.
## Determinism and tf.data
Enabling deterministic ops makes `tf.data` deterministic in several ways:
1. For dataset methods with a `deterministic` argument, such as `Dataset.map`
and `Dataset.batch`, the `deterministic` argument is overridden to be
`True` irrespective of its setting.
2. The `tf.data.Options.experimental_deterministic` option is overridden to be
   `True` irrespective of its setting.
3. In `Dataset.map` and `Dataset.interleave`, if the map or interleave
function has stateful random ops or other stateful ops, the function will
run serially instead of in parallel. This means the `num_parallel_calls`
argument to `map` and `interleave` is effectively ignored.
4. Prefetching with `Dataset.prefetch` will be disabled if any function run
as part of the input pipeline has certain stateful ops. Similarly, any
dataset method with a `num_parallel_calls` argument will be made to run
serially if any function in the input pipeline has such stateful ops.
Legacy random ops such as `tf.random.normal` will *not* cause such datasets
to be changed, but most other stateful ops will.
Unfortunately, due to (3), performance can be greatly reduced when stateful
ops are used in `Dataset.map` due to no longer running the map function in
parallel. Common examples of stateful ops used in `Dataset.map` are random
ops, such as `tf.random.normal`, which are typically used for distortions. One
way to work around this is to use stateless random ops instead. Alternatively,
you can hoist all random ops into their own separate `Dataset.map` call, making
the original `Dataset.map` call stateless and thus avoiding the need to
serialize its execution.
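For instance, a rough sketch of the hoisting workaround (illustrative only;
it assumes `dataset` yields float32 image tensors, and the helper names are
made up rather than part of any API):

```python
def add_random_noise(image):
  # Stateful random op: under determinism, this map must run serially.
  return image + tf.random.uniform(tf.shape(image), maxval=0.1)

def deterministic_preprocessing(image):
  # Purely deterministic work, so this map can keep its parallelism.
  return tf.clip_by_value(image * 2.0 - 1.0, -1.0, 1.0)

dataset = dataset.map(add_random_noise)
dataset = dataset.map(deterministic_preprocessing,
                      num_parallel_calls=tf.data.AUTOTUNE)
```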
(4) can also cause performance to be reduced, but occurs less frequently than
(3) because legacy random ops do not cause (4) to take effect. However, unlike
(3), when there are non-random stateful ops in a user-defined function, every
`map` and `interleave` dataset is affected, instead of just the `map` or
`interleave` dataset with the function that has stateful ops. Additionally,
`prefetch` datasets and any dataset with the `num_parallel_calls` argument are
also affected.
"""
_pywrap_determinism.enable(True)
def disable_op_determinism():
"""Disables op determinism."""
_pywrap_determinism.enable(False)
def is_op_determinism_enabled():
"""Returns True if op determinism is enabled."""
return _pywrap_determinism.is_enabled()
| apache-2.0 |
heli522/scikit-learn | examples/linear_model/plot_lasso_model_selection.py | 308 | 5431 | """
===================================================
Lasso model selection: Cross-Validation / AIC / BIC
===================================================
Use the Akaike information criterion (AIC), the Bayes Information
criterion (BIC) and cross-validation to select an optimal value
of the regularization parameter alpha of the :ref:`lasso` estimator.
Results obtained with LassoLarsIC are based on AIC/BIC criteria.
Information-criterion based model selection is very fast, but it
relies on a proper estimation of the degrees of freedom. The criteria are
derived for large samples (asymptotic results) and assume the model
is correct, i.e. that the data are actually generated by this model.
They also tend to break when the problem is badly conditioned
(more features than samples).
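As a rough sketch (assuming Gaussian errors; the exact constants used by
LassoLarsIC may differ between versions), the criteria take the familiar form
AIC ~ n * log(RSS / n) + 2 * df and BIC ~ n * log(RSS / n) + log(n) * df,
where df is the number of nonzero coefficients selected along the Lasso path.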
For cross-validation, we use 20-fold with 2 algorithms to compute the
Lasso path: coordinate descent, as implemented by the LassoCV class, and
Lars (least angle regression) as implemented by the LassoLarsCV class.
Both algorithms give roughly the same results. They differ with regards
to their execution speed and sources of numerical errors.
Lars computes a path solution only for each kink in the path. As a
result, it is very efficient when there are only a few kinks, which is
the case if there are few features or samples. Also, it is able to
compute the full path without setting any meta parameter. In contrast,
coordinate descent computes the path points on a pre-specified
grid (here we use the default). Thus it is more efficient if the number
of grid points is smaller than the number of kinks in the path. Such a
strategy can be interesting if the number of features is really large
and there are enough samples to select a large number of them. In terms of
numerical errors, for heavily correlated variables, Lars will accumulate
more errors, while the coordinate descent algorithm will only sample the
path on a grid.
Note how the optimal value of alpha varies for each fold. This
illustrates why nested cross-validation is necessary when trying to
evaluate the performance of a method for which a parameter is chosen by
cross-validation: this choice of parameter may not be optimal for unseen
data.
"""
print(__doc__)
# Author: Olivier Grisel, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LassoCV, LassoLarsCV, LassoLarsIC
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
rng = np.random.RandomState(42)
X = np.c_[X, rng.randn(X.shape[0], 14)] # add some bad features
# normalize data as done by Lars to allow for comparison
X /= np.sqrt(np.sum(X ** 2, axis=0))
##############################################################################
# LassoLarsIC: least angle regression with BIC/AIC criterion
model_bic = LassoLarsIC(criterion='bic')
t1 = time.time()
model_bic.fit(X, y)
t_bic = time.time() - t1
alpha_bic_ = model_bic.alpha_
model_aic = LassoLarsIC(criterion='aic')
model_aic.fit(X, y)
alpha_aic_ = model_aic.alpha_
def plot_ic_criterion(model, name, color):
alpha_ = model.alpha_
alphas_ = model.alphas_
criterion_ = model.criterion_
plt.plot(-np.log10(alphas_), criterion_, '--', color=color,
linewidth=3, label='%s criterion' % name)
plt.axvline(-np.log10(alpha_), color=color, linewidth=3,
label='alpha: %s estimate' % name)
plt.xlabel('-log(alpha)')
plt.ylabel('criterion')
plt.figure()
plot_ic_criterion(model_aic, 'AIC', 'b')
plot_ic_criterion(model_bic, 'BIC', 'r')
plt.legend()
plt.title('Information-criterion for model selection (training time %.3fs)'
% t_bic)
##############################################################################
# LassoCV: coordinate descent
# Compute paths
print("Computing regularization path using the coordinate descent lasso...")
t1 = time.time()
model = LassoCV(cv=20).fit(X, y)
t_lasso_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.alphas_)
plt.figure()
ymin, ymax = 2300, 3800
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha: CV estimate')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: coordinate descent '
'(train time: %.2fs)' % t_lasso_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
##############################################################################
# LassoLarsCV: least angle regression
# Compute paths
print("Computing regularization path using the Lars lasso...")
t1 = time.time()
model = LassoLarsCV(cv=20).fit(X, y)
t_lasso_lars_cv = time.time() - t1
# Display results
m_log_alphas = -np.log10(model.cv_alphas_)
plt.figure()
plt.plot(m_log_alphas, model.cv_mse_path_, ':')
plt.plot(m_log_alphas, model.cv_mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold: Lars (train time: %.2fs)'
% t_lasso_lars_cv)
plt.axis('tight')
plt.ylim(ymin, ymax)
plt.show()
| bsd-3-clause |
DonBeo/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 12 | 2594 | import unittest
import nose
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less
from sklearn.mixture.tests.test_gmm import GMMTester
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
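    # exp(a) == 2 * v, and since v sums to 1, normalizing 2 * v recovers v.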
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
if __name__ == '__main__':
nose.runmodule()
| bsd-3-clause |
ChadFulton/statsmodels | examples/incomplete/wls_extended.py | 1 | 16137 | """
Weighted Least Squares
The example is extended to look at the meaning of rsquared in WLS,
at outliers, and to compare with RLM and a short bootstrap.
"""
from __future__ import print_function
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
data = sm.datasets.ccard.load()
data.exog = sm.add_constant(data.exog, prepend=False)
ols_fit = sm.OLS(data.endog, data.exog).fit()
# perhaps the residuals from this fit depend on the square of income
incomesq = data.exog[:,2]
plt.scatter(incomesq, ols_fit.resid)
#@savefig wls_resid_check.png
plt.grid()
# If we think that the variance is proportional to income**2
# we would want to weight the regression by income
# the weights argument in WLS weights the regression by its square root
# and since income enters the equation, if we have income/income
# it becomes the constant, so we would want to perform
# this type of regression without an explicit constant in the design
#..data.exog = data.exog[:,:-1]
wls_fit = sm.WLS(data.endog, data.exog[:,:-1], weights=1/incomesq).fit()
# This however, leads to difficulties in interpreting the post-estimation
# statistics. Statsmodels does not yet handle this elegantly, but
# the following may be more appropriate
# explained sum of squares
ess = wls_fit.uncentered_tss - wls_fit.ssr
# rsquared
rsquared = ess/wls_fit.uncentered_tss
# mean squared error of the model
mse_model = ess/(wls_fit.df_model + 1) # add back the dof of the constant
# f statistic
fvalue = mse_model/wls_fit.mse_resid
# adjusted r-squared
rsquared_adj = 1 -(wls_fit.nobs)/(wls_fit.df_resid)*(1-rsquared)
#Trying to figure out what's going on in this example
#----------------------------------------------------
#JP: I need to look at this again. Even if I exclude the weight variable
# from the regressors and keep the constant in then the reported rsquared
# stays small. Below also compared using squared or sqrt of weight variable.
# TODO: need to add 45 degree line to graphs
wls_fit3 = sm.WLS(data.endog, data.exog[:,(0,1,3,4)], weights=1/incomesq).fit()
print(wls_fit3.summary())
print('corrected rsquared')
print((wls_fit3.uncentered_tss - wls_fit3.ssr)/wls_fit3.uncentered_tss)
plt.figure();
plt.title('WLS dropping heteroscedasticity variable from regressors');
plt.plot(data.endog, wls_fit3.fittedvalues, 'o');
plt.xlim([0,2000]);
#@savefig wls_drop_het.png
plt.ylim([0,2000]);
print('raw correlation of endog and fittedvalues')
print(np.corrcoef(data.endog, wls_fit.fittedvalues))
print('raw correlation coefficient of endog and fittedvalues squared')
print(np.corrcoef(data.endog, wls_fit.fittedvalues)[0,1]**2)
# compare with robust regression,
# heteroscedasticity correction downweights the outliers
rlm_fit = sm.RLM(data.endog, data.exog).fit()
plt.figure();
plt.title('using robust for comparison');
plt.plot(data.endog, rlm_fit.fittedvalues, 'o');
plt.xlim([0,2000]);
#@savefig wls_robust_compare.png
plt.ylim([0,2000]);
#What is going on? A more systematic look at the data
#----------------------------------------------------
# two helper functions
def getrsq(fitresult):
'''calculates rsquared residual, total and explained sums of squares
Parameters
----------
fitresult : instance of Regression Result class, or tuple of (resid, endog) arrays
regression residuals and endogenous variable
Returns
-------
rsquared
residual sum of squares
(centered) total sum of squares
explained sum of squares (for centered)
'''
if hasattr(fitresult, 'resid') and hasattr(fitresult, 'model'):
resid = fitresult.resid
endog = fitresult.model.endog
nobs = fitresult.nobs
else:
resid = fitresult[0]
endog = fitresult[1]
nobs = resid.shape[0]
rss = np.dot(resid, resid)
tss = np.var(endog)*nobs
return 1-rss/tss, rss, tss, tss-rss
def index_trim_outlier(resid, k):
'''returns indices to residual array with k outliers removed
Parameters
----------
resid : array_like, 1d
data vector, usually residuals of a regression
k : int
number of outliers to remove
Returns
-------
trimmed_index : array, 1d
index array with k outliers removed
outlier_index : array, 1d
index array of k outliers
Notes
-----
Outliers are defined as the k observations with the largest
absolute values.
'''
sort_index = np.argsort(np.abs(resid))
# index of non-outlier
trimmed_index = np.sort(sort_index[:-k])
outlier_index = np.sort(sort_index[-k:])
return trimmed_index, outlier_index
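# For example, index_trim_outlier(np.array([1., -5., 2., 10.]), 1)
# returns (array([0, 1, 2]), array([3])): the observation with the largest
# absolute value is dropped from the trimmed index.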
#Comparing estimation results for ols, rlm and wls with and without outliers
#---------------------------------------------------------------------------
#ols_test_fit = sm.OLS(data.endog, data.exog).fit()
olskeep, olsoutl = index_trim_outlier(ols_fit.resid, 2)
print('ols outliers', olsoutl, ols_fit.resid[olsoutl])
ols_fit_rm2 = sm.OLS(data.endog[olskeep], data.exog[olskeep,:]).fit()
rlm_fit_rm2 = sm.RLM(data.endog[olskeep], data.exog[olskeep,:]).fit()
#weights = 1/incomesq
results = [ols_fit, ols_fit_rm2, rlm_fit, rlm_fit_rm2]
#Note: I think incomesq is already square
for weights in [1/incomesq, 1/incomesq**2, np.sqrt(incomesq)]:
print('\nComparison OLS and WLS with and without outliers')
wls_fit0 = sm.WLS(data.endog, data.exog, weights=weights).fit()
wls_fit_rm2 = sm.WLS(data.endog[olskeep], data.exog[olskeep,:],
weights=weights[olskeep]).fit()
wlskeep, wlsoutl = index_trim_outlier(ols_fit.resid, 2)
print('2 outliers candidates and residuals')
print(wlsoutl, wls_fit.resid[olsoutl])
# redundant because ols and wls outliers are the same:
##wls_fit_rm2_ = sm.WLS(data.endog[wlskeep], data.exog[wlskeep,:],
## weights=1/incomesq[wlskeep]).fit()
print('outliers ols, wls:', olsoutl, wlsoutl)
print('rsquared')
print('ols vs ols rm2', ols_fit.rsquared, ols_fit_rm2.rsquared)
print('wls vs wls rm2', wls_fit0.rsquared, wls_fit_rm2.rsquared) #, wls_fit_rm2_.rsquared
print('compare R2_resid versus R2_wresid')
print('ols minus 2', getrsq(ols_fit_rm2)[0],)
print(getrsq((ols_fit_rm2.wresid, ols_fit_rm2.model.wendog))[0])
print('wls ', getrsq(wls_fit)[0],)
print(getrsq((wls_fit.wresid, wls_fit.model.wendog))[0])
print('wls minus 2', getrsq(wls_fit_rm2)[0])
# next is same as wls_fit_rm2.rsquared for cross checking
print(getrsq((wls_fit_rm2.wresid, wls_fit_rm2.model.wendog))[0])
#print(getrsq(wls_fit_rm2_)[0],
#print(getrsq((wls_fit_rm2_.wresid, wls_fit_rm2_.model.wendog))[0]
results.extend([wls_fit0, wls_fit_rm2])
print(' ols ols_rm2 rlm rlm_rm2 wls (lin) wls_rm2 (lin) wls (squ) wls_rm2 (squ) wls (sqrt) wls_rm2 (sqrt)')
print('Parameter estimates')
print(np.column_stack([r.params for r in results]))
print('R2 original data, next line R2 weighted data')
print(np.column_stack([getattr(r, 'rsquared', None) for r in results]))
print('Standard errors')
print(np.column_stack([getattr(r, 'bse', None) for r in results]))
print('Heteroscedasticity robust standard errors (with ols)')
print('with outliers')
print(np.column_stack([getattr(ols_fit, se, None) for se in ['HC0_se', 'HC1_se', 'HC2_se', 'HC3_se']]))
#..'''
#..
#.. ols ols_rm2 rlm rlm_rm2 wls (lin) wls_rm2 (lin) wls (squ) wls_rm2 (squ) wls (sqrt) wls_rm2 (sqrt)
#..Parameter estimates
#..[[ -3.08181404 -5.06103843 -4.98510966 -5.34410309 -2.69418516 -3.1305703 -1.43815462 -1.58893054 -3.57074829 -6.80053364]
#.. [ 234.34702702 115.08753715 129.85391456 109.01433492 158.42697752 128.38182357 60.95113284 100.25000841 254.82166855 103.75834726]
#.. [ -14.99684418 -5.77558429 -6.46204829 -4.77409191 -7.24928987 -7.41228893 6.84943071 -3.34972494 -16.40524256 -4.5924465 ]
#.. [ 27.94090839 85.46566835 89.91389709 95.85086459 60.44877369 79.7759146 55.9884469 60.97199734 -3.8085159 84.69170048]
#.. [-237.1465136 39.51639838 -15.50014814 31.39771833 -114.10886935 -40.04207242 -6.41976501 -38.83583228 -260.72084271 117.20540179]]
#..
#..R2 original data, next line R2 weighted data
#..[[ 0.24357792 0.31745994 0.19220308 0.30527648 0.22861236 0.3112333 0.06573949 0.29366904 0.24114325 0.31218669]]
#..[[ 0.24357791 0.31745994 None None 0.05936888 0.0679071 0.06661848 0.12769654 0.35326686 0.54681225]]
#..
#..-> R2 with weighted data is jumping all over
#..
#..standard errors
#..[[ 5.51471653 3.31028758 2.61580069 2.39537089 3.80730631 2.90027255 2.71141739 2.46959477 6.37593755 3.39477842]
#.. [ 80.36595035 49.35949263 38.12005692 35.71722666 76.39115431 58.35231328 87.18452039 80.30086861 86.99568216 47.58202096]
#.. [ 7.46933695 4.55366113 3.54293763 3.29509357 9.72433732 7.41259156 15.15205888 14.10674821 7.18302629 3.91640711]
#.. [ 82.92232357 50.54681754 39.33262384 36.57639175 58.55088753 44.82218676 43.11017757 39.31097542 96.4077482 52.57314209]
#.. [ 199.35166485 122.1287718 94.55866295 88.3741058 139.68749646 106.89445525 115.79258539 105.99258363 239.38105863 130.32619908]]
#..
#..robust standard errors (with ols)
#..with outliers
#.. HC0_se HC1_se HC2_se HC3_se'
#..[[ 3.30166123 3.42264107 3.4477148 3.60462409]
#.. [ 88.86635165 92.12260235 92.08368378 95.48159869]
#.. [ 6.94456348 7.19902694 7.19953754 7.47634779]
#.. [ 92.18777672 95.56573144 95.67211143 99.31427277]
#.. [ 212.9905298 220.79495237 221.08892661 229.57434782]]
#..
#..removing 2 outliers
#..[[ 2.57840843 2.67574088 2.68958007 2.80968452]
#.. [ 36.21720995 37.58437497 37.69555106 39.51362437]
#.. [ 3.1156149 3.23322638 3.27353882 3.49104794]
#.. [ 50.09789409 51.98904166 51.89530067 53.79478834]
#.. [ 94.27094886 97.82958699 98.25588281 102.60375381]]
#..
#..
#..'''
# a quick bootstrap analysis
# --------------------------
#
#(I didn't check whether this is fully correct statistically)
#**With OLS on full sample**
nobs, nvar = data.exog.shape
niter = 2000
bootres = np.zeros((niter, nvar*2))
for it in range(niter):
rind = np.random.randint(nobs, size=nobs)
endog = data.endog[rind]
exog = data.exog[rind,:]
res = sm.OLS(endog, exog).fit()
bootres[it, :nvar] = res.params
bootres[it, nvar:] = res.bse
np.set_printoptions(linewidth=200)
print('Bootstrap Results of parameters and parameter standard deviation OLS')
print('Parameter estimates')
print('median', np.median(bootres[:,:5], 0))
print('mean ', np.mean(bootres[:,:5], 0))
print('std ', np.std(bootres[:,:5], 0))
print('Standard deviation of parameter estimates')
print('median', np.median(bootres[:,5:], 0))
print('mean ', np.mean(bootres[:,5:], 0))
print('std ', np.std(bootres[:,5:], 0))
plt.figure()
for i in range(4):
plt.subplot(2,2,i+1)
plt.hist(bootres[:,i],50)
plt.title('var%d' % i)
#@savefig wls_bootstrap.png
plt.figtext(0.5, 0.935, 'OLS Bootstrap',
ha='center', color='black', weight='bold', size='large')
#**With WLS on sample with outliers removed**
data_endog = data.endog[olskeep]
data_exog = data.exog[olskeep,:]
incomesq_rm2 = incomesq[olskeep]
nobs, nvar = data_exog.shape
niter = 500 # a bit slow
bootreswls = np.zeros((niter, nvar*2))
for it in range(niter):
rind = np.random.randint(nobs, size=nobs)
endog = data_endog[rind]
exog = data_exog[rind,:]
    res = sm.WLS(endog, exog, weights=1/incomesq_rm2[rind]).fit()
bootreswls[it, :nvar] = res.params
bootreswls[it, nvar:] = res.bse
print('Bootstrap Results of parameters and parameter standard deviation',)
print('WLS removed 2 outliers from sample')
print('Parameter estimates')
print('median', np.median(bootreswls[:,:5], 0))
print('mean ', np.mean(bootreswls[:,:5], 0))
print('std ', np.std(bootreswls[:,:5], 0))
print('Standard deviation of parameter estimates')
print('median', np.median(bootreswls[:,5:], 0))
print('mean ', np.mean(bootreswls[:,5:], 0))
print('std ', np.std(bootreswls[:,5:], 0))
plt.figure()
for i in range(4):
plt.subplot(2,2,i+1)
plt.hist(bootreswls[:,i],50)
plt.title('var%d' % i)
#@savefig wls_bootstrap_rm2.png
plt.figtext(0.5, 0.935, 'WLS rm2 Bootstrap',
ha='center', color='black', weight='bold', size='large')
#..plt.show()
#..plt.close('all')
#::
#
# The following a random variables not fixed by a seed
#
# Bootstrap Results of parameters and parameter standard deviation
# OLS
#
# Parameter estimates
# median [ -3.26216383 228.52546429 -14.57239967 34.27155426 -227.02816597]
# mean [ -2.89855173 234.37139359 -14.98726881 27.96375666 -243.18361746]
# std [ 3.78704907 97.35797802 9.16316538 94.65031973 221.79444244]
#
# Standard deviation of parameter estimates
# median [ 5.44701033 81.96921398 7.58642431 80.64906783 200.19167735]
# mean [ 5.44840542 86.02554883 8.56750041 80.41864084 201.81196849]
# std [ 1.43425083 29.74806562 4.22063268 19.14973277 55.34848348]
#
# Bootstrap Results of parameters and parameter standard deviation
# WLS removed 2 outliers from sample
#
# Parameter estimates
# median [ -3.95876112 137.10419042 -9.29131131 88.40265447 -44.21091869]
# mean [ -3.67485724 135.42681207 -8.7499235 89.74703443 -46.38622848]
# std [ 2.96908679 56.36648967 7.03870751 48.51201918 106.92466097]
#
# Standard deviation of parameter estimates
# median [ 2.89349748 59.19454402 6.70583332 45.40987953 119.05241283]
# mean [ 2.97600894 60.14540249 6.92102065 45.66077486 121.35519673]
# std [ 0.55378808 11.77831934 1.69289179 7.4911526 23.72821085]
#
#
#
#Conclusion: problem with outliers and possibly heteroscedasticity
#-----------------------------------------------------------------
#
#in bootstrap results
#
#* bse in OLS underestimates the standard deviation of the parameters
# compared to standard deviation in bootstrap
#* OLS heteroscedasticity corrected standard errors for the original
# data (above) are close to bootstrap std
#* using WLS with 2 outliers removed has a relatively good match between
# the mean or median bse and the std of the parameter estimates in the
# bootstrap
#
#We could also include rsquared in bootstrap, and do it also for RLM.
#The problems could also mean that the linearity assumption is violated,
#e.g. try non-linear transformation of exog variables, but linear
#in parameters.
#
#
#for statsmodels
#
# * In this case rsquared for original data looks less random/arbitrary.
# * Don't change definition of rsquared from centered tss to uncentered
# tss when calculating rsquared in WLS if the original exog contains
# a constant. The increase in rsquared because of a change in definition
# will be very misleading.
# * Whether there is a constant in the transformed exog, wexog, or not,
# might affect also the degrees of freedom calculation, but I haven't
# checked this. I would guess that the df_model should stay the same,
# but needs to be verified with a textbook.
# * df_model has to be adjusted if the original data does not have a
# constant, e.g. when regressing an endog on a single exog variable
# without constant. This case might require also a redefinition of
# the rsquare and f statistic for the regression anova to use the
# uncentered tss.
# This can be done through keyword parameter to model.__init__ or
#   through autodetection with hasconst = (exog.var(0)<1e-10).any()
#   I'm not sure about fixed effects with a full dummy set but
#   without a constant. In this case autodetection wouldn't work this
# way. Also, I'm not sure whether a ddof keyword parameter can also
# handle the hasconst case.
| bsd-3-clause |
DonBeo/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 248 | 1982 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=n_samples)
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, label='Elastic net coefficients')
plt.plot(lasso.coef_, label='Lasso coefficients')
plt.plot(coef, '--', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
mckinziebrandon/DeepChatModels | tests/test_data.py | 1 | 4834 | import logging
import pdb
import sys
sys.path.append("..")
import os
import unittest
import tensorflow as tf
from pydoc import locate
import chatbot
from utils import io_utils
import data
from chatbot.globals import DEFAULT_FULL_CONFIG
dir = os.path.dirname(os.path.realpath(__file__))
from tests.utils import *
class TestData(unittest.TestCase):
"""Tests for the datsets."""
def setUp(self):
logging.basicConfig(level=logging.INFO)
tf.logging.set_verbosity('ERROR')
self.supported_datasets = ['Reddit', 'Ubuntu', 'Cornell']
self.default_flags = {
'pretrained_dir': TEST_FLAGS.pretrained_dir,
'config': TEST_FLAGS.config,
'model': TEST_FLAGS.model,
'debug': TEST_FLAGS.debug}
def test_basic(self):
"""Instantiate all supported datasets and check they satisfy basic conditions.
THIS MAY TAKE A LONG TIME TO COMPLETE. Since we are testing that the
supported datasets can be instantiated successfully, it necessarily
means that the data must exist in proper format. Since the program
will generate the proper format(s) if not found, this will take
about 15 minutes if run from a completely fresh setup.
Otherwise, a few seconds. :)
"""
if os.getenv('DATA') is None \
and not os.path.exists('/home/brandon/Datasets'):
print('To run this test, please enter the path to your datasets: ')
data_dir = input()
else:
data_dir = '/home/brandon/Datasets'
for dataset_name in self.supported_datasets:
logging.info('Testing %s', dataset_name)
incomplete_params = {
'vocab_size': 40000,
'max_seq_len': 10}
self.assertIsNotNone(incomplete_params)
dataset_class = getattr(data, dataset_name)
# User must specify data_dir, which we have not done yet.
self.assertRaises(ValueError, dataset_class, incomplete_params)
config = io_utils.parse_config(flags=TEST_FLAGS)
dataset_params = config.get('dataset_params')
dataset_params['data_dir'] = os.path.join(
data_dir,
dataset_name.lower())
dataset = dataset_class(dataset_params)
# Ensure all params from DEFAULT_FULL_CONFIG['dataset_params']
# are set to a value in our dataset object.
for default_key in DEFAULT_FULL_CONFIG['dataset_params']:
self.assertIsNotNone(getattr(dataset, default_key))
# Check that all dataset properties exist.
self.assertIsNotNone(dataset.name)
self.assertIsNotNone(dataset.word_to_idx)
self.assertIsNotNone(dataset.idx_to_word)
self.assertIsNotNone(dataset.vocab_size)
self.assertIsNotNone(dataset.max_seq_len)
# Check that the properties satisfy basic expectations.
self.assertEqual(len(dataset.word_to_idx), len(dataset.idx_to_word))
self.assertEqual(len(dataset.word_to_idx), dataset.vocab_size)
self.assertEqual(len(dataset.idx_to_word), dataset.vocab_size)
incomplete_params.clear()
dataset_params.clear()
def test_cornell(self):
"""Train a bot on cornell and display responses when given
training data as input -- a sanity check that the data is clean.
"""
flags = Flags(
model_params=dict(
ckpt_dir='out/tests/test_cornell',
reset_model=True,
steps_per_ckpt=50,
base_cell='GRUCell',
num_layers=1,
state_size=128,
embed_size=64,
max_steps=50),
dataset_params=dict(
vocab_size=50000,
max_seq_len=8,
data_dir='/home/brandon/Datasets/cornell'),
dataset='Cornell',
**self.default_flags)
bot, dataset = create_bot(flags=flags, return_dataset=True)
bot.train()
del bot
# Recreate bot (its session is automatically closed after training).
flags.model_params['reset_model'] = False
flags.model_params['decode'] = True
bot, dataset = create_bot(flags, return_dataset=True)
for inp_sent, resp_sent in dataset.pairs_generator(100):
print('\nHuman:', inp_sent)
response = bot.respond(inp_sent)
if response == resp_sent:
print('Robot: %s\nCorrect!' % response)
else:
print('Robot: %s\nExpected: %s' % (
response, resp_sent))
if __name__ == '__main__':
tf.logging.set_verbosity('ERROR')
unittest.main()
| mit |
negrinho/deep_architect | dev/examples/full_benchmarks/arch_search.py | 1 | 9376 | import argparse
import deep_architect.utils as ut
from deep_architect.contrib.misc.datasets.loaders import (load_cifar10,
load_mnist)
from deep_architect.contrib.misc.datasets.dataset import InMemoryDataset
from deep_architect.searchers import common as se
from deep_architect.contrib.misc import gpu_utils
from deep_architect import search_logging as sl
from search_space_factory import name_to_search_space_factory_fn
from searcher import name_to_searcher_fn
from deep_architect.contrib.misc.evaluators.tensorflow.classification import SimpleClassifierEvaluator
from deep_architect.contrib.communicators.communicator import get_communicator
def start_searcher(comm,
searcher,
resume_if_exists,
folderpath,
search_name,
searcher_load_path,
num_samples=-1,
num_epochs=-1,
save_every=1):
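    """Master loop for the distributed architecture search.

    Samples architectures from `searcher`, publishes them to ready workers via
    `comm`, collects evaluation results, updates the searcher, and periodically
    saves the searcher state and progress counters to the search folder.
    """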
assert num_samples != -1 or num_epochs != -1
print('SEARCHER')
sl.create_search_folderpath(folderpath, search_name)
search_data_folder = sl.get_search_data_folderpath(folderpath, search_name)
save_filepath = ut.join_paths((search_data_folder, searcher_load_path))
models_sampled = 0
epochs = 0
finished = 0
killed = 0
best_accuracy = 0.
# Load previous searcher
if resume_if_exists:
searcher.load(search_data_folder)
state = ut.read_jsonfile(save_filepath)
epochs = state['epochs']
killed = state['killed']
models_sampled = state['models_finished']
finished = state['models_finished']
while (finished < models_sampled or killed < comm.num_workers):
# Search end conditions
cont = num_samples == -1 or models_sampled < num_samples
cont = cont and (num_epochs == -1 or epochs < num_epochs)
if cont:
# See whether workers are ready to consume architectures
if comm.is_ready_to_publish_architecture():
eval_logger = sl.EvaluationLogger(folderpath, search_name,
models_sampled)
_, _, vs, searcher_eval_token = searcher.sample()
eval_logger.log_config(vs, searcher_eval_token)
comm.publish_architecture_to_worker(vs, models_sampled,
searcher_eval_token)
models_sampled += 1
else:
if comm.is_ready_to_publish_architecture():
comm.kill_worker()
killed += 1
# See which workers have finished evaluation
for worker in range(comm.num_workers):
msg = comm.receive_results_in_master(worker)
if msg is not None:
results, model_id, searcher_eval_token = msg
eval_logger = sl.EvaluationLogger(folderpath, search_name,
model_id)
eval_logger.log_results(results)
if 'epoch' in results:
epochs = max(epochs, results['epoch'])
searcher.update(results['validation_accuracy'],
searcher_eval_token)
best_accuracy = max(best_accuracy,
results['validation_accuracy'])
finished += 1
if finished % save_every == 0:
print('Models sampled: %d Best Accuracy: %f' %
(finished, best_accuracy))
best_accuracy = 0.
searcher.save_state(search_data_folder)
state = {
'models_finished': finished,
'epochs': epochs,
'killed': killed
}
ut.write_jsonfile(state, save_filepath)
def start_worker(comm,
evaluator,
search_space_factory,
folderpath,
search_name,
resume=True,
save_every=1):
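    """Worker loop for the distributed architecture search.

    Receives sampled architectures from the master via `comm`, builds and
    specifies the search space, evaluates the resulting model with `evaluator`,
    and publishes the results back until a kill signal (a None architecture)
    is received.
    """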
# set the available gpu for process
print('WORKER %d' % comm.get_rank())
step = 0
sl.create_search_folderpath(folderpath, search_name)
search_data_folder = sl.get_search_data_folderpath(folderpath, search_name)
save_filepath = ut.join_paths(
(search_data_folder, 'worker' + str(comm.get_rank()) + '.json'))
if resume:
evaluator.load_state(search_data_folder)
state = ut.read_jsonfile(save_filepath)
step = state['step']
while (True):
arch = comm.receive_architecture_in_worker()
# if a kill signal is received
if arch is None:
break
vs, evaluation_id, searcher_eval_token = arch
inputs, outputs = search_space_factory.get_search_space()
se.specify(outputs, vs)
results = evaluator.eval(inputs, outputs)
step += 1
if step % save_every == 0:
evaluator.save_state(search_data_folder)
state = {'step': step}
ut.write_jsonfile(state, save_filepath)
comm.publish_results_to_master(results, evaluation_id,
searcher_eval_token)
def main():
configs = ut.read_jsonfile(
"./examples/tensorflow/full_benchmarks/experiment_config.json")
parser = argparse.ArgumentParser("MPI Job for architecture search")
parser.add_argument('--config',
'-c',
action='store',
dest='config_name',
default='normal')
# Other arguments
parser.add_argument('--display-output',
'-o',
action='store_true',
dest='display_output',
default=False)
parser.add_argument('--resume',
'-r',
action='store_true',
dest='resume',
default=False)
options = parser.parse_args()
config = configs[options.config_name]
num_procs = config['num_procs'] if 'num_procs' in config else 0
comm = get_communicator(config['communicator'], num_procs)
if len(gpu_utils.get_gpu_information()) != 0:
#https://github.com/tensorflow/tensorflow/issues/1888
gpu_utils.set_visible_gpus(
[comm.get_rank() % gpu_utils.get_total_num_gpus()])
if 'eager' in config and config['eager']:
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
tf.enable_eager_execution()
datasets = {
'cifar10': lambda: (load_cifar10('data/cifar10/', one_hot=False), 10),
'mnist': lambda: (load_mnist('data/mnist/'), 10),
}
(Xtrain, ytrain, Xval, yval, Xtest,
ytest), num_classes = datasets[config['dataset']]()
search_space_factory = name_to_search_space_factory_fn[
config['search_space']](num_classes)
save_every = 1 if 'save_every' not in config else config['save_every']
if comm.get_rank() == 0:
searcher = name_to_searcher_fn[config['searcher']](
search_space_factory.get_search_space)
num_samples = -1 if 'samples' not in config else config['samples']
num_epochs = -1 if 'epochs' not in config else config['epochs']
start_searcher(comm,
searcher,
options.resume,
config['search_folder'],
config['search_name'],
config['searcher_file_name'],
num_samples=num_samples,
num_epochs=num_epochs,
save_every=save_every)
else:
train_dataset = InMemoryDataset(Xtrain, ytrain, True)
val_dataset = InMemoryDataset(Xval, yval, False)
test_dataset = InMemoryDataset(Xtest, ytest, False)
search_path = sl.get_search_folderpath(config['search_folder'],
config['search_name'])
ut.create_folder(ut.join_paths([search_path, 'scratch_data']),
create_parent_folders=True)
scratch_folder = ut.join_paths(
[search_path, 'scratch_data', 'eval_' + str(comm.get_rank())])
ut.create_folder(scratch_folder)
evaluators = {
'simple_classification':
lambda: SimpleClassifierEvaluator(
train_dataset,
val_dataset,
num_classes,
'./temp' + str(comm.get_rank()),
max_num_training_epochs=config['eval_epochs'],
log_output_to_terminal=options.display_output,
test_dataset=test_dataset),
}
assert not config['evaluator'].startswith('enas') or hasattr(
search_space_factory, 'weight_sharer')
evaluator = evaluators[config['evaluator']]()
start_worker(comm,
evaluator,
search_space_factory,
config['search_folder'],
config['search_name'],
resume=options.resume,
save_every=save_every)
if __name__ == "__main__":
main()
| mit |
arabenjamin/scikit-learn | sklearn/utils/fixes.py | 132 | 12882 | """Compatibility fixes for older versions of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
import inspect
import warnings
import sys
import functools
import os
import errno
import numpy as np
import scipy.sparse as sp
import scipy
def _parse_version(version_string):
version = []
for x in version_string.split('.'):
try:
version.append(int(x))
except ValueError:
# x may be of the form dev-1ea1592
version.append(x)
return tuple(version)
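# A couple of illustrative examples of the parsing above (the version strings
# are arbitrary, chosen only to show both branches):
#   _parse_version('1.9.2') -> (1, 9, 2)
#   _parse_version('1.9.0.dev-1ea1592') -> (1, 9, 0, 'dev-1ea1592')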
np_version = _parse_version(np.__version__)
sp_version = _parse_version(scipy.__version__)
try:
from scipy.special import expit # SciPy >= 0.10
with np.errstate(invalid='ignore', over='ignore'):
if np.isnan(expit(1000)): # SciPy < 0.14
raise ImportError("no stable expit in scipy.special")
except ImportError:
def expit(x, out=None):
"""Logistic sigmoid function, ``1 / (1 + exp(-x))``.
See sklearn.utils.extmath.log_logistic for the log of this function.
"""
if out is None:
out = np.empty(np.atleast_1d(x).shape, dtype=np.float64)
out[:] = x
# 1 / (1 + exp(-x)) = (1 + tanh(x / 2)) / 2
# This way of computing the logistic is both fast and stable.
out *= .5
np.tanh(out, out)
out += 1
out *= .5
return out.reshape(np.shape(x))
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
try:
if (not np.allclose(np.divide(.4, 1, casting="unsafe"),
np.divide(.4, 1, casting="unsafe", dtype=np.float))
or not np.allclose(np.divide(.4, 1), .4)):
raise TypeError('Divide not working with dtype: '
'https://github.com/numpy/numpy/issues/3484')
divide = np.divide
except TypeError:
# Compat for old versions of np.divide that do not provide support for
# the dtype args
def divide(x1, x2, out=None, dtype=None):
out_orig = out
if out is None:
out = np.asarray(x1, dtype=dtype)
if out is x1:
out = x1.copy()
else:
if out is not x1:
out[:] = x1
if dtype is not None and out.dtype != dtype:
out = out.astype(dtype)
out /= x2
if out_orig is None and np.isscalar(x1):
out = np.asscalar(out)
return out
try:
np.array(5).astype(float, copy=False)
except TypeError:
# Compat where astype accepted no copy argument
def astype(array, dtype, copy=True):
if not copy and array.dtype == dtype:
return array
return array.astype(dtype)
else:
astype = np.ndarray.astype
try:
with warnings.catch_warnings(record=True):
# Don't raise the numpy deprecation warnings that appear in
# 1.9, but avoid Python bug due to simplefilter('ignore')
warnings.simplefilter('always')
sp.csr_matrix([1.0, 2.0, 3.0]).max(axis=0)
except (TypeError, AttributeError):
# in scipy < 0.14.0, sparse matrix min/max doesn't accept an `axis` argument
# the following code is taken from the scipy 0.14 codebase
def _minor_reduce(X, ufunc):
major_index = np.flatnonzero(np.diff(X.indptr))
if X.data.size == 0 and major_index.size == 0:
# Numpy < 1.8.0 doesn't handle empty arrays in reduceat
value = np.zeros_like(X.data)
else:
value = ufunc.reduceat(X.data, X.indptr[major_index])
return major_index, value
def _min_or_max_axis(X, axis, min_or_max):
N = X.shape[axis]
if N == 0:
raise ValueError("zero-size array to reduction operation")
M = X.shape[1 - axis]
mat = X.tocsc() if axis == 0 else X.tocsr()
mat.sum_duplicates()
major_index, value = _minor_reduce(mat, min_or_max)
not_full = np.diff(mat.indptr)[major_index] < N
value[not_full] = min_or_max(value[not_full], 0)
mask = value != 0
major_index = np.compress(mask, major_index)
value = np.compress(mask, value)
from scipy.sparse import coo_matrix
if axis == 0:
res = coo_matrix((value, (np.zeros(len(value)), major_index)),
dtype=X.dtype, shape=(1, M))
else:
res = coo_matrix((value, (major_index, np.zeros(len(value)))),
dtype=X.dtype, shape=(M, 1))
return res.A.ravel()
def _sparse_min_or_max(X, axis, min_or_max):
if axis is None:
if 0 in X.shape:
raise ValueError("zero-size array to reduction operation")
zero = X.dtype.type(0)
if X.nnz == 0:
return zero
m = min_or_max.reduce(X.data.ravel())
if X.nnz != np.product(X.shape):
m = min_or_max(zero, m)
return m
if axis < 0:
axis += 2
if (axis == 0) or (axis == 1):
return _min_or_max_axis(X, axis, min_or_max)
else:
raise ValueError("invalid axis, use 0 for rows, or 1 for columns")
def sparse_min_max(X, axis):
return (_sparse_min_or_max(X, axis, np.minimum),
_sparse_min_or_max(X, axis, np.maximum))
else:
def sparse_min_max(X, axis):
return (X.min(axis=axis).toarray().ravel(),
X.max(axis=axis).toarray().ravel())
try:
from numpy import argpartition
except ImportError:
# numpy.argpartition was introduced in v 1.8.0
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return np.argsort(a, axis=axis, order=order)
try:
from itertools import combinations_with_replacement
except ImportError:
# Backport of itertools.combinations_with_replacement for Python 2.6,
# from Python 3.4 documentation (http://tinyurl.com/comb-w-r), copyright
# Python Software Foundation (https://docs.python.org/3/license.html)
def combinations_with_replacement(iterable, r):
# combinations_with_replacement('ABC', 2) --> AA AB AC BB BC CC
pool = tuple(iterable)
n = len(pool)
if not n and r:
return
indices = [0] * r
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != n - 1:
break
else:
return
indices[i:] = [indices[i] + 1] * (r - i)
yield tuple(pool[i] for i in indices)
try:
from numpy import isclose
except ImportError:
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within
a tolerance.
This function was added to numpy v1.7.0, and the version you are
running has been backported from numpy v1.8.1. See its documentation
for more details.
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Since we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
cond[np.isnan(x) & np.isnan(y)] = True
return cond
if np_version < (1, 7):
# Prior to 1.7.0, np.frombuffer wouldn't work for empty first arg.
def frombuffer_empty(buf, dtype):
if len(buf) == 0:
return np.empty(0, dtype=dtype)
else:
return np.frombuffer(buf, dtype=dtype)
else:
frombuffer_empty = np.frombuffer
if np_version < (1, 8):
def in1d(ar1, ar2, assume_unique=False, invert=False):
# Backport of numpy function in1d 1.8.1 to support numpy 1.6.2
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = np.unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
else:
from numpy import in1d
if sp_version < (0, 15):
# Backport fix for scikit-learn/scikit-learn#2986 / scipy/scipy#4142
from ._scipy_sparse_lsqr_backport import lsqr as sparse_lsqr
else:
from scipy.sparse.linalg import lsqr as sparse_lsqr
if sys.version_info < (2, 7, 0):
# partial cannot be pickled in Python 2.6
# http://bugs.python.org/issue1398
class partial(object):
def __init__(self, func, *args, **keywords):
functools.update_wrapper(self, func)
self.func = func
self.args = args
self.keywords = keywords
def __call__(self, *args, **keywords):
args = self.args + args
kwargs = self.keywords.copy()
kwargs.update(keywords)
return self.func(*args, **kwargs)
else:
from functools import partial
if np_version < (1, 6, 2):
# Allow bincount to accept empty arrays
# https://github.com/numpy/numpy/commit/40f0844846a9d7665616b142407a3d74cb65a040
def bincount(x, weights=None, minlength=None):
if len(x) > 0:
return np.bincount(x, weights, minlength)
else:
if minlength is None:
minlength = 0
minlength = np.asscalar(np.asarray(minlength, dtype=np.intp))
return np.zeros(minlength, dtype=np.intp)
else:
from numpy import bincount
if 'exist_ok' in inspect.getargspec(os.makedirs).args:
makedirs = os.makedirs
else:
def makedirs(name, mode=0o777, exist_ok=False):
"""makedirs(name [, mode=0o777][, exist_ok=False])
Super-mkdir; create a leaf directory and all intermediate ones. Works
like mkdir, except that any intermediate path segment (not just the
rightmost) will be created if it does not exist. If the target
directory already exists, raise an OSError if exist_ok is False.
Otherwise no exception is raised. This is recursive.
"""
try:
os.makedirs(name, mode=mode)
except OSError as e:
if (not exist_ok or e.errno != errno.EEXIST
or not os.path.isdir(name)):
raise
| bsd-3-clause |
hasgeek/funnel | funnel/views/notification_feed.py | 1 | 4249 | """Views for notification feed (updates page)."""
from __future__ import annotations
from flask import abort
from baseframe import forms
from coaster.auth import current_auth
from coaster.views import ClassView, render_with, requestargs, route
from .. import app
from ..models import UserNotification, db
from ..typing import ReturnRenderWith
from .login_session import requires_login
@route('/updates')
class AllNotificationsView(ClassView):
current_section = 'notifications' # needed for showing active tab
@route('', endpoint='notifications', defaults={'unread_only': False})
@route('unread', endpoint='notifications_unread', defaults={'unread_only': True})
@requires_login
@render_with('notification_feed.html.jinja2', json=True)
@requestargs(('page', int), ('per_page', int))
def view(self, unread_only: bool, page=1, per_page=10) -> ReturnRenderWith:
pagination = UserNotification.web_notifications_for(
current_auth.user, unread_only
).paginate(page=page, per_page=per_page, max_per_page=100)
results = {
'unread_only': unread_only,
'show_transport_alert': not current_auth.user.has_transport_sms(),
'notifications': [
{
'notification': un.current_access(datasets=('primary', 'related')),
'html': un.views.render(),
'document_type': un.notification.document_type,
'document': un.document.current_access(
datasets=('primary', 'related')
)
if un.document
else None,
'fragment_type': un.notification.fragment_type,
'fragment': un.fragment.current_access(
datasets=('primary', 'related')
)
if un.fragment
else None,
}
for un in pagination.items
if un.is_not_deleted(revoke=True)
],
'has_next': pagination.has_next,
'has_prev': pagination.has_prev,
'page': pagination.page,
'per_page': pagination.per_page,
'pages': pagination.pages,
'next_num': pagination.next_num,
'prev_num': pagination.prev_num,
'count': pagination.total,
}
db.session.commit()
return results
def unread_count(self) -> int:
return UserNotification.unread_count_for(current_auth.user)
@route('count', endpoint='notifications_count')
def unread(self) -> ReturnRenderWith:
# This view must NOT have a `@requires_login` decorator as that will insert
# it as the next page after login
if current_auth.user:
return {
'status': 'ok',
'unread': self.unread_count(),
}
return {'status': 'error', 'error': 'requires_login'}, 400
@route(
'mark_read/<eventid_b58>', endpoint='notification_mark_read', methods=['POST']
)
@requires_login
def mark_read(self, eventid_b58: str) -> ReturnRenderWith:
form = forms.Form()
del form.form_nonce
if form.validate_on_submit():
un = UserNotification.get_for(current_auth.user, eventid_b58)
if un is None:
abort(404)
un.is_read = True
db.session.commit()
return {'status': 'ok', 'unread': self.unread_count()}
return {'status': 'error', 'error': 'csrf'}, 400
@route(
'mark_unread/<eventid_b58>',
endpoint='notification_mark_unread',
methods=['POST'],
)
@requires_login
def mark_unread(self, eventid_b58: str) -> ReturnRenderWith:
form = forms.Form()
del form.form_nonce
if form.validate_on_submit():
un = UserNotification.get_for(current_auth.user, eventid_b58)
if un is None:
abort(404)
un.is_read = False
db.session.commit()
return {'status': 'ok', 'unread': self.unread_count()}
return {'status': 'error', 'error': 'csrf'}, 400
AllNotificationsView.init_app(app)
| agpl-3.0 |
tapomayukh/projects_in_python | classification/Classification_with_kNN/Single_Contact_Classification/Time_Window/test10_cross_validate_objects_400ms.py | 1 | 4258 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Window')
from data_400ms import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
if __name__ == '__main__':
Fmat = Fmat_original
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
# Reduced eigenvector matrix according to the highest eigenvalues (keeping the first 18, chosen from the cumulative variance computed above)
W = eigvec_total[:,0:18]
m_W, n_W = np.shape(W)
print 'Reduced Dimension Eigenvector Shape:',m_W, n_W
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
print 'Z-Score Shape:', m_Z, n_Z
#Projected Data:
Y = (W.T)*B # project the mean-centered data 'B'; use 'Z' instead of 'B' to project the variance-normalized data
m_Y, n_Y = np.shape(Y.T)
print 'Transposed Projected Data Shape:', m_Y, n_Y
#Using PYMVPA
PCA_data = np.array(Y.T)
PCA_label_2 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Plush-Toy-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=1)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_2)
print ds1.samples.shape
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
print error
print cvterr.confusion.asstring(description=False)
figure(1)
cvterr.confusion.plot(numbers='True',numbers_alpha=2)
#show()
# Variances
figure(2)
title('Variances of PCs')
stem(range(len(perc_total)),perc_total,'--b')
axis([-0.3,130.3,0,1.2])
grid('True')
show()
| mit |
negrinho/deep_architect | dev/tutorials/weight_sharing/weight_sharing/main.py | 1 | 2524 | ###${MARKDOWN}
# Recently, with the success of papers such as
# [Efficient Neural Architecture Search via Parameter Sharing](https://arxiv.org/abs/1802.03268),
# weight sharing has emerged as a popular way to speed up evaluation of sampled
# architectures in architecture search. Weight sharing simply involves sharing
# the weights of common layers between different sampled architectures.
# While DeepArchitect does not currently support weight sharing natively,
# there is a simple way to implement weight sharing within the context of the
# framework.
#
# This tutorial demonstrates how to implement basic weight sharing with a dynamic deep
# learning framework such as TensorFlow eager or PyTorch. This example will
# specifically use TensorFlow eager.
import tensorflow as tf
from deep_architect.helpers.tensorflow_eager_support import siso_tensorflow_eager_module
# The WeightSharer object is a simple wrapper around a dictionary that stores a
# mapping from a layer name to the shared layer used by the DeepArchitect modules.
class WeightSharer:
def __init__(self):
self.name_to_weight = {}
def get(self, name, weight_fn):
if name not in self.name_to_weight:
self.name_to_weight[name] = weight_fn()
return self.name_to_weight[name]
weight_sharer = WeightSharer()
# Now to share weights or any other object, simply use a common name for the object
# you wish to share, and pass in a function to initialize the object. The first
# time the function is called, the convolution layer will be created. Subsequent
# calls will simply retrieve the layer from the WeightSharer object.
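# As a minimal sanity check of this pattern (the 'demo_conv' name below is just
# an illustrative placeholder), note that the initializer only runs on the first
# lookup of a given name; later lookups with the same name return the stored object.
_demo_sharer = WeightSharer()
_layer_a = _demo_sharer.get('demo_conv', lambda: tf.keras.layers.Conv2D(16, 3))
_layer_b = _demo_sharer.get('demo_conv', lambda: tf.keras.layers.Conv2D(16, 3))
assert _layer_a is _layer_b  # the same Conv2D instance is reused
# The convolution below follows the same pattern, registering its layer on the
# module-level weight_sharer so that it can be shared across modules.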
def conv2D(filter_size, channels, name):
def compile_fn(di, dh):
conv_fn = lambda: tf.keras.layers.Conv2D(channels, filter_size)
conv = weight_sharer.get(name, conv_fn)
def fn(di, is_training=True):
return {'out': conv(di['in'])}
return fn
return siso_tensorflow_eager_module('Conv2D', compile_fn, {})
conv_original = conv2D(3, 32, 'conv_layer')
# Now, calling the function again with the same 'name' argument will return
# a DeepArchitect module with the same internal convolutional layer
conv_shared = conv2D(3, 32, 'conv_layer')
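# A detail worth keeping in mind: the shared layer is created lazily. The call
# to weight_sharer.get happens inside compile_fn, which DeepArchitect invokes
# when each module is compiled (typically at its first forward pass), not when
# conv2D itself is called. At that point both conv_original and conv_shared
# retrieve the single entry stored under 'conv_layer', so they wrap the very
# same tf.keras.layers.Conv2D instance and therefore share its weights.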
# Implementing such a weight sharing scheme with another dynamic framework such
# as PyTorch is just as straightforward as above. To implement weight sharing
# with a graph-based framework like TensorFlow, you must instead run the tensors
# whose values you wish to share and store the actual weight values. | mit |
mike0sv/spark | python/pyspark/ml/feature.py | 7 | 125758 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
if sys.version > '3':
basestring = str
from pyspark import since, keyword_only
from pyspark.rdd import ignore_unicode_prefix
from pyspark.ml.linalg import _convert_to_vector
from pyspark.ml.param.shared import *
from pyspark.ml.util import JavaMLReadable, JavaMLWritable
from pyspark.ml.wrapper import JavaEstimator, JavaModel, JavaTransformer, _jvm
from pyspark.ml.common import inherit_doc
__all__ = ['Binarizer',
'BucketedRandomProjectionLSH', 'BucketedRandomProjectionLSHModel',
'Bucketizer',
'ChiSqSelector', 'ChiSqSelectorModel',
'CountVectorizer', 'CountVectorizerModel',
'DCT',
'ElementwiseProduct',
'HashingTF',
'IDF', 'IDFModel',
'Imputer', 'ImputerModel',
'IndexToString',
'MaxAbsScaler', 'MaxAbsScalerModel',
'MinHashLSH', 'MinHashLSHModel',
'MinMaxScaler', 'MinMaxScalerModel',
'NGram',
'Normalizer',
'OneHotEncoder',
'PCA', 'PCAModel',
'PolynomialExpansion',
'QuantileDiscretizer',
'RegexTokenizer',
'RFormula', 'RFormulaModel',
'SQLTransformer',
'StandardScaler', 'StandardScalerModel',
'StopWordsRemover',
'StringIndexer', 'StringIndexerModel',
'Tokenizer',
'VectorAssembler',
'VectorIndexer', 'VectorIndexerModel',
'VectorSlicer',
'Word2Vec', 'Word2VecModel']
@inherit_doc
class Binarizer(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
Binarize a column of continuous features given a threshold.
>>> df = spark.createDataFrame([(0.5,)], ["values"])
>>> binarizer = Binarizer(threshold=1.0, inputCol="values", outputCol="features")
>>> binarizer.transform(df).head().features
0.0
>>> binarizer.setParams(outputCol="freqs").transform(df).head().freqs
0.0
>>> params = {binarizer.threshold: -0.5, binarizer.outputCol: "vector"}
>>> binarizer.transform(df, params).head().vector
1.0
>>> binarizerPath = temp_path + "/binarizer"
>>> binarizer.save(binarizerPath)
>>> loadedBinarizer = Binarizer.load(binarizerPath)
>>> loadedBinarizer.getThreshold() == binarizer.getThreshold()
True
.. versionadded:: 1.4.0
"""
threshold = Param(Params._dummy(), "threshold",
"threshold in binary classification prediction, in range [0, 1]",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, threshold=0.0, inputCol=None, outputCol=None):
"""
__init__(self, threshold=0.0, inputCol=None, outputCol=None)
"""
super(Binarizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Binarizer", self.uid)
self._setDefault(threshold=0.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, threshold=0.0, inputCol=None, outputCol=None):
"""
setParams(self, threshold=0.0, inputCol=None, outputCol=None)
Sets params for this Binarizer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setThreshold(self, value):
"""
Sets the value of :py:attr:`threshold`.
"""
return self._set(threshold=value)
@since("1.4.0")
def getThreshold(self):
"""
Gets the value of threshold or its default value.
"""
return self.getOrDefault(self.threshold)
class LSHParams(Params):
"""
Mixin for Locality Sensitive Hashing (LSH) algorithm parameters.
"""
numHashTables = Param(Params._dummy(), "numHashTables", "number of hash tables, where " +
"increasing number of hash tables lowers the false negative rate, " +
"and decreasing it improves the running performance.",
typeConverter=TypeConverters.toInt)
def __init__(self):
super(LSHParams, self).__init__()
def setNumHashTables(self, value):
"""
Sets the value of :py:attr:`numHashTables`.
"""
return self._set(numHashTables=value)
def getNumHashTables(self):
"""
Gets the value of numHashTables or its default value.
"""
return self.getOrDefault(self.numHashTables)
class LSHModel(JavaModel):
"""
Mixin for Locality Sensitive Hashing (LSH) models.
"""
def approxNearestNeighbors(self, dataset, key, numNearestNeighbors, distCol="distCol"):
"""
Given a large dataset and an item, approximately find at most k items which have the
closest distance to the item. If the :py:attr:`outputCol` is missing, the method will
transform the data; if the :py:attr:`outputCol` exists, it will use that. This allows
caching of the transformed data when necessary.
.. note:: This method is experimental and will likely change behavior in the next release.
:param dataset: The dataset to search for nearest neighbors of the key.
:param key: Feature vector representing the item to search for.
:param numNearestNeighbors: The maximum number of nearest neighbors.
:param distCol: Output column for storing the distance between each result row and the key.
Use "distCol" as default value if it's not specified.
:return: A dataset containing at most k items closest to the key. A column "distCol" is
added to show the distance between each row and the key.
"""
return self._call_java("approxNearestNeighbors", dataset, key, numNearestNeighbors,
distCol)
def approxSimilarityJoin(self, datasetA, datasetB, threshold, distCol="distCol"):
"""
Join two datasets to approximately find all pairs of rows whose distance are smaller than
the threshold. If the :py:attr:`outputCol` is missing, the method will transform the data;
if the :py:attr:`outputCol` exists, it will use that. This allows caching of the
transformed data when necessary.
:param datasetA: One of the datasets to join.
:param datasetB: Another dataset to join.
:param threshold: The threshold for the distance of row pairs.
:param distCol: Output column for storing the distance between each pair of rows. Use
"distCol" as default value if it's not specified.
:return: A joined dataset containing pairs of rows. The original rows are in columns
"datasetA" and "datasetB", and a column "distCol" is added to show the distance
between each pair.
"""
return self._call_java("approxSimilarityJoin", datasetA, datasetB, threshold, distCol)
@inherit_doc
class BucketedRandomProjectionLSH(JavaEstimator, LSHParams, HasInputCol, HasOutputCol, HasSeed,
JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
LSH class for Euclidean distance metrics.
The input is dense or sparse vectors, each of which represents a point in the Euclidean
distance space. The output will be vectors of configurable dimension. Hash values in the same
dimension are calculated by the same hash function.
.. seealso:: `Stable Distributions \
<https://en.wikipedia.org/wiki/Locality-sensitive_hashing#Stable_distributions>`_
.. seealso:: `Hashing for Similarity Search: A Survey <https://arxiv.org/abs/1408.2927>`_
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.sql.functions import col
>>> data = [(0, Vectors.dense([-1.0, -1.0 ]),),
... (1, Vectors.dense([-1.0, 1.0 ]),),
... (2, Vectors.dense([1.0, -1.0 ]),),
... (3, Vectors.dense([1.0, 1.0]),)]
>>> df = spark.createDataFrame(data, ["id", "features"])
>>> brp = BucketedRandomProjectionLSH(inputCol="features", outputCol="hashes",
... seed=12345, bucketLength=1.0)
>>> model = brp.fit(df)
>>> model.transform(df).head()
Row(id=0, features=DenseVector([-1.0, -1.0]), hashes=[DenseVector([-1.0])])
>>> data2 = [(4, Vectors.dense([2.0, 2.0 ]),),
... (5, Vectors.dense([2.0, 3.0 ]),),
... (6, Vectors.dense([3.0, 2.0 ]),),
... (7, Vectors.dense([3.0, 3.0]),)]
>>> df2 = spark.createDataFrame(data2, ["id", "features"])
>>> model.approxNearestNeighbors(df2, Vectors.dense([1.0, 2.0]), 1).collect()
[Row(id=4, features=DenseVector([2.0, 2.0]), hashes=[DenseVector([1.0])], distCol=1.0)]
>>> model.approxSimilarityJoin(df, df2, 3.0, distCol="EuclideanDistance").select(
... col("datasetA.id").alias("idA"),
... col("datasetB.id").alias("idB"),
... col("EuclideanDistance")).show()
+---+---+-----------------+
|idA|idB|EuclideanDistance|
+---+---+-----------------+
| 3| 6| 2.23606797749979|
+---+---+-----------------+
...
>>> brpPath = temp_path + "/brp"
>>> brp.save(brpPath)
>>> brp2 = BucketedRandomProjectionLSH.load(brpPath)
>>> brp2.getBucketLength() == brp.getBucketLength()
True
>>> modelPath = temp_path + "/brp-model"
>>> model.save(modelPath)
>>> model2 = BucketedRandomProjectionLSHModel.load(modelPath)
>>> model.transform(df).head().hashes == model2.transform(df).head().hashes
True
.. versionadded:: 2.2.0
"""
bucketLength = Param(Params._dummy(), "bucketLength", "the length of each hash bucket, " +
"a larger bucket lowers the false negative rate.",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, inputCol=None, outputCol=None, seed=None, numHashTables=1,
bucketLength=None):
"""
__init__(self, inputCol=None, outputCol=None, seed=None, numHashTables=1, \
bucketLength=None)
"""
super(BucketedRandomProjectionLSH, self).__init__()
self._java_obj = \
self._new_java_obj("org.apache.spark.ml.feature.BucketedRandomProjectionLSH", self.uid)
self._setDefault(numHashTables=1)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.2.0")
def setParams(self, inputCol=None, outputCol=None, seed=None, numHashTables=1,
bucketLength=None):
"""
setParams(self, inputCol=None, outputCol=None, seed=None, numHashTables=1, \
bucketLength=None)
Sets params for this BucketedRandomProjectionLSH.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.2.0")
def setBucketLength(self, value):
"""
Sets the value of :py:attr:`bucketLength`.
"""
return self._set(bucketLength=value)
@since("2.2.0")
def getBucketLength(self):
"""
Gets the value of bucketLength or its default value.
"""
return self.getOrDefault(self.bucketLength)
def _create_model(self, java_model):
return BucketedRandomProjectionLSHModel(java_model)
class BucketedRandomProjectionLSHModel(LSHModel, JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Model fitted by :py:class:`BucketedRandomProjectionLSH`, where multiple random vectors are
stored. The vectors are normalized to be unit vectors and each vector is used in a hash
function: :math:`h_i(x) = floor(r_i \cdot x / bucketLength)` where :math:`r_i` is the
i-th random unit vector. The number of buckets will be `(max L2 norm of input vectors) /
bucketLength`.
.. versionadded:: 2.2.0
"""
@inherit_doc
class Bucketizer(JavaTransformer, HasInputCol, HasOutputCol, HasHandleInvalid,
JavaMLReadable, JavaMLWritable):
"""
Maps a column of continuous features to a column of feature buckets.
>>> values = [(0.1,), (0.4,), (1.2,), (1.5,), (float("nan"),), (float("nan"),)]
>>> df = spark.createDataFrame(values, ["values"])
>>> bucketizer = Bucketizer(splits=[-float("inf"), 0.5, 1.4, float("inf")],
... inputCol="values", outputCol="buckets")
>>> bucketed = bucketizer.setHandleInvalid("keep").transform(df).collect()
>>> len(bucketed)
6
>>> bucketed[0].buckets
0.0
>>> bucketed[1].buckets
0.0
>>> bucketed[2].buckets
1.0
>>> bucketed[3].buckets
2.0
>>> bucketizer.setParams(outputCol="b").transform(df).head().b
0.0
>>> bucketizerPath = temp_path + "/bucketizer"
>>> bucketizer.save(bucketizerPath)
>>> loadedBucketizer = Bucketizer.load(bucketizerPath)
>>> loadedBucketizer.getSplits() == bucketizer.getSplits()
True
>>> bucketed = bucketizer.setHandleInvalid("skip").transform(df).collect()
>>> len(bucketed)
4
.. versionadded:: 1.4.0
"""
splits = \
Param(Params._dummy(), "splits",
"Split points for mapping continuous features into buckets. With n+1 splits, " +
"there are n buckets. A bucket defined by splits x,y holds values in the " +
"range [x,y) except the last bucket, which also includes y. The splits " +
"should be of length >= 3 and strictly increasing. Values at -inf, inf must be " +
"explicitly provided to cover all Double values; otherwise, values outside the " +
"splits specified will be treated as errors.",
typeConverter=TypeConverters.toListFloat)
handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid entries. " +
"Options are 'skip' (filter out rows with invalid values), " +
"'error' (throw an error), or 'keep' (keep invalid values in a special " +
"additional bucket).",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, splits=None, inputCol=None, outputCol=None, handleInvalid="error"):
"""
__init__(self, splits=None, inputCol=None, outputCol=None, handleInvalid="error")
"""
super(Bucketizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Bucketizer", self.uid)
self._setDefault(handleInvalid="error")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, splits=None, inputCol=None, outputCol=None, handleInvalid="error"):
"""
setParams(self, splits=None, inputCol=None, outputCol=None, handleInvalid="error")
Sets params for this Bucketizer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setSplits(self, value):
"""
Sets the value of :py:attr:`splits`.
"""
return self._set(splits=value)
@since("1.4.0")
def getSplits(self):
"""
Gets the value of threshold or its default value.
"""
return self.getOrDefault(self.splits)
@inherit_doc
class CountVectorizer(JavaEstimator, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
Extracts a vocabulary from document collections and generates a :py:attr:`CountVectorizerModel`.
>>> df = spark.createDataFrame(
... [(0, ["a", "b", "c"]), (1, ["a", "b", "b", "c", "a"])],
... ["label", "raw"])
>>> cv = CountVectorizer(inputCol="raw", outputCol="vectors")
>>> model = cv.fit(df)
>>> model.transform(df).show(truncate=False)
+-----+---------------+-------------------------+
|label|raw |vectors |
+-----+---------------+-------------------------+
|0 |[a, b, c] |(3,[0,1,2],[1.0,1.0,1.0])|
|1 |[a, b, b, c, a]|(3,[0,1,2],[2.0,2.0,1.0])|
+-----+---------------+-------------------------+
...
>>> sorted(model.vocabulary) == ['a', 'b', 'c']
True
>>> countVectorizerPath = temp_path + "/count-vectorizer"
>>> cv.save(countVectorizerPath)
>>> loadedCv = CountVectorizer.load(countVectorizerPath)
>>> loadedCv.getMinDF() == cv.getMinDF()
True
>>> loadedCv.getMinTF() == cv.getMinTF()
True
>>> loadedCv.getVocabSize() == cv.getVocabSize()
True
>>> modelPath = temp_path + "/count-vectorizer-model"
>>> model.save(modelPath)
>>> loadedModel = CountVectorizerModel.load(modelPath)
>>> loadedModel.vocabulary == model.vocabulary
True
.. versionadded:: 1.6.0
"""
minTF = Param(
Params._dummy(), "minTF", "Filter to ignore rare words in" +
" a document. For each document, terms with frequency/count less than the given" +
" threshold are ignored. If this is an integer >= 1, then this specifies a count (of" +
" times the term must appear in the document); if this is a double in [0,1), then this " +
"specifies a fraction (out of the document's token count). Note that the parameter is " +
"only used in transform of CountVectorizerModel and does not affect fitting. Default 1.0",
typeConverter=TypeConverters.toFloat)
minDF = Param(
Params._dummy(), "minDF", "Specifies the minimum number of" +
" different documents a term must appear in to be included in the vocabulary." +
" If this is an integer >= 1, this specifies the number of documents the term must" +
" appear in; if this is a double in [0,1), then this specifies the fraction of documents." +
" Default 1.0", typeConverter=TypeConverters.toFloat)
vocabSize = Param(
Params._dummy(), "vocabSize", "max size of the vocabulary. Default 1 << 18.",
typeConverter=TypeConverters.toInt)
binary = Param(
Params._dummy(), "binary", "Binary toggle to control the output vector values." +
" If True, all nonzero counts (after minTF filter applied) are set to 1. This is useful" +
" for discrete probabilistic models that model binary events rather than integer counts." +
" Default False", typeConverter=TypeConverters.toBoolean)
@keyword_only
def __init__(self, minTF=1.0, minDF=1.0, vocabSize=1 << 18, binary=False, inputCol=None,
outputCol=None):
"""
__init__(self, minTF=1.0, minDF=1.0, vocabSize=1 << 18, binary=False, inputCol=None,\
outputCol=None)
"""
super(CountVectorizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.CountVectorizer",
self.uid)
self._setDefault(minTF=1.0, minDF=1.0, vocabSize=1 << 18, binary=False)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, minTF=1.0, minDF=1.0, vocabSize=1 << 18, binary=False, inputCol=None,
outputCol=None):
"""
setParams(self, minTF=1.0, minDF=1.0, vocabSize=1 << 18, binary=False, inputCol=None,\
outputCol=None)
Set the params for the CountVectorizer
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.6.0")
def setMinTF(self, value):
"""
Sets the value of :py:attr:`minTF`.
"""
return self._set(minTF=value)
@since("1.6.0")
def getMinTF(self):
"""
Gets the value of minTF or its default value.
"""
return self.getOrDefault(self.minTF)
@since("1.6.0")
def setMinDF(self, value):
"""
Sets the value of :py:attr:`minDF`.
"""
return self._set(minDF=value)
@since("1.6.0")
def getMinDF(self):
"""
Gets the value of minDF or its default value.
"""
return self.getOrDefault(self.minDF)
@since("1.6.0")
def setVocabSize(self, value):
"""
Sets the value of :py:attr:`vocabSize`.
"""
return self._set(vocabSize=value)
@since("1.6.0")
def getVocabSize(self):
"""
Gets the value of vocabSize or its default value.
"""
return self.getOrDefault(self.vocabSize)
@since("2.0.0")
def setBinary(self, value):
"""
Sets the value of :py:attr:`binary`.
"""
return self._set(binary=value)
@since("2.0.0")
def getBinary(self):
"""
Gets the value of binary or its default value.
"""
return self.getOrDefault(self.binary)
def _create_model(self, java_model):
return CountVectorizerModel(java_model)
class CountVectorizerModel(JavaModel, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`CountVectorizer`.
.. versionadded:: 1.6.0
"""
@property
@since("1.6.0")
def vocabulary(self):
"""
An array of terms in the vocabulary.
"""
return self._call_java("vocabulary")
@inherit_doc
class DCT(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
A feature transformer that takes the 1D discrete cosine transform
of a real vector. No zero padding is performed on the input vector.
It returns a real vector of the same length representing the DCT.
The return vector is scaled such that the transform matrix is
unitary (aka scaled DCT-II).
.. seealso:: `More information on Wikipedia \
<https://en.wikipedia.org/wiki/Discrete_cosine_transform#DCT-II>`_.
>>> from pyspark.ml.linalg import Vectors
>>> df1 = spark.createDataFrame([(Vectors.dense([5.0, 8.0, 6.0]),)], ["vec"])
>>> dct = DCT(inverse=False, inputCol="vec", outputCol="resultVec")
>>> df2 = dct.transform(df1)
>>> df2.head().resultVec
DenseVector([10.969..., -0.707..., -2.041...])
>>> df3 = DCT(inverse=True, inputCol="resultVec", outputCol="origVec").transform(df2)
>>> df3.head().origVec
DenseVector([5.0, 8.0, 6.0])
>>> dctPath = temp_path + "/dct"
>>> dct.save(dctPath)
>>> loadedDtc = DCT.load(dctPath)
>>> loadedDtc.getInverse()
False
.. versionadded:: 1.6.0
"""
inverse = Param(Params._dummy(), "inverse", "Set transformer to perform inverse DCT, " +
"default False.", typeConverter=TypeConverters.toBoolean)
@keyword_only
def __init__(self, inverse=False, inputCol=None, outputCol=None):
"""
__init__(self, inverse=False, inputCol=None, outputCol=None)
"""
super(DCT, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.DCT", self.uid)
self._setDefault(inverse=False)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, inverse=False, inputCol=None, outputCol=None):
"""
setParams(self, inverse=False, inputCol=None, outputCol=None)
Sets params for this DCT.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.6.0")
def setInverse(self, value):
"""
Sets the value of :py:attr:`inverse`.
"""
return self._set(inverse=value)
@since("1.6.0")
def getInverse(self):
"""
Gets the value of inverse or its default value.
"""
return self.getOrDefault(self.inverse)
@inherit_doc
class ElementwiseProduct(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable,
JavaMLWritable):
"""
Outputs the Hadamard product (i.e., the element-wise product) of each input vector
with a provided "weight" vector. In other words, it scales each column of the dataset
by a scalar multiplier.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([(Vectors.dense([2.0, 1.0, 3.0]),)], ["values"])
>>> ep = ElementwiseProduct(scalingVec=Vectors.dense([1.0, 2.0, 3.0]),
... inputCol="values", outputCol="eprod")
>>> ep.transform(df).head().eprod
DenseVector([2.0, 2.0, 9.0])
>>> ep.setParams(scalingVec=Vectors.dense([2.0, 3.0, 5.0])).transform(df).head().eprod
DenseVector([4.0, 3.0, 15.0])
>>> elementwiseProductPath = temp_path + "/elementwise-product"
>>> ep.save(elementwiseProductPath)
>>> loadedEp = ElementwiseProduct.load(elementwiseProductPath)
>>> loadedEp.getScalingVec() == ep.getScalingVec()
True
.. versionadded:: 1.5.0
"""
scalingVec = Param(Params._dummy(), "scalingVec", "Vector for hadamard product.",
typeConverter=TypeConverters.toVector)
@keyword_only
def __init__(self, scalingVec=None, inputCol=None, outputCol=None):
"""
__init__(self, scalingVec=None, inputCol=None, outputCol=None)
"""
super(ElementwiseProduct, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.ElementwiseProduct",
self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.5.0")
def setParams(self, scalingVec=None, inputCol=None, outputCol=None):
"""
setParams(self, scalingVec=None, inputCol=None, outputCol=None)
Sets params for this ElementwiseProduct.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setScalingVec(self, value):
"""
Sets the value of :py:attr:`scalingVec`.
"""
return self._set(scalingVec=value)
@since("2.0.0")
def getScalingVec(self):
"""
Gets the value of scalingVec or its default value.
"""
return self.getOrDefault(self.scalingVec)
@inherit_doc
class HashingTF(JavaTransformer, HasInputCol, HasOutputCol, HasNumFeatures, JavaMLReadable,
JavaMLWritable):
"""
Maps a sequence of terms to their term frequencies using the hashing trick.
Currently we use Austin Appleby's MurmurHash 3 algorithm (MurmurHash3_x86_32)
to calculate the hash code value for the term object.
Since a simple modulo is used to transform the hash function to a column index,
it is advisable to use a power of two as the numFeatures parameter;
otherwise the features will not be mapped evenly to the columns.
>>> df = spark.createDataFrame([(["a", "b", "c"],)], ["words"])
>>> hashingTF = HashingTF(numFeatures=10, inputCol="words", outputCol="features")
>>> hashingTF.transform(df).head().features
SparseVector(10, {0: 1.0, 1: 1.0, 2: 1.0})
>>> hashingTF.setParams(outputCol="freqs").transform(df).head().freqs
SparseVector(10, {0: 1.0, 1: 1.0, 2: 1.0})
>>> params = {hashingTF.numFeatures: 5, hashingTF.outputCol: "vector"}
>>> hashingTF.transform(df, params).head().vector
SparseVector(5, {0: 1.0, 1: 1.0, 2: 1.0})
>>> hashingTFPath = temp_path + "/hashing-tf"
>>> hashingTF.save(hashingTFPath)
>>> loadedHashingTF = HashingTF.load(hashingTFPath)
>>> loadedHashingTF.getNumFeatures() == hashingTF.getNumFeatures()
True
.. versionadded:: 1.3.0
"""
binary = Param(Params._dummy(), "binary", "If True, all non zero counts are set to 1. " +
"This is useful for discrete probabilistic models that model binary events " +
"rather than integer counts. Default False.",
typeConverter=TypeConverters.toBoolean)
@keyword_only
def __init__(self, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None):
"""
__init__(self, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None)
"""
super(HashingTF, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.HashingTF", self.uid)
self._setDefault(numFeatures=1 << 18, binary=False)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.3.0")
def setParams(self, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None):
"""
setParams(self, numFeatures=1 << 18, binary=False, inputCol=None, outputCol=None)
Sets params for this HashingTF.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setBinary(self, value):
"""
Sets the value of :py:attr:`binary`.
"""
return self._set(binary=value)
@since("2.0.0")
def getBinary(self):
"""
Gets the value of binary or its default value.
"""
return self.getOrDefault(self.binary)
@inherit_doc
class IDF(JavaEstimator, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
Compute the Inverse Document Frequency (IDF) given a collection of documents.
>>> from pyspark.ml.linalg import DenseVector
>>> df = spark.createDataFrame([(DenseVector([1.0, 2.0]),),
... (DenseVector([0.0, 1.0]),), (DenseVector([3.0, 0.2]),)], ["tf"])
>>> idf = IDF(minDocFreq=3, inputCol="tf", outputCol="idf")
>>> model = idf.fit(df)
>>> model.idf
DenseVector([0.0, 0.0])
>>> model.transform(df).head().idf
DenseVector([0.0, 0.0])
>>> idf.setParams(outputCol="freqs").fit(df).transform(df).collect()[1].freqs
DenseVector([0.0, 0.0])
>>> params = {idf.minDocFreq: 1, idf.outputCol: "vector"}
>>> idf.fit(df, params).transform(df).head().vector
DenseVector([0.2877, 0.0])
>>> idfPath = temp_path + "/idf"
>>> idf.save(idfPath)
>>> loadedIdf = IDF.load(idfPath)
>>> loadedIdf.getMinDocFreq() == idf.getMinDocFreq()
True
>>> modelPath = temp_path + "/idf-model"
>>> model.save(modelPath)
>>> loadedModel = IDFModel.load(modelPath)
>>> loadedModel.transform(df).head().idf == model.transform(df).head().idf
True
.. versionadded:: 1.4.0
"""
minDocFreq = Param(Params._dummy(), "minDocFreq",
"minimum number of documents in which a term should appear for filtering",
typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, minDocFreq=0, inputCol=None, outputCol=None):
"""
__init__(self, minDocFreq=0, inputCol=None, outputCol=None)
"""
super(IDF, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.IDF", self.uid)
self._setDefault(minDocFreq=0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, minDocFreq=0, inputCol=None, outputCol=None):
"""
setParams(self, minDocFreq=0, inputCol=None, outputCol=None)
Sets params for this IDF.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setMinDocFreq(self, value):
"""
Sets the value of :py:attr:`minDocFreq`.
"""
return self._set(minDocFreq=value)
@since("1.4.0")
def getMinDocFreq(self):
"""
Gets the value of minDocFreq or its default value.
"""
return self.getOrDefault(self.minDocFreq)
def _create_model(self, java_model):
return IDFModel(java_model)
class IDFModel(JavaModel, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`IDF`.
.. versionadded:: 1.4.0
"""
@property
@since("2.0.0")
def idf(self):
"""
Returns the IDF vector.
"""
return self._call_java("idf")
@inherit_doc
class Imputer(JavaEstimator, HasInputCols, JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Imputation estimator for completing missing values, either using the mean or the median
of the columns in which the missing values are located. The input columns should be of
DoubleType or FloatType. Currently Imputer does not support categorical features and
possibly creates incorrect values for a categorical feature.
Note that the mean/median value is computed after filtering out missing values.
All Null values in the input columns are treated as missing, and so are also imputed. For
computing median, :py:meth:`pyspark.sql.DataFrame.approxQuantile` is used with a
relative error of `0.001`.
>>> df = spark.createDataFrame([(1.0, float("nan")), (2.0, float("nan")), (float("nan"), 3.0),
... (4.0, 4.0), (5.0, 5.0)], ["a", "b"])
>>> imputer = Imputer(inputCols=["a", "b"], outputCols=["out_a", "out_b"])
>>> model = imputer.fit(df)
>>> model.surrogateDF.show()
+---+---+
| a| b|
+---+---+
|3.0|4.0|
+---+---+
...
>>> model.transform(df).show()
+---+---+-----+-----+
| a| b|out_a|out_b|
+---+---+-----+-----+
|1.0|NaN| 1.0| 4.0|
|2.0|NaN| 2.0| 4.0|
|NaN|3.0| 3.0| 3.0|
...
>>> imputer.setStrategy("median").setMissingValue(1.0).fit(df).transform(df).show()
+---+---+-----+-----+
| a| b|out_a|out_b|
+---+---+-----+-----+
|1.0|NaN| 4.0| NaN|
...
>>> imputerPath = temp_path + "/imputer"
>>> imputer.save(imputerPath)
>>> loadedImputer = Imputer.load(imputerPath)
>>> loadedImputer.getStrategy() == imputer.getStrategy()
True
>>> loadedImputer.getMissingValue()
1.0
>>> modelPath = temp_path + "/imputer-model"
>>> model.save(modelPath)
>>> loadedModel = ImputerModel.load(modelPath)
>>> loadedModel.transform(df).head().out_a == model.transform(df).head().out_a
True
.. versionadded:: 2.2.0
"""
outputCols = Param(Params._dummy(), "outputCols",
"output column names.", typeConverter=TypeConverters.toListString)
strategy = Param(Params._dummy(), "strategy",
"strategy for imputation. If mean, then replace missing values using the mean "
"value of the feature. If median, then replace missing values using the "
"median value of the feature.",
typeConverter=TypeConverters.toString)
missingValue = Param(Params._dummy(), "missingValue",
"The placeholder for the missing values. All occurrences of missingValue "
"will be imputed.", typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, strategy="mean", missingValue=float("nan"), inputCols=None,
outputCols=None):
"""
__init__(self, strategy="mean", missingValue=float("nan"), inputCols=None, \
outputCols=None):
"""
super(Imputer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Imputer", self.uid)
self._setDefault(strategy="mean", missingValue=float("nan"))
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.2.0")
def setParams(self, strategy="mean", missingValue=float("nan"), inputCols=None,
outputCols=None):
"""
setParams(self, strategy="mean", missingValue=float("nan"), inputCols=None, \
outputCols=None)
Sets params for this Imputer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.2.0")
def setOutputCols(self, value):
"""
Sets the value of :py:attr:`outputCols`.
"""
return self._set(outputCols=value)
@since("2.2.0")
def getOutputCols(self):
"""
Gets the value of :py:attr:`outputCols` or its default value.
"""
return self.getOrDefault(self.outputCols)
@since("2.2.0")
def setStrategy(self, value):
"""
Sets the value of :py:attr:`strategy`.
"""
return self._set(strategy=value)
@since("2.2.0")
def getStrategy(self):
"""
Gets the value of :py:attr:`strategy` or its default value.
"""
return self.getOrDefault(self.strategy)
@since("2.2.0")
def setMissingValue(self, value):
"""
Sets the value of :py:attr:`missingValue`.
"""
return self._set(missingValue=value)
@since("2.2.0")
def getMissingValue(self):
"""
Gets the value of :py:attr:`missingValue` or its default value.
"""
return self.getOrDefault(self.missingValue)
def _create_model(self, java_model):
return ImputerModel(java_model)
class ImputerModel(JavaModel, JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Model fitted by :py:class:`Imputer`.
.. versionadded:: 2.2.0
"""
@property
@since("2.2.0")
def surrogateDF(self):
"""
Returns a DataFrame containing inputCols and their corresponding surrogates,
which are used to replace the missing values in the input DataFrame.
"""
return self._call_java("surrogateDF")
@inherit_doc
class MaxAbsScaler(JavaEstimator, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
Rescale each feature individually to range [-1, 1] by dividing through the largest maximum
absolute value in each feature. It does not shift/center the data, and thus does not destroy
any sparsity.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([(Vectors.dense([1.0]),), (Vectors.dense([2.0]),)], ["a"])
>>> maScaler = MaxAbsScaler(inputCol="a", outputCol="scaled")
>>> model = maScaler.fit(df)
>>> model.transform(df).show()
+-----+------+
| a|scaled|
+-----+------+
|[1.0]| [0.5]|
|[2.0]| [1.0]|
+-----+------+
...
>>> scalerPath = temp_path + "/max-abs-scaler"
>>> maScaler.save(scalerPath)
>>> loadedMAScaler = MaxAbsScaler.load(scalerPath)
>>> loadedMAScaler.getInputCol() == maScaler.getInputCol()
True
>>> loadedMAScaler.getOutputCol() == maScaler.getOutputCol()
True
>>> modelPath = temp_path + "/max-abs-scaler-model"
>>> model.save(modelPath)
>>> loadedModel = MaxAbsScalerModel.load(modelPath)
>>> loadedModel.maxAbs == model.maxAbs
True
.. versionadded:: 2.0.0
"""
@keyword_only
def __init__(self, inputCol=None, outputCol=None):
"""
__init__(self, inputCol=None, outputCol=None)
"""
super(MaxAbsScaler, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.MaxAbsScaler", self.uid)
self._setDefault()
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, inputCol=None, outputCol=None):
"""
setParams(self, inputCol=None, outputCol=None)
Sets params for this MaxAbsScaler.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return MaxAbsScalerModel(java_model)
class MaxAbsScalerModel(JavaModel, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`MaxAbsScaler`.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def maxAbs(self):
"""
Max Abs vector.
"""
return self._call_java("maxAbs")
@inherit_doc
class MinHashLSH(JavaEstimator, LSHParams, HasInputCol, HasOutputCol, HasSeed,
JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
LSH class for Jaccard distance.
The input can be dense or sparse vectors, but it is more efficient if it is sparse.
For example, `Vectors.sparse(10, [(2, 1.0), (3, 1.0), (5, 1.0)])` means there are 10 elements
in the space. This set contains elements 2, 3, and 5. Also, any input vector must have at
least 1 non-zero index, and all non-zero values are treated as binary "1" values.
.. seealso:: `Wikipedia on MinHash <https://en.wikipedia.org/wiki/MinHash>`_
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.sql.functions import col
>>> data = [(0, Vectors.sparse(6, [0, 1, 2], [1.0, 1.0, 1.0]),),
... (1, Vectors.sparse(6, [2, 3, 4], [1.0, 1.0, 1.0]),),
... (2, Vectors.sparse(6, [0, 2, 4], [1.0, 1.0, 1.0]),)]
>>> df = spark.createDataFrame(data, ["id", "features"])
>>> mh = MinHashLSH(inputCol="features", outputCol="hashes", seed=12345)
>>> model = mh.fit(df)
>>> model.transform(df).head()
Row(id=0, features=SparseVector(6, {0: 1.0, 1: 1.0, 2: 1.0}), hashes=[DenseVector([-1638925...
>>> data2 = [(3, Vectors.sparse(6, [1, 3, 5], [1.0, 1.0, 1.0]),),
... (4, Vectors.sparse(6, [2, 3, 5], [1.0, 1.0, 1.0]),),
... (5, Vectors.sparse(6, [1, 2, 4], [1.0, 1.0, 1.0]),)]
>>> df2 = spark.createDataFrame(data2, ["id", "features"])
>>> key = Vectors.sparse(6, [1, 2], [1.0, 1.0])
>>> model.approxNearestNeighbors(df2, key, 1).collect()
[Row(id=5, features=SparseVector(6, {1: 1.0, 2: 1.0, 4: 1.0}), hashes=[DenseVector([-163892...
>>> model.approxSimilarityJoin(df, df2, 0.6, distCol="JaccardDistance").select(
... col("datasetA.id").alias("idA"),
... col("datasetB.id").alias("idB"),
... col("JaccardDistance")).show()
+---+---+---------------+
|idA|idB|JaccardDistance|
+---+---+---------------+
| 1| 4| 0.5|
| 0| 5| 0.5|
+---+---+---------------+
...
>>> mhPath = temp_path + "/mh"
>>> mh.save(mhPath)
>>> mh2 = MinHashLSH.load(mhPath)
>>> mh2.getOutputCol() == mh.getOutputCol()
True
>>> modelPath = temp_path + "/mh-model"
>>> model.save(modelPath)
>>> model2 = MinHashLSHModel.load(modelPath)
>>> model.transform(df).head().hashes == model2.transform(df).head().hashes
True
.. versionadded:: 2.2.0
"""
@keyword_only
def __init__(self, inputCol=None, outputCol=None, seed=None, numHashTables=1):
"""
__init__(self, inputCol=None, outputCol=None, seed=None, numHashTables=1)
"""
super(MinHashLSH, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.MinHashLSH", self.uid)
self._setDefault(numHashTables=1)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.2.0")
def setParams(self, inputCol=None, outputCol=None, seed=None, numHashTables=1):
"""
setParams(self, inputCol=None, outputCol=None, seed=None, numHashTables=1)
Sets params for this MinHashLSH.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return MinHashLSHModel(java_model)
class MinHashLSHModel(LSHModel, JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
    Model produced by :py:class:`MinHashLSH`, where multiple hash functions are stored. Each
    hash function is picked from the following family of hash functions, where :math:`a_i` and
    :math:`b_i` are randomly chosen integers less than prime:
    :math:`h_i(x) = ((x \cdot a_i + b_i) \mod prime)`. This hash family is approximately min-wise
    independent according to the reference.
.. seealso:: Tom Bohman, Colin Cooper, and Alan Frieze. "Min-wise independent linear \
permutations." Electronic Journal of Combinatorics 7 (2000): R26.
.. versionadded:: 2.2.0
"""
@inherit_doc
class MinMaxScaler(JavaEstimator, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
Rescale each feature individually to a common range [min, max] linearly using column summary
statistics, which is also known as min-max normalization or Rescaling. The rescaled value for
feature E is calculated as,
Rescaled(e_i) = (e_i - E_min) / (E_max - E_min) * (max - min) + min
For the case E_max == E_min, Rescaled(e_i) = 0.5 * (max + min)
    .. note:: Since zero values will probably be transformed to non-zero values, the output of
        the transformer will be a DenseVector even for sparse input.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([(Vectors.dense([0.0]),), (Vectors.dense([2.0]),)], ["a"])
>>> mmScaler = MinMaxScaler(inputCol="a", outputCol="scaled")
>>> model = mmScaler.fit(df)
>>> model.originalMin
DenseVector([0.0])
>>> model.originalMax
DenseVector([2.0])
>>> model.transform(df).show()
+-----+------+
| a|scaled|
+-----+------+
|[0.0]| [0.0]|
|[2.0]| [1.0]|
+-----+------+
...
>>> minMaxScalerPath = temp_path + "/min-max-scaler"
>>> mmScaler.save(minMaxScalerPath)
>>> loadedMMScaler = MinMaxScaler.load(minMaxScalerPath)
>>> loadedMMScaler.getMin() == mmScaler.getMin()
True
>>> loadedMMScaler.getMax() == mmScaler.getMax()
True
>>> modelPath = temp_path + "/min-max-scaler-model"
>>> model.save(modelPath)
>>> loadedModel = MinMaxScalerModel.load(modelPath)
>>> loadedModel.originalMin == model.originalMin
True
>>> loadedModel.originalMax == model.originalMax
True
.. versionadded:: 1.6.0
"""
min = Param(Params._dummy(), "min", "Lower bound of the output feature range",
typeConverter=TypeConverters.toFloat)
max = Param(Params._dummy(), "max", "Upper bound of the output feature range",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, min=0.0, max=1.0, inputCol=None, outputCol=None):
"""
__init__(self, min=0.0, max=1.0, inputCol=None, outputCol=None)
"""
super(MinMaxScaler, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.MinMaxScaler", self.uid)
self._setDefault(min=0.0, max=1.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, min=0.0, max=1.0, inputCol=None, outputCol=None):
"""
setParams(self, min=0.0, max=1.0, inputCol=None, outputCol=None)
Sets params for this MinMaxScaler.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.6.0")
def setMin(self, value):
"""
Sets the value of :py:attr:`min`.
"""
return self._set(min=value)
@since("1.6.0")
def getMin(self):
"""
Gets the value of min or its default value.
"""
return self.getOrDefault(self.min)
@since("1.6.0")
def setMax(self, value):
"""
Sets the value of :py:attr:`max`.
"""
return self._set(max=value)
@since("1.6.0")
def getMax(self):
"""
Gets the value of max or its default value.
"""
return self.getOrDefault(self.max)
def _create_model(self, java_model):
return MinMaxScalerModel(java_model)
class MinMaxScalerModel(JavaModel, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`MinMaxScaler`.
.. versionadded:: 1.6.0
"""
@property
@since("2.0.0")
def originalMin(self):
"""
Min value for each original column during fitting.
"""
return self._call_java("originalMin")
@property
@since("2.0.0")
def originalMax(self):
"""
Max value for each original column during fitting.
"""
return self._call_java("originalMax")
@inherit_doc
@ignore_unicode_prefix
class NGram(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
A feature transformer that converts the input array of strings into an array of n-grams. Null
values in the input array are ignored.
It returns an array of n-grams where each n-gram is represented by a space-separated string of
words.
When the input is empty, an empty array is returned.
When the input array length is less than n (number of elements per n-gram), no n-grams are
returned.
>>> df = spark.createDataFrame([Row(inputTokens=["a", "b", "c", "d", "e"])])
>>> ngram = NGram(n=2, inputCol="inputTokens", outputCol="nGrams")
>>> ngram.transform(df).head()
Row(inputTokens=[u'a', u'b', u'c', u'd', u'e'], nGrams=[u'a b', u'b c', u'c d', u'd e'])
>>> # Change n-gram length
>>> ngram.setParams(n=4).transform(df).head()
Row(inputTokens=[u'a', u'b', u'c', u'd', u'e'], nGrams=[u'a b c d', u'b c d e'])
>>> # Temporarily modify output column.
>>> ngram.transform(df, {ngram.outputCol: "output"}).head()
Row(inputTokens=[u'a', u'b', u'c', u'd', u'e'], output=[u'a b c d', u'b c d e'])
>>> ngram.transform(df).head()
Row(inputTokens=[u'a', u'b', u'c', u'd', u'e'], nGrams=[u'a b c d', u'b c d e'])
>>> # Must use keyword arguments to specify params.
>>> ngram.setParams("text")
Traceback (most recent call last):
...
TypeError: Method setParams forces keyword arguments.
>>> ngramPath = temp_path + "/ngram"
>>> ngram.save(ngramPath)
>>> loadedNGram = NGram.load(ngramPath)
>>> loadedNGram.getN() == ngram.getN()
True
.. versionadded:: 1.5.0
"""
n = Param(Params._dummy(), "n", "number of elements per n-gram (>=1)",
typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, n=2, inputCol=None, outputCol=None):
"""
__init__(self, n=2, inputCol=None, outputCol=None)
"""
super(NGram, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.NGram", self.uid)
self._setDefault(n=2)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.5.0")
def setParams(self, n=2, inputCol=None, outputCol=None):
"""
setParams(self, n=2, inputCol=None, outputCol=None)
Sets params for this NGram.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.5.0")
def setN(self, value):
"""
Sets the value of :py:attr:`n`.
"""
return self._set(n=value)
@since("1.5.0")
def getN(self):
"""
Gets the value of n or its default value.
"""
return self.getOrDefault(self.n)
@inherit_doc
class Normalizer(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
Normalize a vector to have unit norm using the given p-norm.
>>> from pyspark.ml.linalg import Vectors
>>> svec = Vectors.sparse(4, {1: 4.0, 3: 3.0})
>>> df = spark.createDataFrame([(Vectors.dense([3.0, -4.0]), svec)], ["dense", "sparse"])
>>> normalizer = Normalizer(p=2.0, inputCol="dense", outputCol="features")
>>> normalizer.transform(df).head().features
DenseVector([0.6, -0.8])
>>> normalizer.setParams(inputCol="sparse", outputCol="freqs").transform(df).head().freqs
SparseVector(4, {1: 0.8, 3: 0.6})
>>> params = {normalizer.p: 1.0, normalizer.inputCol: "dense", normalizer.outputCol: "vector"}
>>> normalizer.transform(df, params).head().vector
DenseVector([0.4286, -0.5714])
>>> normalizerPath = temp_path + "/normalizer"
>>> normalizer.save(normalizerPath)
>>> loadedNormalizer = Normalizer.load(normalizerPath)
>>> loadedNormalizer.getP() == normalizer.getP()
True
.. versionadded:: 1.4.0
"""
p = Param(Params._dummy(), "p", "the p norm value.",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, p=2.0, inputCol=None, outputCol=None):
"""
__init__(self, p=2.0, inputCol=None, outputCol=None)
"""
super(Normalizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Normalizer", self.uid)
self._setDefault(p=2.0)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, p=2.0, inputCol=None, outputCol=None):
"""
setParams(self, p=2.0, inputCol=None, outputCol=None)
Sets params for this Normalizer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setP(self, value):
"""
Sets the value of :py:attr:`p`.
"""
return self._set(p=value)
@since("1.4.0")
def getP(self):
"""
Gets the value of p or its default value.
"""
return self.getOrDefault(self.p)
@inherit_doc
class OneHotEncoder(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
A one-hot encoder that maps a column of category indices to a
column of binary vectors, with at most a single one-value per row
that indicates the input category index.
For example with 5 categories, an input value of 2.0 would map to
an output vector of `[0.0, 0.0, 1.0, 0.0]`.
The last category is not included by default (configurable via
:py:attr:`dropLast`) because it makes the vector entries sum up to
one, and hence linearly dependent.
So an input value of 4.0 maps to `[0.0, 0.0, 0.0, 0.0]`.
.. note:: This is different from scikit-learn's OneHotEncoder,
which keeps all categories. The output vectors are sparse.
.. seealso::
:py:class:`StringIndexer` for converting categorical values into
category indices
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed")
>>> model = stringIndexer.fit(stringIndDf)
>>> td = model.transform(stringIndDf)
>>> encoder = OneHotEncoder(inputCol="indexed", outputCol="features")
>>> encoder.transform(td).head().features
SparseVector(2, {0: 1.0})
>>> encoder.setParams(outputCol="freqs").transform(td).head().freqs
SparseVector(2, {0: 1.0})
>>> params = {encoder.dropLast: False, encoder.outputCol: "test"}
>>> encoder.transform(td, params).head().test
SparseVector(3, {0: 1.0})
>>> onehotEncoderPath = temp_path + "/onehot-encoder"
>>> encoder.save(onehotEncoderPath)
>>> loadedEncoder = OneHotEncoder.load(onehotEncoderPath)
>>> loadedEncoder.getDropLast() == encoder.getDropLast()
True
.. versionadded:: 1.4.0
"""
dropLast = Param(Params._dummy(), "dropLast", "whether to drop the last category",
typeConverter=TypeConverters.toBoolean)
@keyword_only
def __init__(self, dropLast=True, inputCol=None, outputCol=None):
"""
__init__(self, dropLast=True, inputCol=None, outputCol=None)
"""
super(OneHotEncoder, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.OneHotEncoder", self.uid)
self._setDefault(dropLast=True)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, dropLast=True, inputCol=None, outputCol=None):
"""
setParams(self, dropLast=True, inputCol=None, outputCol=None)
Sets params for this OneHotEncoder.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setDropLast(self, value):
"""
Sets the value of :py:attr:`dropLast`.
"""
return self._set(dropLast=value)
@since("1.4.0")
def getDropLast(self):
"""
Gets the value of dropLast or its default value.
"""
return self.getOrDefault(self.dropLast)
@inherit_doc
class PolynomialExpansion(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable,
JavaMLWritable):
"""
    Perform feature expansion in a polynomial space. As described in the `Wikipedia article on
    Polynomial Expansion <http://en.wikipedia.org/wiki/Polynomial_expansion>`_, "In mathematics, an
    expansion of a product of sums expresses it as a sum of products by using the fact that
    multiplication distributes over addition". Take a 2-variable feature vector `(x, y)` as an
    example: if we expand it with degree 2, we get `(x, x * x, y, x * y, y * y)`.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([(Vectors.dense([0.5, 2.0]),)], ["dense"])
>>> px = PolynomialExpansion(degree=2, inputCol="dense", outputCol="expanded")
>>> px.transform(df).head().expanded
DenseVector([0.5, 0.25, 2.0, 1.0, 4.0])
>>> px.setParams(outputCol="test").transform(df).head().test
DenseVector([0.5, 0.25, 2.0, 1.0, 4.0])
>>> polyExpansionPath = temp_path + "/poly-expansion"
>>> px.save(polyExpansionPath)
>>> loadedPx = PolynomialExpansion.load(polyExpansionPath)
>>> loadedPx.getDegree() == px.getDegree()
True
.. versionadded:: 1.4.0
"""
degree = Param(Params._dummy(), "degree", "the polynomial degree to expand (>= 1)",
typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, degree=2, inputCol=None, outputCol=None):
"""
__init__(self, degree=2, inputCol=None, outputCol=None)
"""
super(PolynomialExpansion, self).__init__()
self._java_obj = self._new_java_obj(
"org.apache.spark.ml.feature.PolynomialExpansion", self.uid)
self._setDefault(degree=2)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, degree=2, inputCol=None, outputCol=None):
"""
setParams(self, degree=2, inputCol=None, outputCol=None)
Sets params for this PolynomialExpansion.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setDegree(self, value):
"""
Sets the value of :py:attr:`degree`.
"""
return self._set(degree=value)
@since("1.4.0")
def getDegree(self):
"""
Gets the value of degree or its default value.
"""
return self.getOrDefault(self.degree)
@inherit_doc
class QuantileDiscretizer(JavaEstimator, HasInputCol, HasOutputCol, HasHandleInvalid,
JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
`QuantileDiscretizer` takes a column with continuous features and outputs a column with binned
categorical features. The number of bins can be set using the :py:attr:`numBuckets` parameter.
It is possible that the number of buckets used will be less than this value, for example, if
there are too few distinct values of the input to create enough distinct quantiles.
    NaN handling: by default,
    QuantileDiscretizer will raise an error when it finds NaN values in the dataset, but the user
    can choose to either keep or remove NaN values within the dataset by setting the
    :py:attr:`handleInvalid` parameter. If the user chooses to keep NaN values, they will be
    handled specially and placed into their own bucket. For example, if 4 buckets are used, then
    non-NaN data will be put into buckets[0-3], but NaNs will be counted in a special bucket[4].
Algorithm: The bin ranges are chosen using an approximate algorithm (see the documentation for
:py:meth:`~.DataFrameStatFunctions.approxQuantile` for a detailed description).
The precision of the approximation can be controlled with the
:py:attr:`relativeError` parameter.
The lower and upper bin bounds will be `-Infinity` and `+Infinity`, covering all real values.
>>> values = [(0.1,), (0.4,), (1.2,), (1.5,), (float("nan"),), (float("nan"),)]
>>> df = spark.createDataFrame(values, ["values"])
>>> qds = QuantileDiscretizer(numBuckets=2,
... inputCol="values", outputCol="buckets", relativeError=0.01, handleInvalid="error")
>>> qds.getRelativeError()
0.01
>>> bucketizer = qds.fit(df)
>>> qds.setHandleInvalid("keep").fit(df).transform(df).count()
6
>>> qds.setHandleInvalid("skip").fit(df).transform(df).count()
4
>>> splits = bucketizer.getSplits()
>>> splits[0]
-inf
>>> print("%2.1f" % round(splits[1], 1))
0.4
>>> bucketed = bucketizer.transform(df).head()
>>> bucketed.buckets
0.0
>>> quantileDiscretizerPath = temp_path + "/quantile-discretizer"
>>> qds.save(quantileDiscretizerPath)
>>> loadedQds = QuantileDiscretizer.load(quantileDiscretizerPath)
>>> loadedQds.getNumBuckets() == qds.getNumBuckets()
True
.. versionadded:: 2.0.0
"""
numBuckets = Param(Params._dummy(), "numBuckets",
"Maximum number of buckets (quantiles, or " +
"categories) into which data points are grouped. Must be >= 2.",
typeConverter=TypeConverters.toInt)
relativeError = Param(Params._dummy(), "relativeError", "The relative target precision for " +
"the approximate quantile algorithm used to generate buckets. " +
"Must be in the range [0, 1].",
typeConverter=TypeConverters.toFloat)
handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid entries. " +
"Options are skip (filter out rows with invalid values), " +
"error (throw an error), or keep (keep invalid values in a special " +
"additional bucket).",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, numBuckets=2, inputCol=None, outputCol=None, relativeError=0.001,
handleInvalid="error"):
"""
__init__(self, numBuckets=2, inputCol=None, outputCol=None, relativeError=0.001, \
handleInvalid="error")
"""
super(QuantileDiscretizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.QuantileDiscretizer",
self.uid)
self._setDefault(numBuckets=2, relativeError=0.001, handleInvalid="error")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, numBuckets=2, inputCol=None, outputCol=None, relativeError=0.001,
handleInvalid="error"):
"""
setParams(self, numBuckets=2, inputCol=None, outputCol=None, relativeError=0.001, \
handleInvalid="error")
Set the params for the QuantileDiscretizer
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.0.0")
def setNumBuckets(self, value):
"""
Sets the value of :py:attr:`numBuckets`.
"""
return self._set(numBuckets=value)
@since("2.0.0")
def getNumBuckets(self):
"""
Gets the value of numBuckets or its default value.
"""
return self.getOrDefault(self.numBuckets)
@since("2.0.0")
def setRelativeError(self, value):
"""
Sets the value of :py:attr:`relativeError`.
"""
return self._set(relativeError=value)
@since("2.0.0")
def getRelativeError(self):
"""
Gets the value of relativeError or its default value.
"""
return self.getOrDefault(self.relativeError)
def _create_model(self, java_model):
"""
Private method to convert the java_model to a Python model.
"""
return Bucketizer(splits=list(java_model.getSplits()),
inputCol=self.getInputCol(),
outputCol=self.getOutputCol(),
handleInvalid=self.getHandleInvalid())
@inherit_doc
@ignore_unicode_prefix
class RegexTokenizer(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
    A regex-based tokenizer that extracts tokens either by using the
    provided regex pattern (in Java dialect) to split the text
    (the default) or by repeatedly matching the regex (if gaps is false).
    Optional parameters also allow filtering tokens by a minimum
    length.
    It returns an array of strings that can be empty.
>>> df = spark.createDataFrame([("A B c",)], ["text"])
>>> reTokenizer = RegexTokenizer(inputCol="text", outputCol="words")
>>> reTokenizer.transform(df).head()
Row(text=u'A B c', words=[u'a', u'b', u'c'])
>>> # Change a parameter.
>>> reTokenizer.setParams(outputCol="tokens").transform(df).head()
Row(text=u'A B c', tokens=[u'a', u'b', u'c'])
>>> # Temporarily modify a parameter.
>>> reTokenizer.transform(df, {reTokenizer.outputCol: "words"}).head()
Row(text=u'A B c', words=[u'a', u'b', u'c'])
>>> reTokenizer.transform(df).head()
Row(text=u'A B c', tokens=[u'a', u'b', u'c'])
>>> # Must use keyword arguments to specify params.
>>> reTokenizer.setParams("text")
Traceback (most recent call last):
...
TypeError: Method setParams forces keyword arguments.
>>> regexTokenizerPath = temp_path + "/regex-tokenizer"
>>> reTokenizer.save(regexTokenizerPath)
>>> loadedReTokenizer = RegexTokenizer.load(regexTokenizerPath)
>>> loadedReTokenizer.getMinTokenLength() == reTokenizer.getMinTokenLength()
True
>>> loadedReTokenizer.getGaps() == reTokenizer.getGaps()
True
.. versionadded:: 1.4.0
"""
minTokenLength = Param(Params._dummy(), "minTokenLength", "minimum token length (>= 0)",
typeConverter=TypeConverters.toInt)
gaps = Param(Params._dummy(), "gaps", "whether regex splits on gaps (True) or matches tokens " +
"(False)")
pattern = Param(Params._dummy(), "pattern", "regex pattern (Java dialect) used for tokenizing",
typeConverter=TypeConverters.toString)
toLowercase = Param(Params._dummy(), "toLowercase", "whether to convert all characters to " +
"lowercase before tokenizing", typeConverter=TypeConverters.toBoolean)
@keyword_only
def __init__(self, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None,
outputCol=None, toLowercase=True):
"""
__init__(self, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None, \
outputCol=None, toLowercase=True)
"""
super(RegexTokenizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.RegexTokenizer", self.uid)
self._setDefault(minTokenLength=1, gaps=True, pattern="\\s+", toLowercase=True)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None,
outputCol=None, toLowercase=True):
"""
setParams(self, minTokenLength=1, gaps=True, pattern="\\s+", inputCol=None, \
outputCol=None, toLowercase=True)
Sets params for this RegexTokenizer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setMinTokenLength(self, value):
"""
Sets the value of :py:attr:`minTokenLength`.
"""
return self._set(minTokenLength=value)
@since("1.4.0")
def getMinTokenLength(self):
"""
Gets the value of minTokenLength or its default value.
"""
return self.getOrDefault(self.minTokenLength)
@since("1.4.0")
def setGaps(self, value):
"""
Sets the value of :py:attr:`gaps`.
"""
return self._set(gaps=value)
@since("1.4.0")
def getGaps(self):
"""
Gets the value of gaps or its default value.
"""
return self.getOrDefault(self.gaps)
@since("1.4.0")
def setPattern(self, value):
"""
Sets the value of :py:attr:`pattern`.
"""
return self._set(pattern=value)
@since("1.4.0")
def getPattern(self):
"""
Gets the value of pattern or its default value.
"""
return self.getOrDefault(self.pattern)
@since("2.0.0")
def setToLowercase(self, value):
"""
Sets the value of :py:attr:`toLowercase`.
"""
return self._set(toLowercase=value)
@since("2.0.0")
def getToLowercase(self):
"""
Gets the value of toLowercase or its default value.
"""
return self.getOrDefault(self.toLowercase)
@inherit_doc
class SQLTransformer(JavaTransformer, JavaMLReadable, JavaMLWritable):
"""
    Implements the transforms defined by a SQL statement.
    Currently only SQL syntax like 'SELECT ... FROM __THIS__' is supported,
    where '__THIS__' represents the underlying table of the input dataset.
>>> df = spark.createDataFrame([(0, 1.0, 3.0), (2, 2.0, 5.0)], ["id", "v1", "v2"])
>>> sqlTrans = SQLTransformer(
... statement="SELECT *, (v1 + v2) AS v3, (v1 * v2) AS v4 FROM __THIS__")
>>> sqlTrans.transform(df).head()
Row(id=0, v1=1.0, v2=3.0, v3=4.0, v4=3.0)
>>> sqlTransformerPath = temp_path + "/sql-transformer"
>>> sqlTrans.save(sqlTransformerPath)
>>> loadedSqlTrans = SQLTransformer.load(sqlTransformerPath)
>>> loadedSqlTrans.getStatement() == sqlTrans.getStatement()
True
.. versionadded:: 1.6.0
"""
statement = Param(Params._dummy(), "statement", "SQL statement",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, statement=None):
"""
__init__(self, statement=None)
"""
super(SQLTransformer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.SQLTransformer", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, statement=None):
"""
setParams(self, statement=None)
Sets params for this SQLTransformer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.6.0")
def setStatement(self, value):
"""
Sets the value of :py:attr:`statement`.
"""
return self._set(statement=value)
@since("1.6.0")
def getStatement(self):
"""
Gets the value of statement or its default value.
"""
return self.getOrDefault(self.statement)
@inherit_doc
class StandardScaler(JavaEstimator, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
Standardizes features by removing the mean and scaling to unit variance using column summary
statistics on the samples in the training set.
The "unit std" is computed using the `corrected sample standard deviation \
<https://en.wikipedia.org/wiki/Standard_deviation#Corrected_sample_standard_deviation>`_,
which is computed as the square root of the unbiased sample variance.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([(Vectors.dense([0.0]),), (Vectors.dense([2.0]),)], ["a"])
>>> standardScaler = StandardScaler(inputCol="a", outputCol="scaled")
>>> model = standardScaler.fit(df)
>>> model.mean
DenseVector([1.0])
>>> model.std
DenseVector([1.4142])
>>> model.transform(df).collect()[1].scaled
DenseVector([1.4142])
>>> standardScalerPath = temp_path + "/standard-scaler"
>>> standardScaler.save(standardScalerPath)
>>> loadedStandardScaler = StandardScaler.load(standardScalerPath)
>>> loadedStandardScaler.getWithMean() == standardScaler.getWithMean()
True
>>> loadedStandardScaler.getWithStd() == standardScaler.getWithStd()
True
>>> modelPath = temp_path + "/standard-scaler-model"
>>> model.save(modelPath)
>>> loadedModel = StandardScalerModel.load(modelPath)
>>> loadedModel.std == model.std
True
>>> loadedModel.mean == model.mean
True
.. versionadded:: 1.4.0
"""
withMean = Param(Params._dummy(), "withMean", "Center data with mean",
typeConverter=TypeConverters.toBoolean)
withStd = Param(Params._dummy(), "withStd", "Scale to unit standard deviation",
typeConverter=TypeConverters.toBoolean)
@keyword_only
def __init__(self, withMean=False, withStd=True, inputCol=None, outputCol=None):
"""
__init__(self, withMean=False, withStd=True, inputCol=None, outputCol=None)
"""
super(StandardScaler, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.StandardScaler", self.uid)
self._setDefault(withMean=False, withStd=True)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, withMean=False, withStd=True, inputCol=None, outputCol=None):
"""
setParams(self, withMean=False, withStd=True, inputCol=None, outputCol=None)
Sets params for this StandardScaler.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setWithMean(self, value):
"""
Sets the value of :py:attr:`withMean`.
"""
return self._set(withMean=value)
@since("1.4.0")
def getWithMean(self):
"""
Gets the value of withMean or its default value.
"""
return self.getOrDefault(self.withMean)
@since("1.4.0")
def setWithStd(self, value):
"""
Sets the value of :py:attr:`withStd`.
"""
return self._set(withStd=value)
@since("1.4.0")
def getWithStd(self):
"""
Gets the value of withStd or its default value.
"""
return self.getOrDefault(self.withStd)
def _create_model(self, java_model):
return StandardScalerModel(java_model)
class StandardScalerModel(JavaModel, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`StandardScaler`.
.. versionadded:: 1.4.0
"""
@property
@since("2.0.0")
def std(self):
"""
Standard deviation of the StandardScalerModel.
"""
return self._call_java("std")
@property
@since("2.0.0")
def mean(self):
"""
Mean of the StandardScalerModel.
"""
return self._call_java("mean")
@inherit_doc
class StringIndexer(JavaEstimator, HasInputCol, HasOutputCol, HasHandleInvalid, JavaMLReadable,
JavaMLWritable):
"""
A label indexer that maps a string column of labels to an ML column of label indices.
If the input column is numeric, we cast it to string and index the string values.
The indices are in [0, numLabels). By default, this is ordered by label frequencies
so the most frequent label gets index 0. The ordering behavior is controlled by
setting :py:attr:`stringOrderType`. Its default value is 'frequencyDesc'.
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed", handleInvalid="error",
... stringOrderType="frequencyDesc")
>>> model = stringIndexer.fit(stringIndDf)
>>> td = model.transform(stringIndDf)
>>> sorted(set([(i[0], i[1]) for i in td.select(td.id, td.indexed).collect()]),
... key=lambda x: x[0])
[(0, 0.0), (1, 2.0), (2, 1.0), (3, 0.0), (4, 0.0), (5, 1.0)]
>>> inverter = IndexToString(inputCol="indexed", outputCol="label2", labels=model.labels)
>>> itd = inverter.transform(td)
>>> sorted(set([(i[0], str(i[1])) for i in itd.select(itd.id, itd.label2).collect()]),
... key=lambda x: x[0])
[(0, 'a'), (1, 'b'), (2, 'c'), (3, 'a'), (4, 'a'), (5, 'c')]
>>> stringIndexerPath = temp_path + "/string-indexer"
>>> stringIndexer.save(stringIndexerPath)
>>> loadedIndexer = StringIndexer.load(stringIndexerPath)
>>> loadedIndexer.getHandleInvalid() == stringIndexer.getHandleInvalid()
True
>>> modelPath = temp_path + "/string-indexer-model"
>>> model.save(modelPath)
>>> loadedModel = StringIndexerModel.load(modelPath)
>>> loadedModel.labels == model.labels
True
>>> indexToStringPath = temp_path + "/index-to-string"
>>> inverter.save(indexToStringPath)
>>> loadedInverter = IndexToString.load(indexToStringPath)
>>> loadedInverter.getLabels() == inverter.getLabels()
True
>>> stringIndexer.getStringOrderType()
'frequencyDesc'
>>> stringIndexer = StringIndexer(inputCol="label", outputCol="indexed", handleInvalid="error",
... stringOrderType="alphabetDesc")
>>> model = stringIndexer.fit(stringIndDf)
>>> td = model.transform(stringIndDf)
>>> sorted(set([(i[0], i[1]) for i in td.select(td.id, td.indexed).collect()]),
... key=lambda x: x[0])
[(0, 2.0), (1, 1.0), (2, 0.0), (3, 2.0), (4, 2.0), (5, 0.0)]
.. versionadded:: 1.4.0
"""
stringOrderType = Param(Params._dummy(), "stringOrderType",
"How to order labels of string column. The first label after " +
"ordering is assigned an index of 0. Supported options: " +
"frequencyDesc, frequencyAsc, alphabetDesc, alphabetAsc.",
typeConverter=TypeConverters.toString)
handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid data (unseen " +
"or NULL values) in features and label column of string type. " +
"Options are 'skip' (filter out rows with invalid data), " +
"error (throw an error), or 'keep' (put invalid data " +
"in a special additional bucket, at index numLabels).",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, inputCol=None, outputCol=None, handleInvalid="error",
stringOrderType="frequencyDesc"):
"""
__init__(self, inputCol=None, outputCol=None, handleInvalid="error", \
stringOrderType="frequencyDesc")
"""
super(StringIndexer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.StringIndexer", self.uid)
self._setDefault(handleInvalid="error", stringOrderType="frequencyDesc")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, inputCol=None, outputCol=None, handleInvalid="error",
stringOrderType="frequencyDesc"):
"""
setParams(self, inputCol=None, outputCol=None, handleInvalid="error", \
stringOrderType="frequencyDesc")
Sets params for this StringIndexer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return StringIndexerModel(java_model)
@since("2.3.0")
def setStringOrderType(self, value):
"""
Sets the value of :py:attr:`stringOrderType`.
"""
return self._set(stringOrderType=value)
@since("2.3.0")
def getStringOrderType(self):
"""
Gets the value of :py:attr:`stringOrderType` or its default value 'frequencyDesc'.
"""
return self.getOrDefault(self.stringOrderType)
class StringIndexerModel(JavaModel, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`StringIndexer`.
.. versionadded:: 1.4.0
"""
@property
@since("1.5.0")
def labels(self):
"""
Ordered list of labels, corresponding to indices to be assigned.
"""
return self._call_java("labels")
@inherit_doc
class IndexToString(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
A :py:class:`Transformer` that maps a column of indices back to a new column of
corresponding string values.
The index-string mapping is either from the ML attributes of the input column,
or from user-supplied labels (which take precedence over ML attributes).
    See :py:class:`StringIndexer` for converting strings into indices.
.. versionadded:: 1.6.0
"""
labels = Param(Params._dummy(), "labels",
"Optional array of labels specifying index-string mapping." +
" If not provided or if empty, then metadata from inputCol is used instead.",
typeConverter=TypeConverters.toListString)
@keyword_only
def __init__(self, inputCol=None, outputCol=None, labels=None):
"""
__init__(self, inputCol=None, outputCol=None, labels=None)
"""
super(IndexToString, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.IndexToString",
self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, inputCol=None, outputCol=None, labels=None):
"""
setParams(self, inputCol=None, outputCol=None, labels=None)
Sets params for this IndexToString.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.6.0")
def setLabels(self, value):
"""
Sets the value of :py:attr:`labels`.
"""
return self._set(labels=value)
@since("1.6.0")
def getLabels(self):
"""
Gets the value of :py:attr:`labels` or its default value.
"""
return self.getOrDefault(self.labels)
class StopWordsRemover(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
A feature transformer that filters out stop words from input.
    .. note:: null values from the input array are preserved unless null is explicitly added to
        stopWords.
>>> df = spark.createDataFrame([(["a", "b", "c"],)], ["text"])
>>> remover = StopWordsRemover(inputCol="text", outputCol="words", stopWords=["b"])
>>> remover.transform(df).head().words == ['a', 'c']
True
>>> stopWordsRemoverPath = temp_path + "/stopwords-remover"
>>> remover.save(stopWordsRemoverPath)
>>> loadedRemover = StopWordsRemover.load(stopWordsRemoverPath)
>>> loadedRemover.getStopWords() == remover.getStopWords()
True
>>> loadedRemover.getCaseSensitive() == remover.getCaseSensitive()
True
.. versionadded:: 1.6.0
"""
stopWords = Param(Params._dummy(), "stopWords", "The words to be filtered out",
typeConverter=TypeConverters.toListString)
caseSensitive = Param(Params._dummy(), "caseSensitive", "whether to do a case sensitive " +
"comparison over the stop words", typeConverter=TypeConverters.toBoolean)
@keyword_only
def __init__(self, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False):
"""
        __init__(self, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False)
"""
super(StopWordsRemover, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.StopWordsRemover",
self.uid)
self._setDefault(stopWords=StopWordsRemover.loadDefaultStopWords("english"),
caseSensitive=False)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False):
"""
        setParams(self, inputCol=None, outputCol=None, stopWords=None, caseSensitive=False)
        Sets params for this StopWordsRemover.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.6.0")
def setStopWords(self, value):
"""
Sets the value of :py:attr:`stopWords`.
"""
return self._set(stopWords=value)
@since("1.6.0")
def getStopWords(self):
"""
Gets the value of :py:attr:`stopWords` or its default value.
"""
return self.getOrDefault(self.stopWords)
@since("1.6.0")
def setCaseSensitive(self, value):
"""
Sets the value of :py:attr:`caseSensitive`.
"""
return self._set(caseSensitive=value)
@since("1.6.0")
def getCaseSensitive(self):
"""
Gets the value of :py:attr:`caseSensitive` or its default value.
"""
return self.getOrDefault(self.caseSensitive)
@staticmethod
@since("2.0.0")
def loadDefaultStopWords(language):
"""
Loads the default stop words for the given language.
Supported languages: danish, dutch, english, finnish, french, german, hungarian,
italian, norwegian, portuguese, russian, spanish, swedish, turkish
"""
stopWordsObj = _jvm().org.apache.spark.ml.feature.StopWordsRemover
return list(stopWordsObj.loadDefaultStopWords(language))
@inherit_doc
@ignore_unicode_prefix
class Tokenizer(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
A tokenizer that converts the input string to lowercase and then
splits it by white spaces.
>>> df = spark.createDataFrame([("a b c",)], ["text"])
>>> tokenizer = Tokenizer(inputCol="text", outputCol="words")
>>> tokenizer.transform(df).head()
Row(text=u'a b c', words=[u'a', u'b', u'c'])
>>> # Change a parameter.
>>> tokenizer.setParams(outputCol="tokens").transform(df).head()
Row(text=u'a b c', tokens=[u'a', u'b', u'c'])
>>> # Temporarily modify a parameter.
>>> tokenizer.transform(df, {tokenizer.outputCol: "words"}).head()
Row(text=u'a b c', words=[u'a', u'b', u'c'])
>>> tokenizer.transform(df).head()
Row(text=u'a b c', tokens=[u'a', u'b', u'c'])
>>> # Must use keyword arguments to specify params.
>>> tokenizer.setParams("text")
Traceback (most recent call last):
...
TypeError: Method setParams forces keyword arguments.
>>> tokenizerPath = temp_path + "/tokenizer"
>>> tokenizer.save(tokenizerPath)
>>> loadedTokenizer = Tokenizer.load(tokenizerPath)
>>> loadedTokenizer.transform(df).head().tokens == tokenizer.transform(df).head().tokens
True
.. versionadded:: 1.3.0
"""
@keyword_only
def __init__(self, inputCol=None, outputCol=None):
"""
__init__(self, inputCol=None, outputCol=None)
"""
super(Tokenizer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Tokenizer", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.3.0")
def setParams(self, inputCol=None, outputCol=None):
"""
setParams(self, inputCol=None, outputCol=None)
Sets params for this Tokenizer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@inherit_doc
class VectorAssembler(JavaTransformer, HasInputCols, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
A feature transformer that merges multiple columns into a vector column.
>>> df = spark.createDataFrame([(1, 0, 3)], ["a", "b", "c"])
>>> vecAssembler = VectorAssembler(inputCols=["a", "b", "c"], outputCol="features")
>>> vecAssembler.transform(df).head().features
DenseVector([1.0, 0.0, 3.0])
>>> vecAssembler.setParams(outputCol="freqs").transform(df).head().freqs
DenseVector([1.0, 0.0, 3.0])
>>> params = {vecAssembler.inputCols: ["b", "a"], vecAssembler.outputCol: "vector"}
>>> vecAssembler.transform(df, params).head().vector
DenseVector([0.0, 1.0])
>>> vectorAssemblerPath = temp_path + "/vector-assembler"
>>> vecAssembler.save(vectorAssemblerPath)
>>> loadedAssembler = VectorAssembler.load(vectorAssemblerPath)
>>> loadedAssembler.transform(df).head().freqs == vecAssembler.transform(df).head().freqs
True
.. versionadded:: 1.4.0
"""
@keyword_only
def __init__(self, inputCols=None, outputCol=None):
"""
__init__(self, inputCols=None, outputCol=None)
"""
super(VectorAssembler, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.VectorAssembler", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, inputCols=None, outputCol=None):
"""
setParams(self, inputCols=None, outputCol=None)
Sets params for this VectorAssembler.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@inherit_doc
class VectorIndexer(JavaEstimator, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
Class for indexing categorical feature columns in a dataset of `Vector`.
This has 2 usage modes:
- Automatically identify categorical features (default behavior)
- This helps process a dataset of unknown vectors into a dataset with some continuous
features and some categorical features. The choice between continuous and categorical
is based upon a maxCategories parameter.
         - Set maxCategories to the maximum number of categories any categorical feature should
           have.
- E.g.: Feature 0 has unique values {-1.0, 0.0}, and feature 1 values {1.0, 3.0, 5.0}.
If maxCategories = 2, then feature 0 will be declared categorical and use indices {0, 1},
and feature 1 will be declared continuous.
- Index all features, if all features are categorical
- If maxCategories is set to be very large, then this will build an index of unique
values for all features.
- Warning: This can cause problems if features are continuous since this will collect ALL
unique values to the driver.
- E.g.: Feature 0 has unique values {-1.0, 0.0}, and feature 1 values {1.0, 3.0, 5.0}.
If maxCategories >= 3, then both features will be declared categorical.
This returns a model which can transform categorical features to use 0-based indices.
Index stability:
- This is not guaranteed to choose the same category index across multiple runs.
- If a categorical feature includes value 0, then this is guaranteed to map value 0 to
index 0. This maintains vector sparsity.
- More stability may be added in the future.
TODO: Future extensions: The following functionality is planned for the future:
- Preserve metadata in transform; if a feature's metadata is already present,
do not recompute.
- Specify certain features to not index, either via a parameter or via existing metadata.
- Add warning if a categorical feature has only 1 category.
- Add option for allowing unknown categories.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([(Vectors.dense([-1.0, 0.0]),),
... (Vectors.dense([0.0, 1.0]),), (Vectors.dense([0.0, 2.0]),)], ["a"])
>>> indexer = VectorIndexer(maxCategories=2, inputCol="a", outputCol="indexed")
>>> model = indexer.fit(df)
>>> model.transform(df).head().indexed
DenseVector([1.0, 0.0])
>>> model.numFeatures
2
>>> model.categoryMaps
{0: {0.0: 0, -1.0: 1}}
>>> indexer.setParams(outputCol="test").fit(df).transform(df).collect()[1].test
DenseVector([0.0, 1.0])
>>> params = {indexer.maxCategories: 3, indexer.outputCol: "vector"}
>>> model2 = indexer.fit(df, params)
>>> model2.transform(df).head().vector
DenseVector([1.0, 0.0])
>>> vectorIndexerPath = temp_path + "/vector-indexer"
>>> indexer.save(vectorIndexerPath)
>>> loadedIndexer = VectorIndexer.load(vectorIndexerPath)
>>> loadedIndexer.getMaxCategories() == indexer.getMaxCategories()
True
>>> modelPath = temp_path + "/vector-indexer-model"
>>> model.save(modelPath)
>>> loadedModel = VectorIndexerModel.load(modelPath)
>>> loadedModel.numFeatures == model.numFeatures
True
>>> loadedModel.categoryMaps == model.categoryMaps
True
.. versionadded:: 1.4.0
"""
maxCategories = Param(Params._dummy(), "maxCategories",
"Threshold for the number of values a categorical feature can take " +
"(>= 2). If a feature is found to have > maxCategories values, then " +
"it is declared continuous.", typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, maxCategories=20, inputCol=None, outputCol=None):
"""
__init__(self, maxCategories=20, inputCol=None, outputCol=None)
"""
super(VectorIndexer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.VectorIndexer", self.uid)
self._setDefault(maxCategories=20)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, maxCategories=20, inputCol=None, outputCol=None):
"""
setParams(self, maxCategories=20, inputCol=None, outputCol=None)
Sets params for this VectorIndexer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setMaxCategories(self, value):
"""
Sets the value of :py:attr:`maxCategories`.
"""
return self._set(maxCategories=value)
@since("1.4.0")
def getMaxCategories(self):
"""
Gets the value of maxCategories or its default value.
"""
return self.getOrDefault(self.maxCategories)
def _create_model(self, java_model):
return VectorIndexerModel(java_model)
class VectorIndexerModel(JavaModel, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`VectorIndexer`.
Transform categorical features to use 0-based indices instead of their original values.
- Categorical features are mapped to indices.
- Continuous features (columns) are left unchanged.
This also appends metadata to the output column, marking features as Numeric (continuous),
Nominal (categorical), or Binary (either continuous or categorical).
Non-ML metadata is not carried over from the input to the output column.
This maintains vector sparsity.
.. versionadded:: 1.4.0
"""
@property
@since("1.4.0")
def numFeatures(self):
"""
Number of features, i.e., length of Vectors which this transforms.
"""
return self._call_java("numFeatures")
@property
@since("1.4.0")
def categoryMaps(self):
"""
Feature value index. Keys are categorical feature indices (column indices).
Values are maps from original features values to 0-based category indices.
If a feature is not in this map, it is treated as continuous.
"""
return self._call_java("javaCategoryMaps")
@inherit_doc
class VectorSlicer(JavaTransformer, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
This class takes a feature vector and outputs a new feature vector with a subarray
of the original features.
The subset of features can be specified with either indices (`setIndices()`)
or names (`setNames()`). At least one feature must be selected. Duplicate features
are not allowed, so there can be no overlap between selected indices and names.
The output vector will order features with the selected indices first (in the order given),
followed by the selected names (in the order given).
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame([
... (Vectors.dense([-2.0, 2.3, 0.0, 0.0, 1.0]),),
... (Vectors.dense([0.0, 0.0, 0.0, 0.0, 0.0]),),
... (Vectors.dense([0.6, -1.1, -3.0, 4.5, 3.3]),)], ["features"])
>>> vs = VectorSlicer(inputCol="features", outputCol="sliced", indices=[1, 4])
>>> vs.transform(df).head().sliced
DenseVector([2.3, 1.0])
>>> vectorSlicerPath = temp_path + "/vector-slicer"
>>> vs.save(vectorSlicerPath)
>>> loadedVs = VectorSlicer.load(vectorSlicerPath)
>>> loadedVs.getIndices() == vs.getIndices()
True
>>> loadedVs.getNames() == vs.getNames()
True
.. versionadded:: 1.6.0
"""
indices = Param(Params._dummy(), "indices", "An array of indices to select features from " +
"a vector column. There can be no overlap with names.",
typeConverter=TypeConverters.toListInt)
names = Param(Params._dummy(), "names", "An array of feature names to select features from " +
"a vector column. These names must be specified by ML " +
"org.apache.spark.ml.attribute.Attribute. There can be no overlap with " +
"indices.", typeConverter=TypeConverters.toListString)
@keyword_only
def __init__(self, inputCol=None, outputCol=None, indices=None, names=None):
"""
__init__(self, inputCol=None, outputCol=None, indices=None, names=None)
"""
super(VectorSlicer, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.VectorSlicer", self.uid)
self._setDefault(indices=[], names=[])
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.6.0")
def setParams(self, inputCol=None, outputCol=None, indices=None, names=None):
"""
setParams(self, inputCol=None, outputCol=None, indices=None, names=None):
Sets params for this VectorSlicer.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.6.0")
def setIndices(self, value):
"""
Sets the value of :py:attr:`indices`.
"""
return self._set(indices=value)
@since("1.6.0")
def getIndices(self):
"""
Gets the value of indices or its default value.
"""
return self.getOrDefault(self.indices)
@since("1.6.0")
def setNames(self, value):
"""
Sets the value of :py:attr:`names`.
"""
return self._set(names=value)
@since("1.6.0")
def getNames(self):
"""
Gets the value of names or its default value.
"""
return self.getOrDefault(self.names)
@inherit_doc
@ignore_unicode_prefix
class Word2Vec(JavaEstimator, HasStepSize, HasMaxIter, HasSeed, HasInputCol, HasOutputCol,
JavaMLReadable, JavaMLWritable):
"""
    Word2Vec trains a model of `Map(String, Vector)`, i.e. it transforms a word into a vector
    ("code") for use in further natural language processing or machine learning.
>>> sent = ("a b " * 100 + "a c " * 10).split(" ")
>>> doc = spark.createDataFrame([(sent,), (sent,)], ["sentence"])
>>> word2Vec = Word2Vec(vectorSize=5, seed=42, inputCol="sentence", outputCol="model")
>>> model = word2Vec.fit(doc)
>>> model.getVectors().show()
+----+--------------------+
|word| vector|
+----+--------------------+
| a|[0.09461779892444...|
| b|[1.15474212169647...|
| c|[-0.3794820010662...|
+----+--------------------+
...
>>> from pyspark.sql.functions import format_number as fmt
>>> model.findSynonyms("a", 2).select("word", fmt("similarity", 5).alias("similarity")).show()
+----+----------+
|word|similarity|
+----+----------+
| b| 0.25053|
| c| -0.69805|
+----+----------+
...
>>> model.transform(doc).head().model
DenseVector([0.5524, -0.4995, -0.3599, 0.0241, 0.3461])
>>> word2vecPath = temp_path + "/word2vec"
>>> word2Vec.save(word2vecPath)
>>> loadedWord2Vec = Word2Vec.load(word2vecPath)
>>> loadedWord2Vec.getVectorSize() == word2Vec.getVectorSize()
True
>>> loadedWord2Vec.getNumPartitions() == word2Vec.getNumPartitions()
True
>>> loadedWord2Vec.getMinCount() == word2Vec.getMinCount()
True
>>> modelPath = temp_path + "/word2vec-model"
>>> model.save(modelPath)
>>> loadedModel = Word2VecModel.load(modelPath)
>>> loadedModel.getVectors().first().word == model.getVectors().first().word
True
>>> loadedModel.getVectors().first().vector == model.getVectors().first().vector
True
.. versionadded:: 1.4.0
"""
vectorSize = Param(Params._dummy(), "vectorSize",
"the dimension of codes after transforming from words",
typeConverter=TypeConverters.toInt)
numPartitions = Param(Params._dummy(), "numPartitions",
"number of partitions for sentences of words",
typeConverter=TypeConverters.toInt)
minCount = Param(Params._dummy(), "minCount",
"the minimum number of times a token must appear to be included in the " +
"word2vec model's vocabulary", typeConverter=TypeConverters.toInt)
windowSize = Param(Params._dummy(), "windowSize",
"the window size (context words from [-window, window]). Default value is 5",
typeConverter=TypeConverters.toInt)
maxSentenceLength = Param(Params._dummy(), "maxSentenceLength",
"Maximum length (in words) of each sentence in the input data. " +
"Any sentence longer than this threshold will " +
"be divided into chunks up to the size.",
typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1,
seed=None, inputCol=None, outputCol=None, windowSize=5, maxSentenceLength=1000):
"""
__init__(self, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1, \
seed=None, inputCol=None, outputCol=None, windowSize=5, maxSentenceLength=1000)
"""
super(Word2Vec, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.Word2Vec", self.uid)
self._setDefault(vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1,
windowSize=5, maxSentenceLength=1000)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1,
seed=None, inputCol=None, outputCol=None, windowSize=5, maxSentenceLength=1000):
"""
        setParams(self, vectorSize=100, minCount=5, numPartitions=1, stepSize=0.025, maxIter=1, \
                  seed=None, inputCol=None, outputCol=None, windowSize=5, maxSentenceLength=1000)
Sets params for this Word2Vec.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.4.0")
def setVectorSize(self, value):
"""
Sets the value of :py:attr:`vectorSize`.
"""
return self._set(vectorSize=value)
@since("1.4.0")
def getVectorSize(self):
"""
Gets the value of vectorSize or its default value.
"""
return self.getOrDefault(self.vectorSize)
@since("1.4.0")
def setNumPartitions(self, value):
"""
Sets the value of :py:attr:`numPartitions`.
"""
return self._set(numPartitions=value)
@since("1.4.0")
def getNumPartitions(self):
"""
Gets the value of numPartitions or its default value.
"""
return self.getOrDefault(self.numPartitions)
@since("1.4.0")
def setMinCount(self, value):
"""
Sets the value of :py:attr:`minCount`.
"""
return self._set(minCount=value)
@since("1.4.0")
def getMinCount(self):
"""
Gets the value of minCount or its default value.
"""
return self.getOrDefault(self.minCount)
@since("2.0.0")
def setWindowSize(self, value):
"""
Sets the value of :py:attr:`windowSize`.
"""
return self._set(windowSize=value)
@since("2.0.0")
def getWindowSize(self):
"""
Gets the value of windowSize or its default value.
"""
return self.getOrDefault(self.windowSize)
@since("2.0.0")
def setMaxSentenceLength(self, value):
"""
Sets the value of :py:attr:`maxSentenceLength`.
"""
return self._set(maxSentenceLength=value)
@since("2.0.0")
def getMaxSentenceLength(self):
"""
Gets the value of maxSentenceLength or its default value.
"""
return self.getOrDefault(self.maxSentenceLength)
def _create_model(self, java_model):
return Word2VecModel(java_model)
class Word2VecModel(JavaModel, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`Word2Vec`.
.. versionadded:: 1.4.0
"""
@since("1.5.0")
def getVectors(self):
"""
Returns the vector representation of the words as a dataframe
with two fields, word and vector.
"""
return self._call_java("getVectors")
@since("1.5.0")
def findSynonyms(self, word, num):
"""
Find "num" number of words closest in similarity to "word".
word can be a string or vector representation.
Returns a dataframe with two fields word and similarity (which
gives the cosine similarity).
"""
if not isinstance(word, basestring):
word = _convert_to_vector(word)
return self._call_java("findSynonyms", word, num)
@inherit_doc
class PCA(JavaEstimator, HasInputCol, HasOutputCol, JavaMLReadable, JavaMLWritable):
"""
PCA trains a model to project vectors to a lower dimensional space of the
top :py:attr:`k` principal components.
>>> from pyspark.ml.linalg import Vectors
>>> data = [(Vectors.sparse(5, [(1, 1.0), (3, 7.0)]),),
... (Vectors.dense([2.0, 0.0, 3.0, 4.0, 5.0]),),
... (Vectors.dense([4.0, 0.0, 0.0, 6.0, 7.0]),)]
>>> df = spark.createDataFrame(data,["features"])
>>> pca = PCA(k=2, inputCol="features", outputCol="pca_features")
>>> model = pca.fit(df)
>>> model.transform(df).collect()[0].pca_features
DenseVector([1.648..., -4.013...])
>>> model.explainedVariance
DenseVector([0.794..., 0.205...])
>>> pcaPath = temp_path + "/pca"
>>> pca.save(pcaPath)
>>> loadedPca = PCA.load(pcaPath)
>>> loadedPca.getK() == pca.getK()
True
>>> modelPath = temp_path + "/pca-model"
>>> model.save(modelPath)
>>> loadedModel = PCAModel.load(modelPath)
>>> loadedModel.pc == model.pc
True
>>> loadedModel.explainedVariance == model.explainedVariance
True
.. versionadded:: 1.5.0
"""
k = Param(Params._dummy(), "k", "the number of principal components",
typeConverter=TypeConverters.toInt)
@keyword_only
def __init__(self, k=None, inputCol=None, outputCol=None):
"""
__init__(self, k=None, inputCol=None, outputCol=None)
"""
super(PCA, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.PCA", self.uid)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.5.0")
def setParams(self, k=None, inputCol=None, outputCol=None):
"""
setParams(self, k=None, inputCol=None, outputCol=None)
Set params for this PCA.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.5.0")
def setK(self, value):
"""
Sets the value of :py:attr:`k`.
"""
return self._set(k=value)
@since("1.5.0")
def getK(self):
"""
Gets the value of k or its default value.
"""
return self.getOrDefault(self.k)
def _create_model(self, java_model):
return PCAModel(java_model)
class PCAModel(JavaModel, JavaMLReadable, JavaMLWritable):
"""
Model fitted by :py:class:`PCA`. Transforms vectors to a lower dimensional space.
.. versionadded:: 1.5.0
"""
@property
@since("2.0.0")
def pc(self):
"""
Returns a principal components Matrix.
Each column is one principal component.
"""
return self._call_java("pc")
@property
@since("2.0.0")
def explainedVariance(self):
"""
Returns a vector of proportions of variance
explained by each principal component.
"""
return self._call_java("explainedVariance")
@inherit_doc
class RFormula(JavaEstimator, HasFeaturesCol, HasLabelCol, HasHandleInvalid,
JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Implements the transforms required for fitting a dataset against an
R model formula. Currently we support a limited subset of the R
operators, including '~', '.', ':', '+', and '-'. Also see the `R formula docs
<http://stat.ethz.ch/R-manual/R-patched/library/stats/html/formula.html>`_.
>>> df = spark.createDataFrame([
... (1.0, 1.0, "a"),
... (0.0, 2.0, "b"),
... (0.0, 0.0, "a")
... ], ["y", "x", "s"])
>>> rf = RFormula(formula="y ~ x + s")
>>> model = rf.fit(df)
>>> model.transform(df).show()
+---+---+---+---------+-----+
| y| x| s| features|label|
+---+---+---+---------+-----+
|1.0|1.0| a|[1.0,1.0]| 1.0|
|0.0|2.0| b|[2.0,0.0]| 0.0|
|0.0|0.0| a|[0.0,1.0]| 0.0|
+---+---+---+---------+-----+
...
>>> rf.fit(df, {rf.formula: "y ~ . - s"}).transform(df).show()
+---+---+---+--------+-----+
| y| x| s|features|label|
+---+---+---+--------+-----+
|1.0|1.0| a| [1.0]| 1.0|
|0.0|2.0| b| [2.0]| 0.0|
|0.0|0.0| a| [0.0]| 0.0|
+---+---+---+--------+-----+
...
>>> rFormulaPath = temp_path + "/rFormula"
>>> rf.save(rFormulaPath)
>>> loadedRF = RFormula.load(rFormulaPath)
>>> loadedRF.getFormula() == rf.getFormula()
True
>>> loadedRF.getFeaturesCol() == rf.getFeaturesCol()
True
>>> loadedRF.getLabelCol() == rf.getLabelCol()
True
>>> loadedRF.getHandleInvalid() == rf.getHandleInvalid()
True
>>> str(loadedRF)
'RFormula(y ~ x + s) (uid=...)'
>>> modelPath = temp_path + "/rFormulaModel"
>>> model.save(modelPath)
>>> loadedModel = RFormulaModel.load(modelPath)
>>> loadedModel.uid == model.uid
True
>>> loadedModel.transform(df).show()
+---+---+---+---------+-----+
| y| x| s| features|label|
+---+---+---+---------+-----+
|1.0|1.0| a|[1.0,1.0]| 1.0|
|0.0|2.0| b|[2.0,0.0]| 0.0|
|0.0|0.0| a|[0.0,1.0]| 0.0|
+---+---+---+---------+-----+
...
>>> str(loadedModel)
'RFormulaModel(ResolvedRFormula(label=y, terms=[x,s], hasIntercept=true)) (uid=...)'
.. versionadded:: 1.5.0
"""
formula = Param(Params._dummy(), "formula", "R model formula",
typeConverter=TypeConverters.toString)
forceIndexLabel = Param(Params._dummy(), "forceIndexLabel",
"Force to index label whether it is numeric or string",
typeConverter=TypeConverters.toBoolean)
stringIndexerOrderType = Param(Params._dummy(), "stringIndexerOrderType",
"How to order categories of a string feature column used by " +
"StringIndexer. The last category after ordering is dropped " +
"when encoding strings. Supported options: frequencyDesc, " +
"frequencyAsc, alphabetDesc, alphabetAsc. The default value " +
"is frequencyDesc. When the ordering is set to alphabetDesc, " +
"RFormula drops the same category as R when encoding strings.",
typeConverter=TypeConverters.toString)
handleInvalid = Param(Params._dummy(), "handleInvalid", "how to handle invalid entries. " +
"Options are 'skip' (filter out rows with invalid values), " +
"'error' (throw an error), or 'keep' (put invalid data in a special " +
"additional bucket, at index numLabels).",
typeConverter=TypeConverters.toString)
@keyword_only
def __init__(self, formula=None, featuresCol="features", labelCol="label",
forceIndexLabel=False, stringIndexerOrderType="frequencyDesc",
handleInvalid="error"):
"""
__init__(self, formula=None, featuresCol="features", labelCol="label", \
forceIndexLabel=False, stringIndexerOrderType="frequencyDesc", \
handleInvalid="error")
"""
super(RFormula, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.RFormula", self.uid)
self._setDefault(forceIndexLabel=False, stringIndexerOrderType="frequencyDesc",
handleInvalid="error")
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.5.0")
def setParams(self, formula=None, featuresCol="features", labelCol="label",
forceIndexLabel=False, stringIndexerOrderType="frequencyDesc",
handleInvalid="error"):
"""
setParams(self, formula=None, featuresCol="features", labelCol="label", \
forceIndexLabel=False, stringIndexerOrderType="frequencyDesc", \
handleInvalid="error")
Sets params for RFormula.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("1.5.0")
def setFormula(self, value):
"""
Sets the value of :py:attr:`formula`.
"""
return self._set(formula=value)
@since("1.5.0")
def getFormula(self):
"""
Gets the value of :py:attr:`formula`.
"""
return self.getOrDefault(self.formula)
@since("2.1.0")
def setForceIndexLabel(self, value):
"""
Sets the value of :py:attr:`forceIndexLabel`.
"""
return self._set(forceIndexLabel=value)
@since("2.1.0")
def getForceIndexLabel(self):
"""
Gets the value of :py:attr:`forceIndexLabel`.
"""
return self.getOrDefault(self.forceIndexLabel)
@since("2.3.0")
def setStringIndexerOrderType(self, value):
"""
Sets the value of :py:attr:`stringIndexerOrderType`.
"""
return self._set(stringIndexerOrderType=value)
@since("2.3.0")
def getStringIndexerOrderType(self):
"""
Gets the value of :py:attr:`stringIndexerOrderType` or its default value 'frequencyDesc'.
"""
return self.getOrDefault(self.stringIndexerOrderType)
def _create_model(self, java_model):
return RFormulaModel(java_model)
def __str__(self):
formulaStr = self.getFormula() if self.isDefined(self.formula) else ""
return "RFormula(%s) (uid=%s)" % (formulaStr, self.uid)
class RFormulaModel(JavaModel, JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Model fitted by :py:class:`RFormula`. Fitting is required to determine the
factor levels of formula terms.
.. versionadded:: 1.5.0
"""
def __str__(self):
resolvedFormula = self._call_java("resolvedFormula")
return "RFormulaModel(%s) (uid=%s)" % (resolvedFormula, self.uid)
@inherit_doc
class ChiSqSelector(JavaEstimator, HasFeaturesCol, HasOutputCol, HasLabelCol, JavaMLReadable,
JavaMLWritable):
"""
.. note:: Experimental
Chi-Squared feature selection, which selects categorical features to use for predicting a
categorical label.
The selector supports different selection methods: `numTopFeatures`, `percentile`, `fpr`,
`fdr`, `fwe`.
* `numTopFeatures` chooses a fixed number of top features according to a chi-squared test.
* `percentile` is similar but chooses a fraction of all features
instead of a fixed number.
* `fpr` chooses all features whose p-values are below a threshold,
thus controlling the false positive rate of selection.
* `fdr` uses the `Benjamini-Hochberg procedure <https://en.wikipedia.org/wiki/
False_discovery_rate#Benjamini.E2.80.93Hochberg_procedure>`_
to choose all features whose false discovery rate is below a threshold.
* `fwe` chooses all features whose p-values are below a threshold. The threshold is scaled by
1/numFeatures, thus controlling the family-wise error rate of selection.
By default, the selection method is `numTopFeatures`, with the default number of top features
set to 50.
>>> from pyspark.ml.linalg import Vectors
>>> df = spark.createDataFrame(
... [(Vectors.dense([0.0, 0.0, 18.0, 1.0]), 1.0),
... (Vectors.dense([0.0, 1.0, 12.0, 0.0]), 0.0),
... (Vectors.dense([1.0, 0.0, 15.0, 0.1]), 0.0)],
... ["features", "label"])
>>> selector = ChiSqSelector(numTopFeatures=1, outputCol="selectedFeatures")
>>> model = selector.fit(df)
>>> model.transform(df).head().selectedFeatures
DenseVector([18.0])
>>> model.selectedFeatures
[2]
>>> chiSqSelectorPath = temp_path + "/chi-sq-selector"
>>> selector.save(chiSqSelectorPath)
>>> loadedSelector = ChiSqSelector.load(chiSqSelectorPath)
>>> loadedSelector.getNumTopFeatures() == selector.getNumTopFeatures()
True
>>> modelPath = temp_path + "/chi-sq-selector-model"
>>> model.save(modelPath)
>>> loadedModel = ChiSqSelectorModel.load(modelPath)
>>> loadedModel.selectedFeatures == model.selectedFeatures
True
.. versionadded:: 2.0.0
"""
selectorType = Param(Params._dummy(), "selectorType",
"The selector type of the ChisqSelector. " +
"Supported options: numTopFeatures (default), percentile and fpr.",
typeConverter=TypeConverters.toString)
numTopFeatures = \
Param(Params._dummy(), "numTopFeatures",
"Number of features that selector will select, ordered by ascending p-value. " +
"If the number of features is < numTopFeatures, then this will select " +
"all features.", typeConverter=TypeConverters.toInt)
percentile = Param(Params._dummy(), "percentile", "Percentile of features that selector " +
"will select, ordered by ascending p-value.",
typeConverter=TypeConverters.toFloat)
fpr = Param(Params._dummy(), "fpr", "The highest p-value for features to be kept.",
typeConverter=TypeConverters.toFloat)
fdr = Param(Params._dummy(), "fdr", "The upper bound of the expected false discovery rate.",
typeConverter=TypeConverters.toFloat)
fwe = Param(Params._dummy(), "fwe", "The upper bound of the expected family-wise error rate.",
typeConverter=TypeConverters.toFloat)
@keyword_only
def __init__(self, numTopFeatures=50, featuresCol="features", outputCol=None,
labelCol="label", selectorType="numTopFeatures", percentile=0.1, fpr=0.05,
fdr=0.05, fwe=0.05):
"""
__init__(self, numTopFeatures=50, featuresCol="features", outputCol=None, \
labelCol="label", selectorType="numTopFeatures", percentile=0.1, fpr=0.05, \
fdr=0.05, fwe=0.05)
"""
super(ChiSqSelector, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.feature.ChiSqSelector", self.uid)
self._setDefault(numTopFeatures=50, selectorType="numTopFeatures", percentile=0.1,
fpr=0.05, fdr=0.05, fwe=0.05)
kwargs = self._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("2.0.0")
def setParams(self, numTopFeatures=50, featuresCol="features", outputCol=None,
labelCol="labels", selectorType="numTopFeatures", percentile=0.1, fpr=0.05,
fdr=0.05, fwe=0.05):
"""
setParams(self, numTopFeatures=50, featuresCol="features", outputCol=None, \
labelCol="labels", selectorType="numTopFeatures", percentile=0.1, fpr=0.05, \
fdr=0.05, fwe=0.05)
Sets params for this ChiSqSelector.
"""
kwargs = self._input_kwargs
return self._set(**kwargs)
@since("2.1.0")
def setSelectorType(self, value):
"""
Sets the value of :py:attr:`selectorType`.
"""
return self._set(selectorType=value)
@since("2.1.0")
def getSelectorType(self):
"""
Gets the value of selectorType or its default value.
"""
return self.getOrDefault(self.selectorType)
@since("2.0.0")
def setNumTopFeatures(self, value):
"""
Sets the value of :py:attr:`numTopFeatures`.
Only applicable when selectorType = "numTopFeatures".
"""
return self._set(numTopFeatures=value)
@since("2.0.0")
def getNumTopFeatures(self):
"""
Gets the value of numTopFeatures or its default value.
"""
return self.getOrDefault(self.numTopFeatures)
@since("2.1.0")
def setPercentile(self, value):
"""
Sets the value of :py:attr:`percentile`.
Only applicable when selectorType = "percentile".
"""
return self._set(percentile=value)
@since("2.1.0")
def getPercentile(self):
"""
Gets the value of percentile or its default value.
"""
return self.getOrDefault(self.percentile)
@since("2.1.0")
def setFpr(self, value):
"""
Sets the value of :py:attr:`fpr`.
Only applicable when selectorType = "fpr".
"""
return self._set(fpr=value)
@since("2.1.0")
def getFpr(self):
"""
Gets the value of fpr or its default value.
"""
return self.getOrDefault(self.fpr)
@since("2.2.0")
def setFdr(self, value):
"""
Sets the value of :py:attr:`fdr`.
Only applicable when selectorType = "fdr".
"""
return self._set(fdr=value)
@since("2.2.0")
def getFdr(self):
"""
Gets the value of fdr or its default value.
"""
return self.getOrDefault(self.fdr)
@since("2.2.0")
def setFwe(self, value):
"""
Sets the value of :py:attr:`fwe`.
Only applicable when selectorType = "fwe".
"""
return self._set(fwe=value)
@since("2.2.0")
def getFwe(self):
"""
Gets the value of fwe or its default value.
"""
return self.getOrDefault(self.fwe)
def _create_model(self, java_model):
return ChiSqSelectorModel(java_model)
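# --- Illustrative sketch (not part of pyspark) ---
# A hedged example of switching to the p-value based "fpr" selection method
# described in the class docstring; ``df`` is assumed to be a
# (features, label) DataFrame like the one built in the doctest above.
def _example_chisq_fpr(df):
    selector = ChiSqSelector(outputCol="selectedFeatures")
    selector.setSelectorType("fpr").setFpr(0.05)
    return selector.fit(df).transform(df)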
class ChiSqSelectorModel(JavaModel, JavaMLReadable, JavaMLWritable):
"""
.. note:: Experimental
Model fitted by :py:class:`ChiSqSelector`.
.. versionadded:: 2.0.0
"""
@property
@since("2.0.0")
def selectedFeatures(self):
"""
List of indices to select (filter).
"""
return self._call_java("selectedFeatures")
if __name__ == "__main__":
import doctest
import tempfile
import pyspark.ml.feature
from pyspark.sql import Row, SparkSession
globs = globals().copy()
features = pyspark.ml.feature.__dict__.copy()
globs.update(features)
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("ml.feature tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
testData = sc.parallelize([Row(id=0, label="a"), Row(id=1, label="b"),
Row(id=2, label="c"), Row(id=3, label="a"),
Row(id=4, label="a"), Row(id=5, label="c")], 2)
globs['stringIndDf'] = spark.createDataFrame(testData)
temp_path = tempfile.mkdtemp()
globs['temp_path'] = temp_path
try:
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
finally:
from shutil import rmtree
try:
rmtree(temp_path)
except OSError:
pass
if failure_count:
exit(-1)
| apache-2.0 |
arabenjamin/scikit-learn | sklearn/decomposition/tests/test_kernel_pca.py | 154 | 8058 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import (assert_array_almost_equal, assert_less,
assert_equal, assert_not_equal,
assert_raises)
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.metrics.pairwise import rbf_kernel
def test_kernel_pca():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
def histogram(x, y, **kwargs):
# Histogram kernel implemented as a callable.
assert_equal(kwargs, {}) # no kernel_params that we didn't ask for
return np.minimum(x, y).sum()
for eigen_solver in ("auto", "dense", "arpack"):
for kernel in ("linear", "rbf", "poly", histogram):
# histogram kernel produces singular matrix inside linalg.solve
# XXX use a least-squares approximation?
inv = not callable(kernel)
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=inv)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# non-regression test: previously, gamma would be 0 by default,
# forcing all eigenvalues to 0 under the poly kernel
assert_not_equal(X_fit_transformed, [])
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
if inv:
X_pred2 = kpca.inverse_transform(X_pred_transformed)
assert_equal(X_pred2.shape, X_pred.shape)
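# --- Illustrative sketch (not part of the test suite) ---
# A hedged example of the callable-kernel mechanism exercised by ``histogram``
# above: any ``kernel(x, y) -> float`` over two 1-D sample vectors is accepted.
def _example_callable_kernel():
    rng = np.random.RandomState(0)
    X = rng.random_sample((5, 4))
    def min_intersection(x, y):
        # same "histogram intersection" idea as in test_kernel_pca above
        return np.minimum(x, y).sum()
    return KernelPCA(n_components=2, kernel=min_intersection).fit_transform(X)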
def test_invalid_parameters():
assert_raises(ValueError, KernelPCA, 10, fit_inverse_transform=True,
kernel='precomputed')
def test_kernel_pca_sparse():
rng = np.random.RandomState(0)
X_fit = sp.csr_matrix(rng.random_sample((5, 4)))
X_pred = sp.csr_matrix(rng.random_sample((2, 4)))
for eigen_solver in ("auto", "arpack"):
for kernel in ("linear", "rbf", "poly"):
# transform fit data
kpca = KernelPCA(4, kernel=kernel, eigen_solver=eigen_solver,
fit_inverse_transform=False)
X_fit_transformed = kpca.fit_transform(X_fit)
X_fit_transformed2 = kpca.fit(X_fit).transform(X_fit)
assert_array_almost_equal(np.abs(X_fit_transformed),
np.abs(X_fit_transformed2))
# transform new data
X_pred_transformed = kpca.transform(X_pred)
assert_equal(X_pred_transformed.shape[1],
X_fit_transformed.shape[1])
# inverse transform
# X_pred2 = kpca.inverse_transform(X_pred_transformed)
# assert_equal(X_pred2.shape, X_pred.shape)
def test_kernel_pca_linear_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
# for a linear kernel, kernel PCA should find the same projection as PCA
# modulo the sign (direction)
# fit only the first four components: fifth is near zero eigenvalue, so
# can be trimmed due to roundoff error
assert_array_almost_equal(
np.abs(KernelPCA(4).fit(X_fit).transform(X_pred)),
np.abs(PCA(4).fit(X_fit).transform(X_pred)))
def test_kernel_pca_n_components():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
for c in [1, 2, 4]:
kpca = KernelPCA(n_components=c, eigen_solver=eigen_solver)
shape = kpca.fit(X_fit).transform(X_pred).shape
assert_equal(shape, (2, c))
def test_remove_zero_eig():
X = np.array([[1 - 1e-30, 1], [1, 1], [1, 1 - 1e-20]])
# n_components=None (default) => remove_zero_eig is True
kpca = KernelPCA()
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
kpca = KernelPCA(n_components=2)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 2))
kpca = KernelPCA(n_components=2, remove_zero_eig=True)
Xt = kpca.fit_transform(X)
assert_equal(Xt.shape, (3, 0))
def test_kernel_pca_precomputed():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
X_pred = rng.random_sample((2, 4))
for eigen_solver in ("dense", "arpack"):
X_kpca = KernelPCA(4, eigen_solver=eigen_solver).\
fit(X_fit).transform(X_pred)
X_kpca2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_pred, X_fit.T))
X_kpca_train = KernelPCA(
4, eigen_solver=eigen_solver,
kernel='precomputed').fit_transform(np.dot(X_fit, X_fit.T))
X_kpca_train2 = KernelPCA(
4, eigen_solver=eigen_solver, kernel='precomputed').fit(
np.dot(X_fit, X_fit.T)).transform(np.dot(X_fit, X_fit.T))
assert_array_almost_equal(np.abs(X_kpca),
np.abs(X_kpca2))
assert_array_almost_equal(np.abs(X_kpca_train),
np.abs(X_kpca_train2))
def test_kernel_pca_invalid_kernel():
rng = np.random.RandomState(0)
X_fit = rng.random_sample((2, 4))
kpca = KernelPCA(kernel="tototiti")
assert_raises(ValueError, kpca.fit, X_fit)
def test_gridsearch_pipeline():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="rbf", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(kernel_pca__gamma=2. ** np.arange(-2, 2))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
grid_search.fit(X, y)
assert_equal(grid_search.best_score_, 1)
def test_gridsearch_pipeline_precomputed():
# Test if we can do a grid-search to find parameters to separate
# circles with a perceptron model using a precomputed kernel.
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
kpca = KernelPCA(kernel="precomputed", n_components=2)
pipeline = Pipeline([("kernel_pca", kpca), ("Perceptron", Perceptron())])
param_grid = dict(Perceptron__n_iter=np.arange(1, 5))
grid_search = GridSearchCV(pipeline, cv=3, param_grid=param_grid)
X_kernel = rbf_kernel(X, gamma=2.)
grid_search.fit(X_kernel, y)
assert_equal(grid_search.best_score_, 1)
def test_nested_circles():
# Test the linear separability of the first 2D KPCA transform
X, y = make_circles(n_samples=400, factor=.3, noise=.05,
random_state=0)
# 2D nested circles are not linearly separable
train_score = Perceptron().fit(X, y).score(X, y)
assert_less(train_score, 0.8)
# Project the circles data into the first 2 components of a RBF Kernel
# PCA model.
# Note that the gamma value is data dependent. If this test breaks
# and the gamma value has to be updated, the Kernel PCA example will
# have to be updated too.
kpca = KernelPCA(kernel="rbf", n_components=2,
fit_inverse_transform=True, gamma=2.)
X_kpca = kpca.fit_transform(X)
# The data is perfectly linearly separable in that space
train_score = Perceptron().fit(X_kpca, y).score(X_kpca, y)
assert_equal(train_score, 1.0)
| bsd-3-clause |
YihaoLu/statsmodels | statsmodels/formula/tests/test_formula.py | 29 | 4647 | from statsmodels.compat.python import iteritems, StringIO
import warnings
from statsmodels.formula.api import ols
from statsmodels.formula.formulatools import make_hypotheses_matrices
from statsmodels.tools import add_constant
from statsmodels.datasets.longley import load, load_pandas
import numpy.testing as npt
from statsmodels.tools.testing import assert_equal
from numpy.testing.utils import WarningManager
longley_formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
class CheckFormulaOLS(object):
@classmethod
def setupClass(cls):
cls.data = load()
def test_endog_names(self):
assert self.model.endog_names == 'TOTEMP'
def test_exog_names(self):
assert self.model.exog_names == ['Intercept', 'GNPDEFL', 'GNP',
'UNEMP', 'ARMED', 'POP', 'YEAR']
def test_design(self):
npt.assert_equal(self.model.exog,
add_constant(self.data.exog, prepend=True))
def test_endog(self):
npt.assert_equal(self.model.endog, self.data.endog)
def test_summary(self):
# smoke test
warn_ctx = WarningManager()
warn_ctx.__enter__()
try:
warnings.filterwarnings("ignore",
"kurtosistest only valid for n>=20")
self.model.fit().summary()
finally:
warn_ctx.__exit__()
class TestFormulaPandas(CheckFormulaOLS):
@classmethod
def setupClass(cls):
data = load_pandas().data
cls.model = ols(longley_formula, data)
super(TestFormulaPandas, cls).setupClass()
class TestFormulaDict(CheckFormulaOLS):
@classmethod
def setupClass(cls):
data = dict((k, v.tolist()) for k, v in iteritems(load_pandas().data))
cls.model = ols(longley_formula, data)
super(TestFormulaDict, cls).setupClass()
class TestFormulaRecArray(CheckFormulaOLS):
@classmethod
def setupClass(cls):
data = load().data
cls.model = ols(longley_formula, data)
super(TestFormulaRecArray, cls).setupClass()
def test_tests():
formula = 'TOTEMP ~ GNPDEFL + GNP + UNEMP + ARMED + POP + YEAR'
dta = load_pandas().data
results = ols(formula, dta).fit()
test_formula = '(GNPDEFL = GNP), (UNEMP = 2), (YEAR/1829 = 1)'
LC = make_hypotheses_matrices(results, test_formula)
R = LC.coefs
Q = LC.constants
npt.assert_almost_equal(R, [[0, 1, -1, 0, 0, 0, 0],
                                [0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1./1829]], 8)
npt.assert_array_equal(Q, [[0],[2],[1]])
def test_formula_labels():
# make sure labels pass through patsy as expected
# data(Duncan) from car in R
dta = StringIO(""""type" "income" "education" "prestige"\n"accountant" "prof" 62 86 82\n"pilot" "prof" 72 76 83\n"architect" "prof" 75 92 90\n"author" "prof" 55 90 76\n"chemist" "prof" 64 86 90\n"minister" "prof" 21 84 87\n"professor" "prof" 64 93 93\n"dentist" "prof" 80 100 90\n"reporter" "wc" 67 87 52\n"engineer" "prof" 72 86 88\n"undertaker" "prof" 42 74 57\n"lawyer" "prof" 76 98 89\n"physician" "prof" 76 97 97\n"welfare.worker" "prof" 41 84 59\n"teacher" "prof" 48 91 73\n"conductor" "wc" 76 34 38\n"contractor" "prof" 53 45 76\n"factory.owner" "prof" 60 56 81\n"store.manager" "prof" 42 44 45\n"banker" "prof" 78 82 92\n"bookkeeper" "wc" 29 72 39\n"mail.carrier" "wc" 48 55 34\n"insurance.agent" "wc" 55 71 41\n"store.clerk" "wc" 29 50 16\n"carpenter" "bc" 21 23 33\n"electrician" "bc" 47 39 53\n"RR.engineer" "bc" 81 28 67\n"machinist" "bc" 36 32 57\n"auto.repairman" "bc" 22 22 26\n"plumber" "bc" 44 25 29\n"gas.stn.attendant" "bc" 15 29 10\n"coal.miner" "bc" 7 7 15\n"streetcar.motorman" "bc" 42 26 19\n"taxi.driver" "bc" 9 19 10\n"truck.driver" "bc" 21 15 13\n"machine.operator" "bc" 21 20 24\n"barber" "bc" 16 26 20\n"bartender" "bc" 16 28 7\n"shoe.shiner" "bc" 9 17 3\n"cook" "bc" 14 22 16\n"soda.clerk" "bc" 12 30 6\n"watchman" "bc" 17 25 11\n"janitor" "bc" 7 20 8\n"policeman" "bc" 34 47 41\n"waiter" "bc" 8 32 10""")
from pandas import read_table
dta = read_table(dta, sep=" ")
model = ols("prestige ~ income + education", dta).fit()
assert_equal(model.fittedvalues.index, dta.index)
def test_formula_predict():
from numpy import log
formula = """TOTEMP ~ log(GNPDEFL) + log(GNP) + UNEMP + ARMED +
POP + YEAR"""
data = load_pandas()
dta = load_pandas().data
results = ols(formula, dta).fit()
npt.assert_almost_equal(results.fittedvalues.values,
results.predict(data.exog), 8)
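# --- Illustrative sketch (not part of the test suite) ---
# A hedged example of what test_formula_predict relies on: when predicting
# from a formula-based model, patsy re-applies transforms such as ``log`` to
# the raw columns of the frame passed to ``predict``.
def _example_formula_predict_new_data():
    from numpy import log  # noqa: F401 -- referenced inside the formula
    dta = load_pandas().data
    results = ols('TOTEMP ~ log(GNP) + UNEMP', dta).fit()
    return results.predict(dta.iloc[:3])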
| bsd-3-clause |
JuliaSprenger/python-neo | neo/io/neuralynxio.py | 5 | 2251 | """
Class for reading data from Neuralynx files.
This IO supports NCS, NEV and NSE file formats.
Depends on: numpy
Supported: Read
Author: Julia Sprenger, Carlos Canova
"""
from neo.io.basefromrawio import BaseFromRaw
from neo.rawio.neuralynxrawio.neuralynxrawio import NeuralynxRawIO
class NeuralynxIO(NeuralynxRawIO, BaseFromRaw):
"""
Class for reading data from Neuralynx files.
This IO supports NCS, NEV, NSE and NTT file formats.
NCS contains signals for one channel
NEV contains events
NSE contains spikes and waveforms for mono electrodes
NTT contains spikes and waveforms for tetrodes
"""
_prefered_signal_group_mode = 'group-by-same-units'
mode = 'dir'
def __init__(self, dirname='', filename='', use_cache=False, cache_path='same_as_resource',
exclude_filename=None, keep_original_times=False):
"""
Initialise IO instance
Parameters
----------
dirname : str
Directory containing data files
filename : str
            Name of a single ncs, nse, nev or ntt file to include in the dataset.
            Will be ignored if dirname is provided.
use_cache : bool, optional
Cache results of initial file scans for faster loading in subsequent runs.
Default: False
cache_path : str, optional
Folder path to use for cache files.
Default: 'same_as_resource'
exclude_filename: str or list
Filename or list of filenames to be excluded. Expects base filenames without
directory path.
keep_original_times : bool
Preserve original time stamps as in data files. By default datasets are
shifted to begin at t_start = 0*pq.second.
Default: False
"""
NeuralynxRawIO.__init__(self, dirname=dirname, filename=filename, use_cache=use_cache,
cache_path=cache_path, exclude_filename=exclude_filename,
keep_original_times=keep_original_times)
if self.rawmode == 'one-file':
BaseFromRaw.__init__(self, filename)
elif self.rawmode == 'one-dir':
BaseFromRaw.__init__(self, dirname)
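# --- Illustrative usage sketch (not part of the library) ---
# A hedged example of reading a whole recording directory; the path below is
# a placeholder, and ``read_block`` is assumed to be inherited from the
# generic neo BaseFromRaw/BaseIO reading interface.
def _example_read_session(dirname='/path/to/neuralynx/session'):
    reader = NeuralynxIO(dirname=dirname, keep_original_times=True)
    block = reader.read_block(lazy=False)
    return block.segments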
| bsd-3-clause |
rdevon/cortex | cortex/built_ins/networks/tv_models_wrapper.py | 1 | 1172 | from torch import nn
from torchvision import models
from .utils import finish_layer_1d, get_nonlinearity
class AlexNet(models.AlexNet):
def __init__(self, shape, dim_out=None, fully_connected_layers=None,
nonlinearity='ReLU', n_steps=None,
**layer_args):
super(AlexNet, self).__init__()
fully_connected_layers = fully_connected_layers or []
self.fc = nn.Sequential()
dim_out_ = (256 * ((shape[0] + 4 - 10) // 32) *
((shape[1] + 4 - 10) // 32))
nonlinearity = get_nonlinearity(nonlinearity)
for dim_h in fully_connected_layers:
dim_in = dim_out_
dim_out_ = dim_h
name = 'linear_%s_%s' % (dim_in, dim_out_)
self.fc.add_module(name, nn.Linear(dim_in, dim_out_))
finish_layer_1d(self.fc, name, dim_out_,
nonlinearity=nonlinearity, **layer_args)
if dim_out:
name = 'dim_out'
self.fc.add_module(name, nn.Linear(dim_out_, dim_out))
def forward(self, x):
x = self.features(x)
x = x.view(x.size()[0], -1)
return self.fc(x)
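# --- Illustrative usage sketch (not part of the library) ---
# A hedged example of wiring the wrapper up; the input shape, layer sizes and
# batch size below are arbitrary illustration choices, not recommended values.
def _example_alexnet_forward():
    import torch
    net = AlexNet(shape=(64, 64, 3), dim_out=10, fully_connected_layers=[512])
    x = torch.randn(2, 3, 64, 64)  # (batch, channels, height, width)
    return net(x)                  # expected output shape: (2, 10)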
| bsd-3-clause |
DonBeo/scikit-learn | sklearn/tests/test_common.py | 1 | 15820 | """
General tests for all estimators in sklearn.
"""
# Authors: Andreas Mueller <[email protected]>
# Gael Varoquaux [email protected]
# License: BSD 3 clause
from __future__ import print_function
import os
import warnings
import sys
import pkgutil
from sklearn.externals.six import PY3
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_false, clean_warning_registry
from sklearn.utils.testing import all_estimators
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
import sklearn
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_classification
from sklearn.cluster.bicluster import BiclusterMixin
from sklearn.cross_validation import train_test_split
from sklearn.linear_model.base import LinearClassifierMixin
from sklearn.utils.estimator_checks import (
check_dtype_object,
check_parameters_default_constructible,
check_estimator_sparse_data,
check_estimators_dtypes,
check_transformer,
check_clustering,
check_clusterer_compute_labels_predict,
check_regressors_int,
check_regressors_train,
check_regressors_pickle,
check_transformer_pickle,
check_transformers_unfitted,
check_estimators_empty_data_messages,
check_estimators_nan_inf,
check_estimators_unfitted,
check_classifiers_one_label,
check_classifiers_train,
check_classifiers_classes,
check_classifiers_input_shapes,
check_classifiers_pickle,
check_class_weight_classifiers,
check_class_weight_auto_classifiers,
check_class_weight_auto_linear_classifier,
check_estimators_overwrite_params,
check_estimators_partial_fit_n_features,
check_sparsify_coefficients,
check_classifier_data_not_an_array,
check_regressor_data_not_an_array,
check_transformer_data_not_an_array,
check_transformer_n_iter,
check_fit_score_takes_y,
check_non_transformer_estimators_n_iter,
check_pipeline_consistency,
CROSS_DECOMPOSITION)
def test_all_estimator_no_base_class():
# test that all_estimators doesn't find abstract classes.
for name, Estimator in all_estimators():
msg = ("Base estimators such as {0} should not be included"
" in all_estimators").format(name)
assert_false(name.lower().startswith('base'), msg=msg)
def test_all_estimators():
# Test that estimators are default-constructible, clonable
# and have working repr.
estimators = all_estimators(include_meta_estimators=True)
# Meta sanity-check to make sure that the estimator introspection runs
# properly
assert_greater(len(estimators), 0)
for name, Estimator in estimators:
# some can just not be sensibly default constructed
yield check_parameters_default_constructible, name, Estimator
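# --- Illustrative sketch (not part of the test suite) ---
# A hedged example of invoking one of the yielded checks directly for a single
# estimator class, outside the nose generator machinery.
def _example_run_single_check():
    from sklearn.naive_bayes import GaussianNB
    check_parameters_default_constructible('GaussianNB', GaussianNB)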
def test_non_meta_estimators():
# input validation etc for non-meta estimators
estimators = all_estimators()
for name, Estimator in estimators:
if issubclass(Estimator, BiclusterMixin):
continue
if name.endswith("HMM") or name.startswith("_"):
continue
if name not in CROSS_DECOMPOSITION:
yield check_estimators_dtypes, name, Estimator
yield check_fit_score_takes_y, name, Estimator
yield check_dtype_object, name, Estimator
            # Check that all estimators yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages, name, Estimator
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
yield check_pipeline_consistency, name, Estimator
if name not in CROSS_DECOMPOSITION + ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf, name, Estimator
if name not in CROSS_DECOMPOSITION + ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params, name, Estimator
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients, name, Estimator
yield check_estimator_sparse_data, name, Estimator
def test_transformers():
# test if transformers do something sensible on training set
# also test all shapes / shape errors
transformers = all_estimators(type_filter='transformer')
for name, Transformer in transformers:
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
yield check_transformer_pickle, name, Transformer
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array, name, Transformer
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer']:
# basic tests
yield check_transformer, name, Transformer
yield check_transformers_unfitted, name, Transformer
def test_clustering():
# test if clustering algorithms do something sensible
# also test all shapes / shape errors
clustering = all_estimators(type_filter='cluster')
for name, Alg in clustering:
        # test that labels from predict() match the labels computed during fit
yield check_clusterer_compute_labels_predict, name, Alg
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering, name, Alg
yield check_estimators_partial_fit_n_features, name, Alg
def test_classifiers():
# test if classifiers can cope with non-consecutive classes
classifiers = all_estimators(type_filter='classifier')
for name, Classifier in classifiers:
        # test classifiers can handle non-array data
yield check_classifier_data_not_an_array, name, Classifier
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label, name, Classifier
yield check_classifiers_classes, name, Classifier
yield check_classifiers_pickle, name, Classifier
yield check_estimators_partial_fit_n_features, name, Classifier
# basic consistency testing
yield check_classifiers_train, name, Classifier
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
# test if classifiers can cope with y.shape = (n_samples, 1)
yield check_classifiers_input_shapes, name, Classifier
# test if NotFittedError is raised
yield check_estimators_unfitted, name, Classifier
def test_regressors():
regressors = all_estimators(type_filter='regressor')
# TODO: test with intercept
# TODO: test with multiple responses
for name, Regressor in regressors:
# basic testing
yield check_regressors_train, name, Regressor
yield check_regressor_data_not_an_array, name, Regressor
yield check_estimators_partial_fit_n_features, name, Regressor
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_regressors_pickle, name, Regressor
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int, name, Regressor
# Test if NotFittedError is raised
yield check_estimators_unfitted, name, Regressor
def test_configure():
# Smoke test the 'configure' step of setup, this tests all the
# 'configure' functions in the setup.pys in the scikit
cwd = os.getcwd()
setup_path = os.path.abspath(os.path.join(sklearn.__path__[0], '..'))
setup_filename = os.path.join(setup_path, 'setup.py')
if not os.path.exists(setup_filename):
return
try:
os.chdir(setup_path)
old_argv = sys.argv
sys.argv = ['setup.py', 'config']
clean_warning_registry()
with warnings.catch_warnings():
# The configuration spits out warnings when not finding
# Blas/Atlas development headers
warnings.simplefilter('ignore', UserWarning)
if PY3:
with open('setup.py') as f:
exec(f.read(), dict(__name__='__main__'))
else:
execfile('setup.py', dict(__name__='__main__'))
finally:
sys.argv = old_argv
os.chdir(cwd)
def test_class_weight_classifiers():
# test that class_weight works and that the semantics are consistent
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
classifiers = [c for c in classifiers
if 'class_weight' in c[1]().get_params().keys()]
for name, Classifier in classifiers:
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
continue
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
continue
yield check_class_weight_classifiers, name, Classifier
def test_class_weight_auto_classifiers():
# Test that class_weight="auto" improves f1-score
# This test is broken; its success depends on:
# * a rare fortuitous RNG seed for make_classification; and
# * the use of binary F1 over a seemingly arbitrary positive class for two
# datasets, and weighted average F1 for the third.
# Its expectations need to be clarified and reimplemented.
raise SkipTest('This test requires redefinition')
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
classifiers = [c for c in classifiers
if 'class_weight' in c[1]().get_params().keys()]
for n_classes, weights in zip([2, 3], [[.8, .2], [.8, .1, .1]]):
# create unbalanced dataset
X, y = make_classification(n_classes=n_classes, n_samples=200,
n_features=10, weights=weights,
random_state=0, n_informative=n_classes)
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
for name, Classifier in classifiers:
if (name != "NuSVC"
# the sparse version has a parameter that doesn't do anything
and not name.startswith("RidgeClassifier")
                    # RidgeClassifier behaves unexpectedly
# FIXME!
and not name.endswith("NB")):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
yield (check_class_weight_auto_classifiers, name, Classifier,
X_train, y_train, X_test, y_test, weights)
def test_class_weight_auto_linear_classifiers():
classifiers = all_estimators(type_filter='classifier')
clean_warning_registry()
with warnings.catch_warnings(record=True):
linear_classifiers = [
(name, clazz)
for name, clazz in classifiers
if 'class_weight' in clazz().get_params().keys()
and issubclass(clazz, LinearClassifierMixin)]
for name, Classifier in linear_classifiers:
if name == "LogisticRegressionCV":
# Contrary to RidgeClassifierCV, LogisticRegressionCV use actual
# CV folds and fit a model for each CV iteration before averaging
# the coef. Therefore it is expected to not behave exactly as the
# other linear model.
continue
yield check_class_weight_auto_linear_classifier, name, Classifier
@ignore_warnings
def test_import_all_consistency():
# Smoke test to check that any name in a __all__ list is actually defined
# in the namespace of the module or package.
pkgs = pkgutil.walk_packages(path=sklearn.__path__, prefix='sklearn.',
onerror=lambda _: None)
submods = [modname for _, modname, _ in pkgs]
for modname in submods + ['sklearn']:
if ".tests." in modname:
continue
package = __import__(modname, fromlist="dummy")
for name in getattr(package, '__all__', ()):
if getattr(package, name, None) is None:
raise AttributeError(
"Module '{0}' has no attribute '{1}'".format(
modname, name))
def test_root_import_all_completeness():
EXCEPTIONS = ('utils', 'tests', 'base', 'setup')
for _, modname, _ in pkgutil.walk_packages(path=sklearn.__path__,
onerror=lambda _: None):
if '.' in modname or modname.startswith('_') or modname in EXCEPTIONS:
continue
assert_in(modname, sklearn.__all__)
def test_non_transformer_estimators_n_iter():
    # Test that all non-transformer estimators that have a max_iter
    # attribute report an n_iter attribute of at least 1.
for est_type in ['regressor', 'classifier', 'cluster']:
regressors = all_estimators(type_filter=est_type)
for name, Estimator in regressors:
# LassoLars stops early for the default alpha=1.0 for
# the iris dataset.
if name == 'LassoLars':
estimator = Estimator(alpha=0.)
else:
estimator = Estimator()
if hasattr(estimator, "max_iter"):
# These models are dependent on external solvers like
# libsvm and accessing the iter parameter is non-trivial.
if name in (['Ridge', 'SVR', 'NuSVR', 'NuSVC',
'RidgeClassifier', 'SVC', 'RandomizedLasso',
'LogisticRegressionCV']):
continue
# Tested in test_transformer_n_iter below
elif (name in CROSS_DECOMPOSITION or
name in ['LinearSVC', 'LogisticRegression']):
continue
else:
                    # Multitask models related to ENet cannot handle
                    # a mono-output y.
yield (check_non_transformer_estimators_n_iter,
name, estimator, 'Multi' in name)
def test_transformer_n_iter():
transformers = all_estimators(type_filter='transformer')
for name, Estimator in transformers:
estimator = Estimator()
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if hasattr(estimator, "max_iter") and name not in external_solver:
yield check_transformer_n_iter, name, estimator
| bsd-3-clause |
anntzer/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 5 | 54271 | from collections.abc import Mapping
import re
import pytest
import warnings
from scipy import sparse
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils import IS_PYPY
from sklearn.utils._testing import (
assert_almost_equal,
fails_if_pypy,
assert_allclose_dense_sparse,
skip_if_32bit,
)
from collections import defaultdict
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace("é", "e")
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ["the_ultimate_feature"]
def test_strip_accents():
# check some classical latin accentuated symbols
a = "àáâãäåçèéêë"
expected = "aaaaaaceeee"
assert strip_accents_unicode(a) == expected
a = "ìíîïñòóôõöùúûüý"
expected = "iiiinooooouuuuy"
assert strip_accents_unicode(a) == expected
# check some arabic
a = "\u0625" # alef with a hamza below: إ
expected = "\u0627" # simple alef: ا
assert strip_accents_unicode(a) == expected
# mix letters accentuated and not
a = "this is à test"
expected = "this is a test"
assert strip_accents_unicode(a) == expected
# strings that are already decomposed
a = "o\u0308" # o with diaeresis
expected = "o"
assert strip_accents_unicode(a) == expected
# combining marks by themselves
a = "\u0300\u0301\u0302\u0303"
expected = ""
assert strip_accents_unicode(a) == expected
# Multiple combining marks on one character
a = "o\u0308\u0304"
expected = "o"
assert strip_accents_unicode(a) == expected
def test_to_ascii():
# check some classical latin accentuated symbols
a = "àáâãäåçèéêë"
expected = "aaaaaaceeee"
assert strip_accents_ascii(a) == expected
a = "ìíîïñòóôõöùúûüý"
expected = "iiiinooooouuuuy"
assert strip_accents_ascii(a) == expected
# check some arabic
a = "\u0625" # halef with a hamza below
expected = "" # halef has no direct ascii match
assert strip_accents_ascii(a) == expected
# mix letters accentuated and not
a = "this is à test"
expected = "this is a test"
assert strip_accents_ascii(a) == expected
@pytest.mark.parametrize("Vectorizer", (CountVectorizer, HashingVectorizer))
def test_word_analyzer_unigrams(Vectorizer):
wa = Vectorizer(strip_accents="ascii").build_analyzer()
text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
expected = [
"ai",
"mange",
"du",
"kangourou",
"ce",
"midi",
"etait",
"pas",
"tres",
"bon",
]
assert wa(text) == expected
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ["this", "is", "test", "really", "met", "harry", "yesterday"]
assert wa(text) == expected
wa = Vectorizer(input="file").build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ["this", "is", "test", "with", "file", "like", "object"]
assert wa(text) == expected
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
expected = [
"AI",
"MANGE",
"DU",
"KANGOUROU",
"CE",
"MIDI",
"ETAIT",
"PAS",
"TRES",
"BON",
]
assert wa(text) == expected
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize, strip_accents="ascii").build_analyzer()
text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
expected = [
"j'ai",
"mange",
"du",
"kangourou",
"ce",
"midi,",
"c'etait",
"pas",
"tres",
"bon.",
]
assert wa(text) == expected
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(
analyzer="word", strip_accents="unicode", ngram_range=(1, 2)
).build_analyzer()
text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
expected = [
"ai",
"mange",
"du",
"kangourou",
"ce",
"midi",
"etait",
"pas",
"tres",
"bon",
"ai mange",
"mange du",
"du kangourou",
"kangourou ce",
"ce midi",
"midi etait",
"etait pas",
"pas tres",
"tres bon",
]
assert wa(text) == expected
def test_unicode_decode_error():
    # decode_error defaults to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
text_bytes = text.encode("utf-8")
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding="ascii").build_analyzer()
with pytest.raises(UnicodeDecodeError):
wa(text_bytes)
ca = CountVectorizer(
analyzer="char", ngram_range=(3, 6), encoding="ascii"
).build_analyzer()
with pytest.raises(UnicodeDecodeError):
ca(text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(
analyzer="char", strip_accents="unicode", ngram_range=(3, 6)
).build_analyzer()
text = "J'ai mangé du kangourou ce midi, c'était pas très bon"
expected = ["j'a", "'ai", "ai ", "i m", " ma"]
assert cnga(text)[:5] == expected
expected = ["s tres", " tres ", "tres b", "res bo", "es bon"]
assert cnga(text)[-5:] == expected
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ["thi", "his", "is ", "s i", " is"]
assert cnga(text)[:5] == expected
expected = [" yeste", "yester", "esterd", "sterda", "terday"]
assert cnga(text)[-5:] == expected
cnga = CountVectorizer(
input="file", analyzer="char", ngram_range=(3, 6)
).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ["thi", "his", "is ", "s i", " is"]
assert cnga(text)[:5] == expected
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(
analyzer="char_wb", strip_accents="unicode", ngram_range=(3, 6)
).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [" th", "thi", "his", "is ", " thi"]
assert cnga(text)[:5] == expected
expected = ["yester", "esterd", "sterda", "terday", "erday "]
assert cnga(text)[-5:] == expected
cnga = CountVectorizer(
input="file", analyzer="char_wb", ngram_range=(3, 6)
).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [" a ", " te", "tes", "est", "st ", " tes"]
assert cnga(text)[:6] == expected
def test_word_ngram_analyzer():
cnga = CountVectorizer(
analyzer="word", strip_accents="unicode", ngram_range=(3, 6)
).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ["this is test", "is test really", "test really met"]
assert cnga(text)[:3] == expected
expected = [
"test really met harry yesterday",
"this is test really met harry",
"is test really met harry yesterday",
]
assert cnga(text)[-3:] == expected
cnga_file = CountVectorizer(
input="file", analyzer="word", ngram_range=(3, 6)
).build_analyzer()
file = StringIO(text)
assert cnga_file(file) == cnga(text)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert vect.vocabulary_ == vocab
else:
assert set(vect.vocabulary_) == terms
X = vect.transform(JUNK_FOOD_DOCS)
assert X.shape[1] == len(terms)
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
inv = vect.inverse_transform(X)
assert len(inv) == X.shape[0]
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline(
[
("count", CountVectorizer(vocabulary=what_we_like)),
("tfidf", TfidfTransformer()),
]
)
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert set(pipe.named_steps["count"].vocabulary_) == set(what_we_like)
assert X.shape[1] == len(what_we_like)
def test_countvectorizer_custom_vocabulary_repeated_indices():
vocab = {"pizza": 0, "beer": 0}
msg = "Vocabulary contains repeated indices"
with pytest.raises(ValueError, match=msg):
vect = CountVectorizer(vocabulary=vocab)
vect.fit(["pasta_siziliana"])
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
with pytest.raises(ValueError, match="doesn't contain index"):
vect = CountVectorizer(vocabulary=vocab)
vect.fit(["pasta_verdura"])
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words="english")
assert cv.get_stop_words() == ENGLISH_STOP_WORDS
cv.set_params(stop_words="_bad_str_stop_")
with pytest.raises(ValueError):
cv.get_stop_words()
cv.set_params(stop_words="_bad_unicode_stop_")
with pytest.raises(ValueError):
cv.get_stop_words()
stoplist = ["some", "other", "words"]
cv.set_params(stop_words=stoplist)
assert cv.get_stop_words() == set(stoplist)
def test_countvectorizer_empty_vocabulary():
with pytest.raises(ValueError, match="empty vocabulary"):
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
with pytest.raises(ValueError, match="empty vocabulary"):
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert X1.shape[1] != X2.shape[1]
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
def test_countvectorizer_custom_token_pattern(get_names):
"""Check `get_feature_names()` when a custom token pattern is passed.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/12971
"""
corpus = [
"This is the 1st document in my corpus.",
"This document is the 2nd sample.",
"And this is the 3rd one.",
"Is this the 4th document?",
]
token_pattern = r"[0-9]{1,3}(?:st|nd|rd|th)\s\b(\w{2,})\b"
vectorizer = CountVectorizer(token_pattern=token_pattern)
vectorizer.fit_transform(corpus)
expected = ["document", "one", "sample"]
feature_names_out = getattr(vectorizer, get_names)()
assert_array_equal(feature_names_out, expected)
def test_countvectorizer_custom_token_pattern_with_several_group():
"""Check that we raise an error if token pattern capture several groups.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/12971
"""
corpus = [
"This is the 1st document in my corpus.",
"This document is the 2nd sample.",
"And this is the 3rd one.",
"Is this the 4th document?",
]
token_pattern = r"([0-9]{1,3}(?:st|nd|rd|th))\s\b(\w{2,})\b"
err_msg = "More than 1 capturing group in token pattern"
vectorizer = CountVectorizer(token_pattern=token_pattern)
with pytest.raises(ValueError, match=err_msg):
vectorizer.fit(corpus)
def test_countvectorizer_uppercase_in_vocab():
# Check that the check for uppercase in the provided vocabulary is only done at fit
# time and not at transform time (#21251)
vocabulary = ["Sample", "Upper", "Case", "Vocabulary"]
message = (
"Upper case characters found in"
" vocabulary while 'lowercase'"
" is True. These entries will not"
" be matched with any documents"
)
vectorizer = CountVectorizer(lowercase=True, vocabulary=vocabulary)
with pytest.warns(UserWarning, match=message):
vectorizer.fit(vocabulary)
with warnings.catch_warnings():
warnings.simplefilter("error", UserWarning)
vectorizer.transform(vocabulary)
def test_tf_transformer_feature_names_out():
"""Check get_feature_names_out for TfidfTransformer"""
X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm="l2").fit(X)
feature_names_in = ["a", "c", "b"]
feature_names_out = tr.get_feature_names_out(feature_names_in)
assert_array_equal(feature_names_in, feature_names_out)
def test_tf_idf_smoothing():
X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm="l2")
tfidf = tr.fit_transform(X).toarray()
assert (tfidf >= 0).all()
# check normalization
assert_array_almost_equal((tfidf**2).sum(axis=1), [1.0, 1.0, 1.0])
# this is robust to features with only zeros
X = [[1, 1, 0], [1, 1, 0], [1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm="l2")
tfidf = tr.fit_transform(X).toarray()
assert (tfidf >= 0).all()
def test_tfidf_no_smoothing():
X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm="l2")
tfidf = tr.fit_transform(X).toarray()
assert (tfidf >= 0).all()
# check normalization
assert_array_almost_equal((tfidf**2).sum(axis=1), [1.0, 1.0, 1.0])
    # the lack of smoothing makes IDF fragile in the presence of features with
    # only zeros
X = [[1, 1, 0], [1, 1, 0], [1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm="l2")
in_warning_message = "divide by zero"
with pytest.warns(RuntimeWarning, match=in_warning_message):
tr.fit_transform(X).toarray()
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert tfidf[0] == 1
assert tfidf[1] > tfidf[0]
assert tfidf[2] > tfidf[1]
assert tfidf[1] < 2
assert tfidf[2] < 3
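# --- Illustrative sketch (not part of the test suite) ---
# A hedged sketch of the smoothed idf formula that the tests above exercise,
# following the documented form idf(t) = ln((1 + n) / (1 + df(t))) + 1.
def _example_manual_smoothed_idf():
    X = np.array([[1, 1, 1], [1, 1, 0], [1, 0, 0]])
    n_samples = X.shape[0]
    df = (X > 0).sum(axis=0)
    manual_idf = np.log((1 + n_samples) / (1 + df)) + 1
    fitted_idf = TfidfTransformer(smooth_idf=True).fit(X).idf_
    assert_array_almost_equal(manual_idf, fitted_idf)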
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, "tocsr"):
counts_train = counts_train.tocsr()
assert counts_train[0, v1.vocabulary_["pizza"]] == 2
    # build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # check that the two vectorizers give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, "tocsr"):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert counts_test[0, vocabulary["salad"]] == 1
assert counts_test[0, vocabulary["tomato"]] == 1
assert counts_test[0, vocabulary["water"]] == 1
# stop word from the fixed list
assert "the" not in vocabulary
# stop word found automatically by the vectorizer DF thresholding
# words that are high frequent across the complete corpus are likely
# to be not informative (either real stop words of extraction
# artifacts)
assert "copyright" not in vocabulary
# not present in the sample
assert counts_test[0, vocabulary["coke"]] == 0
assert counts_test[0, vocabulary["burger"]] == 0
assert counts_test[0, vocabulary["beer"]] == 0
assert counts_test[0, vocabulary["pizza"]] == 0
# test tf-idf
t1 = TfidfTransformer(norm="l1")
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert len(t1.idf_) == len(v1.vocabulary_)
assert tfidf.shape == (n_train, len(v1.vocabulary_))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert tfidf_test.shape == (len(test_data), len(v1.vocabulary_))
# test tf alone
t2 = TfidfTransformer(norm="l1", use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert not hasattr(t2, "idf_")
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
with pytest.raises(ValueError):
t3.transform(counts_train)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm="l1")
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert not tv.fixed_vocabulary_
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
with pytest.raises(ValueError):
v3.transform(train_data)
# ascii preprocessor?
v3.set_params(strip_accents="ascii", lowercase=False)
processor = v3.build_preprocessor()
text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
expected = strip_accents_ascii(text)
result = processor(text)
assert expected == result
# error on bad strip_accents param
v3.set_params(strip_accents="_gabbledegook_", preprocessor=None)
with pytest.raises(ValueError):
v3.build_preprocessor()
# error with bad analyzer type
    v3.set_params(analyzer="_invalid_analyzer_type_")
with pytest.raises(ValueError):
v3.build_analyzer()
def test_tfidf_vectorizer_setters():
norm, use_idf, smooth_idf, sublinear_tf = "l2", False, False, False
tv = TfidfVectorizer(
norm=norm, use_idf=use_idf, smooth_idf=smooth_idf, sublinear_tf=sublinear_tf
)
tv.fit(JUNK_FOOD_DOCS)
assert tv._tfidf.norm == norm
assert tv._tfidf.use_idf == use_idf
assert tv._tfidf.smooth_idf == smooth_idf
assert tv._tfidf.sublinear_tf == sublinear_tf
    # assigning new values to the vectorizer parameters should not affect the
    # underlying `TfidfTransformer` until fitting
tv.norm = "l1"
tv.use_idf = True
tv.smooth_idf = True
tv.sublinear_tf = True
assert tv._tfidf.norm == norm
assert tv._tfidf.use_idf == use_idf
assert tv._tfidf.smooth_idf == smooth_idf
assert tv._tfidf.sublinear_tf == sublinear_tf
tv.fit(JUNK_FOOD_DOCS)
assert tv._tfidf.norm == tv.norm
assert tv._tfidf.use_idf == tv.use_idf
assert tv._tfidf.smooth_idf == tv.smooth_idf
assert tv._tfidf.sublinear_tf == tv.sublinear_tf
@fails_if_pypy
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert X.shape == (len(ALL_FOOD_DOCS), v.n_features)
assert X.dtype == v.dtype
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert np.min(X.data) > -1
assert np.min(X.data) < 0
assert np.max(X.data) > 0
assert np.max(X.data) < 1
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), norm="l1")
X = v.transform(ALL_FOOD_DOCS)
assert X.shape == (len(ALL_FOOD_DOCS), v.n_features)
assert X.dtype == v.dtype
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert ngrams_nnz > token_nnz
assert ngrams_nnz < 2 * token_nnz
# makes the feature values bounded
assert np.min(X.data) > -1
assert np.max(X.data) < 1
# Check that the rows are normalized
for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
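# Hedged sketch, not part of the original suite: HashingVectorizer is
# stateless, so transform() works without a prior fit() and two independent
# instances configured identically produce identical matrices.
@fails_if_pypy
def test_hashing_vectorizer_stateless_sketch():
    v_a = HashingVectorizer(n_features=2**8, norm=None, alternate_sign=False)
    v_b = HashingVectorizer(n_features=2**8, norm=None, alternate_sign=False)
    assert_allclose_dense_sparse(
        v_a.transform(ALL_FOOD_DOCS), v_b.transform(ALL_FOOD_DOCS)
    )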
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
def test_feature_names(get_names):
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
with pytest.raises(ValueError):
getattr(cv, get_names)()
assert not cv.fixed_vocabulary_
# test for vocabulary learned from data
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert len(cv.vocabulary_) == n_features
feature_names = getattr(cv, get_names)()
if get_names == "get_feature_names_out":
assert isinstance(feature_names, np.ndarray)
assert feature_names.dtype == object
else:
# get_feature_names
assert isinstance(feature_names, list)
assert len(feature_names) == n_features
assert_array_equal(
[
"beer",
"burger",
"celeri",
"coke",
"pizza",
"salad",
"sparkling",
"tomato",
"water",
],
feature_names,
)
for idx, name in enumerate(feature_names):
assert idx == cv.vocabulary_.get(name)
# test for custom vocabulary
vocab = [
"beer",
"burger",
"celeri",
"coke",
"pizza",
"salad",
"sparkling",
"tomato",
"water",
]
cv = CountVectorizer(vocabulary=vocab)
feature_names = getattr(cv, get_names)()
assert_array_equal(
[
"beer",
"burger",
"celeri",
"coke",
"pizza",
"salad",
"sparkling",
"tomato",
"water",
],
feature_names,
)
assert cv.fixed_vocabulary_
for idx, name in enumerate(feature_names):
assert idx == cv.vocabulary_.get(name)
@pytest.mark.parametrize("Vectorizer", (CountVectorizer, TfidfVectorizer))
def test_vectorizer_max_features(Vectorizer):
expected_vocabulary = {"burger", "beer", "salad", "pizza"}
expected_stop_words = {
"celeri",
"tomato",
"copyright",
"coke",
"sparkling",
"water",
"the",
}
# test bounded number of extracted features
vectorizer = Vectorizer(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert set(vectorizer.vocabulary_) == expected_vocabulary
assert vectorizer.stop_words_ == expected_stop_words
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
def test_count_vectorizer_max_features(get_names):
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = getattr(cv_1, get_names)()
features_3 = getattr(cv_3, get_names)()
features_None = getattr(cv_None, get_names)()
# The most common feature is "the", with frequency 7.
assert 7 == counts_1.max()
assert 7 == counts_3.max()
assert 7 == counts_None.max()
# The most common feature should be the same
assert "the" == features_1[np.argmax(counts_1)]
assert "the" == features_3[np.argmax(counts_3)]
assert "the" == features_None[np.argmax(counts_None)]
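# Hedged sketch, not part of the original suite: max_features keeps the terms
# with the highest corpus-wide counts, so with max_features=1 the retained
# vocabulary is exactly the most frequent token ("the" in JUNK_FOOD_DOCS).
def test_count_vectorizer_max_features_keeps_most_frequent():
    cv = CountVectorizer(max_features=1).fit(JUNK_FOOD_DOCS)
    assert set(cv.vocabulary_) == {"the"}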
def test_vectorizer_max_df():
test_data = ["abc", "dea", "eat"]
vect = CountVectorizer(analyzer="char", max_df=1.0)
vect.fit(test_data)
assert "a" in vect.vocabulary_.keys()
assert len(vect.vocabulary_.keys()) == 6
assert len(vect.stop_words_) == 0
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert "a" not in vect.vocabulary_.keys() # {ae} ignored
assert len(vect.vocabulary_.keys()) == 4 # {bcdt} remain
assert "a" in vect.stop_words_
assert len(vect.stop_words_) == 2
vect.max_df = 1
vect.fit(test_data)
assert "a" not in vect.vocabulary_.keys() # {ae} ignored
assert len(vect.vocabulary_.keys()) == 4 # {bcdt} remain
assert "a" in vect.stop_words_
assert len(vect.stop_words_) == 2
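# Hedged sketch, not part of the original suite: a float max_df is interpreted
# as a proportion of documents while an int is an absolute document count, so
# max_df=0.5 and max_df=1 prune exactly the same terms on this 3-document
# corpus.
def test_vectorizer_max_df_float_and_int_agree():
    test_data = ["abc", "dea", "eat"]
    vect_float = CountVectorizer(analyzer="char", max_df=0.5).fit(test_data)
    vect_int = CountVectorizer(analyzer="char", max_df=1).fit(test_data)
    assert vect_float.vocabulary_ == vect_int.vocabulary_
    assert vect_float.stop_words_ == vect_int.stop_words_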
def test_vectorizer_min_df():
test_data = ["abc", "dea", "eat"]
vect = CountVectorizer(analyzer="char", min_df=1)
vect.fit(test_data)
assert "a" in vect.vocabulary_.keys()
assert len(vect.vocabulary_.keys()) == 6
assert len(vect.stop_words_) == 0
vect.min_df = 2
vect.fit(test_data)
assert "c" not in vect.vocabulary_.keys() # {bcdt} ignored
assert len(vect.vocabulary_.keys()) == 2 # {ae} remain
assert "c" in vect.stop_words_
assert len(vect.stop_words_) == 4
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert "c" not in vect.vocabulary_.keys() # {bcdet} ignored
assert len(vect.vocabulary_.keys()) == 1 # {a} remains
assert "c" in vect.stop_words_
assert len(vect.stop_words_) == 5
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
def test_count_binary_occurrences(get_names):
# by default multiple occurrences are counted as longs
test_data = ["aaabc", "abbde"]
vect = CountVectorizer(analyzer="char", max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(["a", "b", "c", "d", "e"], getattr(vect, get_names)())
assert_array_equal([[3, 1, 1, 0, 0], [1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer="char", max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0], [1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer="char", max_df=1.0, binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert X_sparse.dtype == np.float32
@fails_if_pypy
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ["aaabc", "abbde"]
vect = HashingVectorizer(alternate_sign=False, analyzer="char", norm=None)
X = vect.transform(test_data)
assert np.max(X[0:1].data) == 3
assert np.max(X[1:2].data) == 2
assert X.dtype == np.float64
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(
analyzer="char", alternate_sign=False, binary=True, norm=None
)
X = vect.transform(test_data)
assert np.max(X.data) == 1
assert X.dtype == np.float64
# check the ability to change the dtype
vect = HashingVectorizer(
analyzer="char", alternate_sign=False, binary=True, norm=None, dtype=np.float64
)
X = vect.transform(test_data)
assert X.dtype == np.float64
@pytest.mark.parametrize("Vectorizer", (CountVectorizer, TfidfVectorizer))
def test_vectorizer_inverse_transform(Vectorizer):
# raw documents
data = ALL_FOOD_DOCS
vectorizer = Vectorizer()
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
assert isinstance(inversed_data, list)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
assert sparse.issparse(transformed_data)
assert transformed_data.format == "csr"
# Test that inverse_transform also works with numpy arrays and
# scipy
transformed_data2 = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data2)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
# Check that inverse_transform also works on non CSR sparse data:
transformed_data3 = transformed_data.tocsc()
inversed_data3 = vectorizer.inverse_transform(transformed_data3)
for terms, terms3 in zip(inversed_data, inversed_data3):
assert_array_equal(np.sort(terms), np.sort(terms3))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=0.2, random_state=0
)
pipeline = Pipeline([("vect", CountVectorizer()), ("svc", LinearSVC())])
parameters = {
"vect__ngram_range": [(1, 1), (1, 2)],
"svc__loss": ("hinge", "squared_hinge"),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1, cv=3)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset the bigram representation explored by the grid
    # search is not needed: the unigram models already converge to 100%
    # accuracy, so the simpler candidate is retained as the best estimator
assert grid_search.best_score_ == 1.0
best_vectorizer = grid_search.best_estimator_.named_steps["vect"]
assert best_vectorizer.ngram_range == (1, 1)
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=0.1, random_state=0
)
pipeline = Pipeline([("vect", TfidfVectorizer()), ("svc", LinearSVC())])
parameters = {
"vect__ngram_range": [(1, 1), (1, 2)],
"vect__norm": ("l1", "l2"),
"svc__loss": ("hinge", "squared_hinge"),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
    # on this toy dataset the bigram representation explored by the grid
    # search is not needed: the unigram models already converge to 100%
    # accuracy, so the simpler candidate is retained as the best estimator
assert grid_search.best_score_ == 1.0
best_vectorizer = grid_search.best_estimator_.named_steps["vect"]
assert best_vectorizer.ngram_range == (1, 1)
assert best_vectorizer.norm == "l2"
assert not best_vectorizer.fixed_vocabulary_
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([("vect", TfidfVectorizer()), ("svc", LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1.0, 1.0, 1.0])
@fails_if_pypy
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"Машинное обучение — обширный подраздел искусственного "
"интеллекта, изучающий методы построения алгоритмов, "
"способных обучаться."
)
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert X_counted.shape == (1, 12)
vect = HashingVectorizer(norm=None, alternate_sign=False)
X_hashed = vect.transform([document])
assert X_hashed.shape == (1, 2**20)
# No collisions on such a small dataset
assert X_counted.nnz == X_hashed.nnz
# When norm is None and not alternate_sign, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ["pizza", "celeri"]
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert vect.fixed_vocabulary_
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm="l1"),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert type(copy) == orig.__class__
assert copy.get_params() == orig.get_params()
if IS_PYPY and isinstance(orig, HashingVectorizer):
continue
else:
assert_allclose_dense_sparse(
copy.fit_transform(JUNK_FOOD_DOCS),
orig.fit_transform(JUNK_FOOD_DOCS),
)
@pytest.mark.parametrize(
"factory",
[
CountVectorizer.build_analyzer,
CountVectorizer.build_preprocessor,
CountVectorizer.build_tokenizer,
],
)
def test_pickling_built_processors(factory):
"""Tokenizers cannot be pickled
https://github.com/scikit-learn/scikit-learn/issues/12833
"""
vec = CountVectorizer()
function = factory(vec)
text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
roundtripped_function = pickle.loads(pickle.dumps(function))
expected = function(text)
result = roundtripped_function(text)
assert result == expected
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
def test_countvectorizer_vocab_sets_when_pickling(get_names):
# ensure that vocabulary of type set is coerced to a list to
# preserve iteration ordering after deserialization
rng = np.random.RandomState(0)
vocab_words = np.array(
[
"beer",
"burger",
"celeri",
"coke",
"pizza",
"salad",
"sparkling",
"tomato",
"water",
]
)
for x in range(0, 100):
vocab_set = set(rng.choice(vocab_words, size=5, replace=False))
cv = CountVectorizer(vocabulary=vocab_set)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_array_equal(getattr(cv, get_names)(), getattr(unpickled_cv, get_names)())
# TODO: Remove in 1.2 when get_feature_names is removed.
@pytest.mark.filterwarnings("ignore::FutureWarning:sklearn")
@pytest.mark.parametrize("get_names", ["get_feature_names", "get_feature_names_out"])
def test_countvectorizer_vocab_dicts_when_pickling(get_names):
rng = np.random.RandomState(0)
vocab_words = np.array(
[
"beer",
"burger",
"celeri",
"coke",
"pizza",
"salad",
"sparkling",
"tomato",
"water",
]
)
for x in range(0, 100):
vocab_dict = dict()
words = rng.choice(vocab_words, size=5, replace=False)
for y in range(0, 5):
vocab_dict[words[y]] = y
cv = CountVectorizer(vocabulary=vocab_dict)
unpickled_cv = pickle.loads(pickle.dumps(cv))
cv.fit(ALL_FOOD_DOCS)
unpickled_cv.fit(ALL_FOOD_DOCS)
assert_array_equal(getattr(cv, get_names)(), getattr(unpickled_cv, get_names)())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, "stop_words_")
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert type(copy) == orig.__class__
assert_array_equal(copy.fit_transform(X).toarray(), orig.fit_transform(X).toarray())
def test_transformer_idf_setter():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
copy = TfidfTransformer()
copy.idf_ = orig.idf_
assert_array_equal(copy.transform(X).toarray(), orig.transform(X).toarray())
def test_tfidf_vectorizer_setter():
orig = TfidfVectorizer(use_idf=True)
orig.fit(JUNK_FOOD_DOCS)
copy = TfidfVectorizer(vocabulary=orig.vocabulary_, use_idf=True)
copy.idf_ = orig.idf_
assert_array_equal(
copy.transform(JUNK_FOOD_DOCS).toarray(),
orig.transform(JUNK_FOOD_DOCS).toarray(),
)
# `idf_` cannot be set with `use_idf=False`
copy = TfidfVectorizer(vocabulary=orig.vocabulary_, use_idf=False)
err_msg = "`idf_` cannot be set when `user_idf=False`."
with pytest.raises(ValueError, match=err_msg):
copy.idf_ = orig.idf_
def test_tfidfvectorizer_invalid_idf_attr():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
copy = TfidfVectorizer(vocabulary=vect.vocabulary_, use_idf=True)
expected_idf_len = len(vect.idf_)
invalid_idf = [1.0] * (expected_idf_len + 1)
with pytest.raises(ValueError):
setattr(copy, "idf_", invalid_idf)
def test_non_unique_vocab():
vocab = ["a", "b", "c", "a", "a"]
vect = CountVectorizer(vocabulary=vocab)
with pytest.raises(ValueError):
vect.fit([])
@fails_if_pypy
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(["hello world", np.nan, "hello hello"])
with pytest.raises(exception, match=message):
func()
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert v.binary
X = v.fit_transform(["hello world", "hello hello"]).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(["hello world", "hello hello"]).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert vect_vocab_clone.vocabulary_ == vect_vocab.vocabulary_
@pytest.mark.parametrize(
"Vectorizer", (CountVectorizer, TfidfVectorizer, HashingVectorizer)
)
def test_vectorizer_string_object_as_input(Vectorizer):
message = "Iterable over raw text documents expected, string object received."
vec = Vectorizer()
with pytest.raises(ValueError, match=message):
vec.fit_transform("hello world!")
with pytest.raises(ValueError, match=message):
vec.fit("hello world!")
vec.fit(["some text", "some other text"])
with pytest.raises(ValueError, match=message):
vec.transform("hello world!")
@pytest.mark.parametrize("X_dtype", [np.float32, np.float64])
def test_tfidf_transformer_type(X_dtype):
X = sparse.rand(10, 20000, dtype=X_dtype, random_state=42)
X_trans = TfidfTransformer().fit_transform(X)
assert X_trans.dtype == X.dtype
def test_tfidf_transformer_sparse():
X = sparse.rand(10, 20000, dtype=np.float64, random_state=42)
X_csc = sparse.csc_matrix(X)
X_csr = sparse.csr_matrix(X)
X_trans_csc = TfidfTransformer().fit_transform(X_csc)
X_trans_csr = TfidfTransformer().fit_transform(X_csr)
assert_allclose_dense_sparse(X_trans_csc, X_trans_csr)
assert X_trans_csc.format == X_trans_csr.format
@pytest.mark.parametrize(
"vectorizer_dtype, output_dtype, warning_expected",
[
(np.int32, np.float64, True),
(np.int64, np.float64, True),
(np.float32, np.float32, False),
(np.float64, np.float64, False),
],
)
def test_tfidf_vectorizer_type(vectorizer_dtype, output_dtype, warning_expected):
X = np.array(["numpy", "scipy", "sklearn"])
vectorizer = TfidfVectorizer(dtype=vectorizer_dtype)
warning_msg_match = "'dtype' should be used."
if warning_expected:
with pytest.warns(UserWarning, match=warning_msg_match):
X_idf = vectorizer.fit_transform(X)
else:
with warnings.catch_warnings():
warnings.simplefilter("error", UserWarning)
X_idf = vectorizer.fit_transform(X)
assert X_idf.dtype == output_dtype
@pytest.mark.parametrize(
"vec",
[
HashingVectorizer(ngram_range=(2, 1)),
CountVectorizer(ngram_range=(2, 1)),
TfidfVectorizer(ngram_range=(2, 1)),
],
)
def test_vectorizers_invalid_ngram_range(vec):
    # vectorizers can be initialized with an invalid ngram_range;
    # check that fitting raises an informative error message
invalid_range = vec.ngram_range
message = re.escape(
f"Invalid value for ngram_range={invalid_range} "
"lower boundary larger than the upper boundary."
)
if isinstance(vec, HashingVectorizer) and IS_PYPY:
pytest.xfail(reason="HashingVectorizer is not supported on PyPy")
with pytest.raises(ValueError, match=message):
vec.fit(["good news everyone"])
with pytest.raises(ValueError, match=message):
vec.fit_transform(["good news everyone"])
if isinstance(vec, HashingVectorizer):
with pytest.raises(ValueError, match=message):
vec.transform(["good news everyone"])
def _check_stop_words_consistency(estimator):
stop_words = estimator.get_stop_words()
tokenize = estimator.build_tokenizer()
preprocess = estimator.build_preprocessor()
return estimator._check_stop_words_consistency(stop_words, preprocess, tokenize)
@fails_if_pypy
def test_vectorizer_stop_words_inconsistent():
lstr = r"\['and', 'll', 've'\]"
message = (
"Your stop_words may be inconsistent with your "
"preprocessing. Tokenizing the stop words generated "
"tokens %s not in stop_words." % lstr
)
for vec in [CountVectorizer(), TfidfVectorizer(), HashingVectorizer()]:
vec.set_params(stop_words=["you've", "you", "you'll", "AND"])
with pytest.warns(UserWarning, match=message):
vec.fit_transform(["hello world"])
# reset stop word validation
del vec._stop_words_id
assert _check_stop_words_consistency(vec) is False
# Only one warning per stop list
with warnings.catch_warnings():
warnings.simplefilter("error", UserWarning)
vec.fit_transform(["hello world"])
assert _check_stop_words_consistency(vec) is None
# Test caching of inconsistency assessment
vec.set_params(stop_words=["you've", "you", "you'll", "blah", "AND"])
with pytest.warns(UserWarning, match=message):
vec.fit_transform(["hello world"])
@skip_if_32bit
def test_countvectorizer_sort_features_64bit_sparse_indices():
"""
Check that CountVectorizer._sort_features preserves the dtype of its sparse
feature matrix.
This test is skipped on 32bit platforms, see:
https://github.com/scikit-learn/scikit-learn/pull/11295
for more details.
"""
X = sparse.csr_matrix((5, 5), dtype=np.int64)
# force indices and indptr to int64.
INDICES_DTYPE = np.int64
X.indices = X.indices.astype(INDICES_DTYPE)
X.indptr = X.indptr.astype(INDICES_DTYPE)
vocabulary = {"scikit-learn": 0, "is": 1, "great!": 2}
Xs = CountVectorizer()._sort_features(X, vocabulary)
assert INDICES_DTYPE == Xs.indices.dtype
@fails_if_pypy
@pytest.mark.parametrize(
"Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer]
)
def test_stop_word_validation_custom_preprocessor(Estimator):
data = [{"text": "some text"}]
vec = Estimator()
assert _check_stop_words_consistency(vec) is True
vec = Estimator(preprocessor=lambda x: x["text"], stop_words=["and"])
assert _check_stop_words_consistency(vec) == "error"
# checks are cached
assert _check_stop_words_consistency(vec) is None
vec.fit_transform(data)
class CustomEstimator(Estimator):
def build_preprocessor(self):
return lambda x: x["text"]
vec = CustomEstimator(stop_words=["and"])
assert _check_stop_words_consistency(vec) == "error"
vec = Estimator(
tokenizer=lambda doc: re.compile(r"\w{1,}").findall(doc), stop_words=["and"]
)
assert _check_stop_words_consistency(vec) is True
@pytest.mark.parametrize(
"Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer]
)
@pytest.mark.parametrize(
"input_type, err_type, err_msg",
[
("filename", FileNotFoundError, ""),
("file", AttributeError, "'str' object has no attribute 'read'"),
],
)
def test_callable_analyzer_error(Estimator, input_type, err_type, err_msg):
if issubclass(Estimator, HashingVectorizer) and IS_PYPY:
pytest.xfail("HashingVectorizer is not supported on PyPy")
data = ["this is text, not file or filename"]
with pytest.raises(err_type, match=err_msg):
Estimator(analyzer=lambda x: x.split(), input=input_type).fit_transform(data)
@pytest.mark.parametrize(
"Estimator",
[
CountVectorizer,
TfidfVectorizer,
pytest.param(HashingVectorizer, marks=fails_if_pypy),
],
)
@pytest.mark.parametrize(
"analyzer", [lambda doc: open(doc, "r"), lambda doc: doc.read()]
)
@pytest.mark.parametrize("input_type", ["file", "filename"])
def test_callable_analyzer_change_behavior(Estimator, analyzer, input_type):
data = ["this is text, not file or filename"]
with pytest.raises((FileNotFoundError, AttributeError)):
Estimator(analyzer=analyzer, input=input_type).fit_transform(data)
@pytest.mark.parametrize(
"Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer]
)
def test_callable_analyzer_reraise_error(tmpdir, Estimator):
# check if a custom exception from the analyzer is shown to the user
def analyzer(doc):
raise Exception("testing")
if issubclass(Estimator, HashingVectorizer) and IS_PYPY:
pytest.xfail("HashingVectorizer is not supported on PyPy")
f = tmpdir.join("file.txt")
f.write("sample content\n")
with pytest.raises(Exception, match="testing"):
Estimator(analyzer=analyzer, input="file").fit_transform([f])
@pytest.mark.parametrize(
"Vectorizer", [CountVectorizer, HashingVectorizer, TfidfVectorizer]
)
@pytest.mark.parametrize(
"stop_words, tokenizer, preprocessor, ngram_range, token_pattern,"
"analyzer, unused_name, ovrd_name, ovrd_msg",
[
(
["you've", "you'll"],
None,
None,
(1, 1),
None,
"char",
"'stop_words'",
"'analyzer'",
"!= 'word'",
),
(
None,
lambda s: s.split(),
None,
(1, 1),
None,
"char",
"'tokenizer'",
"'analyzer'",
"!= 'word'",
),
(
None,
lambda s: s.split(),
None,
(1, 1),
r"\w+",
"word",
"'token_pattern'",
"'tokenizer'",
"is not None",
),
(
None,
None,
lambda s: s.upper(),
(1, 1),
r"\w+",
lambda s: s.upper(),
"'preprocessor'",
"'analyzer'",
"is callable",
),
(
None,
None,
None,
(1, 2),
None,
lambda s: s.upper(),
"'ngram_range'",
"'analyzer'",
"is callable",
),
(
None,
None,
None,
(1, 1),
r"\w+",
"char",
"'token_pattern'",
"'analyzer'",
"!= 'word'",
),
],
)
def test_unused_parameters_warn(
Vectorizer,
stop_words,
tokenizer,
preprocessor,
ngram_range,
token_pattern,
analyzer,
unused_name,
ovrd_name,
ovrd_msg,
):
train_data = JUNK_FOOD_DOCS
# setting parameter and checking for corresponding warning messages
vect = Vectorizer()
vect.set_params(
stop_words=stop_words,
tokenizer=tokenizer,
preprocessor=preprocessor,
ngram_range=ngram_range,
token_pattern=token_pattern,
analyzer=analyzer,
)
msg = "The parameter %s will not be used since %s %s" % (
unused_name,
ovrd_name,
ovrd_msg,
)
with pytest.warns(UserWarning, match=msg):
vect.fit(train_data)
@pytest.mark.parametrize(
"Vectorizer, X",
(
(HashingVectorizer, [{"foo": 1, "bar": 2}, {"foo": 3, "baz": 1}]),
(CountVectorizer, JUNK_FOOD_DOCS),
),
)
def test_n_features_in(Vectorizer, X):
# For vectorizers, n_features_in_ does not make sense
vectorizer = Vectorizer()
assert not hasattr(vectorizer, "n_features_in_")
vectorizer.fit(X)
assert not hasattr(vectorizer, "n_features_in_")
def test_tie_breaking_sample_order_invariance():
# Checks the sample order invariance when setting max_features
# non-regression test for #17939
vec = CountVectorizer(max_features=1)
vocab1 = vec.fit(["hello", "world"]).vocabulary_
vocab2 = vec.fit(["world", "hello"]).vocabulary_
assert vocab1 == vocab2
# TODO: Remove in 1.2 when get_feature_names is removed
def test_get_feature_names_deprecated():
cv = CountVectorizer(max_df=0.5).fit(ALL_FOOD_DOCS)
msg = "get_feature_names is deprecated in 1.0"
with pytest.warns(FutureWarning, match=msg):
cv.get_feature_names()
@fails_if_pypy
def test_nonnegative_hashing_vectorizer_result_indices():
# add test for pr 19035
hashing = HashingVectorizer(n_features=1000000, ngram_range=(2, 3))
indices = hashing.transform(["22pcs efuture"]).indices
assert indices[0] >= 0
| bsd-3-clause |
florian-f/sklearn | sklearn/gaussian_process/gaussian_process.py | 4 | 33781 | from __future__ import print_function
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Author: Vincent Dubourg <[email protected]>
# (mostly translation, see implementation details)
# License: BSD style
import numpy as np
from scipy import linalg, optimize, rand
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import array2d, check_random_state
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
if hasattr(linalg, 'solve_triangular'):
# only in scipy since 0.9
solve_triangular = linalg.solve_triangular
else:
# slower, but works
def solve_triangular(x, y, lower=True):
return linalg.solve(x, y)
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]).
"""
X = array2d(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) / 2
ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij.astype(np.int)
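# Hedged illustration (kept as comments so nothing runs at import time):
# for X = [[0.], [1.], [3.]] the function returns
#     D  = [[1.], [3.], [2.]]        # |0-1|, |0-3|, |1-3|
#     ij = [[0, 1], [0, 2], [1, 2]]  # index pair matching each row of D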
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
MSE and only plan to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
        the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
        Input X and observations y are centered and scaled with respect to
means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
`theta_`: array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
`reduced_likelihood_function_value_`: array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
    The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) with the observations of the
scalar output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X = array2d(X)
y = np.asarray(y).ravel()[:, np.newaxis]
# Check shapes of DOE & observations
n_samples_X, n_features = X.shape
n_samples_y = y.shape[0]
if n_samples_X != n_samples_y:
raise ValueError("X and y must have the same number of rows.")
else:
n_samples = n_samples_X
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
            raise Exception(("Ordinary least squares problem is underdetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
            Default assumes eval_MSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
            evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like
An array with shape (n_eval, ) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) with the Mean Squared Error at x.
"""
# Check input shapes
X = array2d(X)
n_eval, n_features_X = X.shape
n_samples, n_features = self.X.shape
# Run input checks
self._check_params(n_samples)
if n_features_X != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the sample size used for fit() "
"which is %d.") % (n_features_X, n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
                        print("This GaussianProcess used 'light' storage mode "
                              "at instantiation. Need to recompute "
                              "autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T)
else:
# Ordinary Kriging
u = np.zeros(y.shape)
MSE = self.sigma2 * (1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
for k in range(max(1, n_eval / batch_size)):
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
pass
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = np.log10(self.thetaL) \
+ rand(self.theta0.size).reshape(self.theta0.shape) \
* np.log10(self.thetaU / self.thetaL)
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_minus_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
optimal_rlf_value = - optimal_minus_rlf_value
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
            # Back up the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = array2d(self.theta0.min())
self.thetaL = array2d(self.thetaL.min())
self.thetaU = array2d(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = array2d(theta_iso)
self.thetaL = array2d(thetaL[0, i])
self.thetaU = array2d(thetaU[0, i])
def corr_cut(t, d):
return corr(array2d(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i + 1)::]]
)), d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
            # Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = array2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = array2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = array2d(self.thetaL)
self.thetaU = array2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
                raise ValueError("The bounds must satisfy 0 < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
if np.any(self.nugget) < 0.:
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if not self.optimizer in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
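# Hedged usage sketch (not part of the original module): fit the model on the
# 1-D toy function from the class docstring and query the BLUP together with
# its MSE. The hyper-parameter bounds below are illustrative values only.
if __name__ == "__main__":
    X_demo = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
    y_demo = (X_demo * np.sin(X_demo)).ravel()
    gp_demo = GaussianProcess(theta0=1e-1, thetaL=1e-3, thetaU=1.)
    gp_demo.fit(X_demo, y_demo)
    y_pred, mse = gp_demo.predict(np.atleast_2d([2., 4.]).T, eval_MSE=True)
    print("BLUP:", y_pred, "MSE:", mse)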
| bsd-3-clause |
peastman/msmbuilder | msmbuilder/example_datasets/base.py | 9 | 8471 | import numbers
import shutil
import sys
import time
from functools import wraps
from io import BytesIO
from os import environ
from os import makedirs
from os.path import exists
from os.path import expanduser
from os.path import join
from zipfile import ZipFile
import numpy as np
from six.moves.urllib.error import HTTPError
from six.moves.urllib.request import urlopen
from sklearn.utils import check_random_state
def retry(max_retries=1):
""" Retry a function `max_retries` times. """
def retry_func(func):
@wraps(func)
def wrapper(*args, **kwargs):
num_retries = 0
while num_retries <= max_retries:
try:
ret = func(*args, **kwargs)
break
except HTTPError:
if num_retries == max_retries:
raise
num_retries += 1
time.sleep(5)
return ret
return wrapper
return retry_func
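# Illustrative usage of the retry decorator above (a sketch, not part of the
# original module; ``_download`` is a hypothetical helper): HTTPError is
# retried up to three times before it propagates, mirroring @retry(3) below.
#
# @retry(max_retries=3)
# def _download(url):
#     return urlopen(url).read()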
class Dataset(object):
@classmethod
def description(cls):
"""Get a description from the Notes section of the docstring."""
lines = [s.strip() for s in cls.__doc__.splitlines()]
note_i = lines.index("Notes")
return "\n".join(lines[note_i + 2:])
def cache(self):
raise NotImplementedError
def get(self):
raise NotImplementedError
def get_cached(self):
raise NotImplementedError
class _MDDataset(Dataset):
target_directory = "" # set in subclass
data_url = "" # set in subclass
def __init__(self, data_home=None, verbose=True):
self.data_home = get_data_home(data_home)
self.data_dir = join(self.data_home, self.target_directory)
self.cached = False
self.verbose = verbose
def _msmbdata_cache(self):
if self.verbose:
print("Copying {} from msmb_data package to {}"
.format(self.target_directory, self.data_home))
msmb_data = has_msmb_data()
assert msmb_data is not None
shutil.copytree("{}/{}".format(msmb_data, self.target_directory),
self.data_dir)
@retry(3)
def _figshare_cache(self):
if self.verbose:
print('downloading {} from {} to {}'
.format(self.target_directory, self.data_url,
self.data_home))
fhandle = urlopen(self.data_url)
buf = BytesIO(fhandle.read())
zip_file = ZipFile(buf)
makedirs(self.data_dir)
for name in zip_file.namelist():
zip_file.extract(name, path=self.data_dir)
@retry(3)
def cache(self):
if not exists(self.data_home):
makedirs(self.data_home)
if not exists(self.data_dir):
if has_msmb_data() is not None:
self._msmbdata_cache()
else:
self._figshare_cache()
elif self.verbose:
print("{} already is cached".format(self.target_directory))
self.cached = True
def get_cached(self):
raise NotImplementedError
def get(self):
if not self.cached:
self.cache()
return self.get_cached()
class _NWell(Dataset):
"""Base class for brownian dynamics on a potential
Parameters
----------
data_home : optional, default: None
Specify another cache folder for the datasets. By default
all MSMBuilder data is stored in '~/msmbuilder_data' subfolders.
random_state : {int, None}, default: None
        Seed the pseudorandom number generator to generate trajectories. If
seed is None, the global numpy PRNG is used. If random_state is an
int, the simulations will be cached in ``data_home``, or loaded from
``data_home`` if simulations with that seed have been performed already.
With random_state=None, new simulations will be performed and the
trajectories will not be cached.
"""
target_name = "" # define in subclass
n_trajectories = 0 # define in subclass
version = 1 # override in subclass if parameters are updated
def __init__(self, data_home=None, random_state=None):
self.data_home = get_data_home(data_home)
self.data_dir = join(self.data_home, self.target_name)
self.random_state = random_state
self.cache_path = self._get_cache_path(random_state)
def _get_cache_path(self, random_state):
path = "{}/version-{}/randomstate-{}".format(self.data_dir,
self.version,
self.random_state)
return path
def _load(self, path):
return [np.load("{}/{}.npy".format(path, i))
for i in range(self.n_trajectories)]
def _save(self, path, trajectories):
assert len(trajectories) == self.n_trajectories
if not exists(path):
makedirs(path)
for i, traj in enumerate(trajectories):
np.save("{}/{}.npy".format(path, i), traj)
def cache(self):
random = check_random_state(self.random_state)
if not exists(self.data_dir):
makedirs(self.data_dir)
if self.random_state is None:
trajectories = self.simulate_func(random)
return trajectories
if not isinstance(self.random_state, numbers.Integral):
raise TypeError('random_state must be an int')
if exists(self.cache_path):
return self._load(self.cache_path)
trajectories = self.simulate_func(random)
self._save(self.cache_path, trajectories)
return trajectories
def get_cached(self):
if self.cache_path is None:
raise ValueError("You must specify a random state to get "
"cached trajectories.")
trajectories = self._load(self.cache_path)
return Bunch(trajectories=trajectories, DESCR=self.description())
def get(self):
trajectories = self.cache()
return Bunch(trajectories=trajectories, DESCR=self.description())
def simulate_func(self, random):
# Implement in subclass
raise NotImplementedError
def potential(self, x):
# Implement in subclass
raise NotImplementedError
class Bunch(dict):
"""Container object for datasets: dictionary-like object that
exposes its keys as attributes."""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
self.__dict__ = self
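# Minimal Bunch usage sketch (illustrative only): keys double as attributes.
#
# b = Bunch(trajectories=[np.zeros((10, 2))], DESCR="toy dataset")
# assert b.trajectories is b["trajectories"]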
def has_msmb_data():
"""We provide a conda package containing the saved data.
This package was introduced because the figshare downloads could
be 'iffy' at times.
Returns
-------
path : str or None
        The path (if it exists), otherwise None.
"""
msmb_data_dir = join(sys.prefix, 'share', 'msmb_data')
if exists(msmb_data_dir):
return msmb_data_dir
else:
return None
def _expand_and_makedir(data_home):
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
def get_data_home(data_home=None):
"""Return the path of the msmbuilder data dir.
As of msmbuilder v3.6, this function will prefer data downloaded via
the msmb_data conda package (and located within the python installation
directory). If this package exists, we will use its data directory as
the data home. Otherwise, we use the old logic:
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'msmbuilder_data'
in the user's home folder.
Alternatively, it can be set by the 'MSMBUILDER_DATA' environment
variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is not None:
return _expand_and_makedir(data_home)
msmb_data = has_msmb_data()
if msmb_data is not None:
return _expand_and_makedir(msmb_data)
data_home = environ.get('MSMBUILDER_DATA', join('~', 'msmbuilder_data'))
return _expand_and_makedir(data_home)
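# Summary sketch of the resolution order implemented above: an explicit
# ``data_home`` argument wins, then the msmb_data conda package directory,
# then the MSMBUILDER_DATA environment variable, then ~/msmbuilder_data.
#
# get_data_home()                 # package dir or ~/msmbuilder_data
# get_data_home("/tmp/my_cache")  # hypothetical path, created if missing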
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
| lgpl-2.1 |
pytorch/fairseq | fairseq_cli/interactive.py | 1 | 11465 | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate raw text with a trained model. Batches data on-the-fly.
"""
import ast
import fileinput
import logging
import math
import os
import sys
import time
from argparse import Namespace
from collections import namedtuple
import numpy as np
import torch
from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.token_generation_constraints import pack_constraints, unpack_constraints
from fairseq_cli.generate import get_symbols_to_strip_from_output
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.interactive")
Batch = namedtuple("Batch", "ids src_tokens src_lengths constraints")
Translation = namedtuple("Translation", "src_str hypos pos_scores alignments")
def buffered_read(input, buffer_size):
buffer = []
with fileinput.input(files=[input], openhook=fileinput.hook_encoded("utf-8")) as h:
for src_str in h:
buffer.append(src_str.strip())
if len(buffer) >= buffer_size:
yield buffer
buffer = []
if len(buffer) > 0:
yield buffer
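# Illustrative usage sketch ("input.txt" is a hypothetical file): each yielded
# value is a list of at most ``buffer_size`` stripped lines.
#
# for chunk in buffered_read("input.txt", buffer_size=3):
#     translate(chunk)  # ``translate`` is a hypothetical consumer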
def make_batches(lines, cfg, task, max_positions, encode_fn):
def encode_fn_target(x):
return encode_fn(x)
if cfg.generation.constraints:
        # Strip (tab-delimited) constraints, if present, from input lines,
# store them in batch_constraints
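        # Illustrative input line (sketch of the assumed tab-separated layout):
        #   "source sentence\tfirst constraint\tsecond constraint"
        # leaves lines[i] == "source sentence" plus two constraint strings.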
batch_constraints = [list() for _ in lines]
for i, line in enumerate(lines):
if "\t" in line:
lines[i], *batch_constraints[i] = line.split("\t")
# Convert each List[str] to List[Tensor]
for i, constraint_list in enumerate(batch_constraints):
batch_constraints[i] = [
task.target_dictionary.encode_line(
encode_fn_target(constraint),
append_eos=False,
add_if_not_exist=False,
)
for constraint in constraint_list
]
if cfg.generation.constraints:
constraints_tensor = pack_constraints(batch_constraints)
else:
constraints_tensor = None
tokens, lengths = task.get_interactive_tokens_and_lengths(lines, encode_fn)
itr = task.get_batch_iterator(
dataset=task.build_dataset_for_inference(
tokens, lengths, constraints=constraints_tensor
),
max_tokens=cfg.dataset.max_tokens,
max_sentences=cfg.dataset.batch_size,
max_positions=max_positions,
ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
).next_epoch_itr(shuffle=False)
for batch in itr:
ids = batch["id"]
src_tokens = batch["net_input"]["src_tokens"]
src_lengths = batch["net_input"]["src_lengths"]
constraints = batch.get("constraints", None)
yield Batch(
ids=ids,
src_tokens=src_tokens,
src_lengths=src_lengths,
constraints=constraints,
)
def main(cfg: FairseqConfig):
if isinstance(cfg, Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
start_time = time.time()
total_translate_time = 0
utils.import_user_module(cfg.common)
if cfg.interactive.buffer_size < 1:
cfg.interactive.buffer_size = 1
if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None:
cfg.dataset.batch_size = 1
assert (
not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam
), "--sampling requires --nbest to be equal to --beam"
assert (
not cfg.dataset.batch_size
or cfg.dataset.batch_size <= cfg.interactive.buffer_size
), "--batch-size cannot be larger than --buffer-size"
logger.info(cfg)
# Fix seed for stochastic decoding
if cfg.common.seed is not None and not cfg.generation.no_seed_provided:
np.random.seed(cfg.common.seed)
utils.set_torch_seed(cfg.common.seed)
use_cuda = torch.cuda.is_available() and not cfg.common.cpu
# Setup task, e.g., translation
task = tasks.setup_task(cfg.task)
# Load ensemble
overrides = ast.literal_eval(cfg.common_eval.model_overrides)
logger.info("loading model(s) from {}".format(cfg.common_eval.path))
models, _model_args = checkpoint_utils.load_model_ensemble(
utils.split_paths(cfg.common_eval.path),
arg_overrides=overrides,
task=task,
suffix=cfg.checkpoint.checkpoint_suffix,
strict=(cfg.checkpoint.checkpoint_shard_count == 1),
num_shards=cfg.checkpoint.checkpoint_shard_count,
)
# Set dictionaries
src_dict = task.source_dictionary
tgt_dict = task.target_dictionary
# Optimize ensemble for generation
for model in models:
if model is None:
continue
if cfg.common.fp16:
model.half()
if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
model.cuda()
model.prepare_for_inference_(cfg)
# Initialize generator
generator = task.build_generator(models, cfg.generation)
# Handle tokenization and BPE
tokenizer = task.build_tokenizer(cfg.tokenizer)
bpe = task.build_bpe(cfg.bpe)
def encode_fn(x):
if tokenizer is not None:
x = tokenizer.encode(x)
if bpe is not None:
x = bpe.encode(x)
return x
def decode_fn(x):
if bpe is not None:
x = bpe.decode(x)
if tokenizer is not None:
x = tokenizer.decode(x)
return x
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(cfg.generation.replace_unk)
max_positions = utils.resolve_max_positions(
task.max_positions(), *[model.max_positions() for model in models]
)
if cfg.generation.constraints:
logger.warning(
"NOTE: Constrained decoding currently assumes a shared subword vocabulary."
)
if cfg.interactive.buffer_size > 1:
logger.info("Sentence buffer size: %s", cfg.interactive.buffer_size)
logger.info("NOTE: hypothesis and token scores are output in base 2")
logger.info("Type the input sentence and press return:")
start_id = 0
for inputs in buffered_read(cfg.interactive.input, cfg.interactive.buffer_size):
results = []
for batch in make_batches(inputs, cfg, task, max_positions, encode_fn):
bsz = batch.src_tokens.size(0)
src_tokens = batch.src_tokens
src_lengths = batch.src_lengths
constraints = batch.constraints
if use_cuda:
src_tokens = src_tokens.cuda()
src_lengths = src_lengths.cuda()
if constraints is not None:
constraints = constraints.cuda()
sample = {
"net_input": {
"src_tokens": src_tokens,
"src_lengths": src_lengths,
},
}
translate_start_time = time.time()
translations = task.inference_step(
generator, models, sample, constraints=constraints
)
translate_time = time.time() - translate_start_time
total_translate_time += translate_time
list_constraints = [[] for _ in range(bsz)]
if cfg.generation.constraints:
list_constraints = [unpack_constraints(c) for c in constraints]
for i, (id, hypos) in enumerate(zip(batch.ids.tolist(), translations)):
src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad())
constraints = list_constraints[i]
results.append(
(
start_id + id,
src_tokens_i,
hypos,
{
"constraints": constraints,
"time": translate_time / len(translations),
},
)
)
# sort output to match input order
for id_, src_tokens, hypos, info in sorted(results, key=lambda x: x[0]):
src_str = ""
if src_dict is not None:
src_str = src_dict.string(src_tokens, cfg.common_eval.post_process)
print("S-{}\t{}".format(id_, src_str))
print("W-{}\t{:.3f}\tseconds".format(id_, info["time"]))
for constraint in info["constraints"]:
print(
"C-{}\t{}".format(
id_,
tgt_dict.string(constraint, cfg.common_eval.post_process),
)
)
# Process top predictions
for hypo in hypos[: min(len(hypos), cfg.generation.nbest)]:
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypo["tokens"].int().cpu(),
src_str=src_str,
alignment=hypo["alignment"],
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=cfg.common_eval.post_process,
extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator),
)
detok_hypo_str = decode_fn(hypo_str)
score = hypo["score"] / math.log(2) # convert to base 2
# original hypothesis (after tokenization and BPE)
print("H-{}\t{}\t{}".format(id_, score, hypo_str))
# detokenized hypothesis
print("D-{}\t{}\t{}".format(id_, score, detok_hypo_str))
print(
"P-{}\t{}".format(
id_,
" ".join(
map(
lambda x: "{:.4f}".format(x),
# convert from base e to base 2
hypo["positional_scores"].div_(math.log(2)).tolist(),
)
),
)
)
if cfg.generation.print_alignment:
alignment_str = " ".join(
["{}-{}".format(src, tgt) for src, tgt in alignment]
)
print("A-{}\t{}".format(id_, alignment_str))
# update running id_ counter
start_id += len(inputs)
logger.info(
"Total time: {:.3f} seconds; translation time: {:.3f}".format(
time.time() - start_time, total_translate_time
)
)
def cli_main():
parser = options.get_interactive_generation_parser()
args = options.parse_args_and_arch(parser)
distributed_utils.call_main(convert_namespace_to_omegaconf(args), main)
if __name__ == "__main__":
cli_main()
| mit |
anntzer/scikit-learn | sklearn/datasets/_kddcup99.py | 11 | 12731 | """KDDCUP 99 dataset.
A classic dataset for anomaly detection.
The dataset page is available from UCI Machine Learning Repository
https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz
"""
import errno
from gzip import GzipFile
import logging
import os
from os.path import exists, join
import numpy as np
import joblib
from ._base import _fetch_remote
from ._base import _convert_data_dataframe
from . import get_data_home
from ._base import RemoteFileMetadata
from ._base import load_descr
from ..utils import Bunch
from ..utils import check_random_state
from ..utils import shuffle as shuffle_method
# The original data can be found at:
# https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz
ARCHIVE = RemoteFileMetadata(
filename="kddcup99_data",
url="https://ndownloader.figshare.com/files/5976045",
checksum="3b6c942aa0356c0ca35b7b595a26c89d343652c9db428893e7494f837b274292",
)
# The original data can be found at:
# https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data_10_percent.gz
ARCHIVE_10_PERCENT = RemoteFileMetadata(
filename="kddcup99_10_data",
url="https://ndownloader.figshare.com/files/5976042",
checksum="8045aca0d84e70e622d1148d7df782496f6333bf6eb979a1b0837c42a9fd9561",
)
logger = logging.getLogger(__name__)
def fetch_kddcup99(
*,
subset=None,
data_home=None,
shuffle=False,
random_state=None,
percent10=True,
download_if_missing=True,
return_X_y=False,
as_frame=False,
):
"""Load the kddcup99 dataset (classification).
Download it if necessary.
================= ====================================
Classes 23
Samples total 4898431
Dimensionality 41
Features discrete (int) or continuous (float)
================= ====================================
Read more in the :ref:`User Guide <kddcup99_dataset>`.
.. versionadded:: 0.18
Parameters
----------
subset : {'SA', 'SF', 'http', 'smtp'}, default=None
To return the corresponding classical subsets of kddcup 99.
If None, return the entire kddcup 99 dataset.
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
.. versionadded:: 0.19
shuffle : bool, default=False
Whether to shuffle dataset.
random_state : int, RandomState instance or None, default=None
Determines random number generation for dataset shuffling and for
selection of abnormal samples if `subset='SA'`. Pass an int for
reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
percent10 : bool, default=True
Whether to load only 10 percent of the data.
download_if_missing : bool, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
return_X_y : bool, default=False
If True, returns ``(data, target)`` instead of a Bunch object. See
below for more information about the `data` and `target` object.
.. versionadded:: 0.20
as_frame : bool, default=False
If `True`, returns a pandas Dataframe for the ``data`` and ``target``
objects in the `Bunch` returned object; `Bunch` return object will also
have a ``frame`` member.
.. versionadded:: 0.24
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : {ndarray, dataframe} of shape (494021, 41)
The data matrix to learn. If `as_frame=True`, `data` will be a
pandas DataFrame.
target : {ndarray, series} of shape (494021,)
The regression target for each sample. If `as_frame=True`, `target`
will be a pandas Series.
frame : dataframe of shape (494021, 42)
Only present when `as_frame=True`. Contains `data` and `target`.
DESCR : str
The full description of the dataset.
feature_names : list
The names of the dataset columns
target_names: list
The names of the target columns
(data, target) : tuple if ``return_X_y`` is True
A tuple of two ndarray. The first containing a 2D array of
shape (n_samples, n_features) with each row representing one
sample and each column representing the features. The second
ndarray of shape (n_samples,) containing the target samples.
.. versionadded:: 0.20
"""
data_home = get_data_home(data_home=data_home)
kddcup99 = _fetch_brute_kddcup99(
data_home=data_home,
percent10=percent10,
download_if_missing=download_if_missing,
)
data = kddcup99.data
target = kddcup99.target
feature_names = kddcup99.feature_names
target_names = kddcup99.target_names
if subset == "SA":
s = target == b"normal."
t = np.logical_not(s)
normal_samples = data[s, :]
normal_targets = target[s]
abnormal_samples = data[t, :]
abnormal_targets = target[t]
n_samples_abnormal = abnormal_samples.shape[0]
# selected abnormal samples:
random_state = check_random_state(random_state)
r = random_state.randint(0, n_samples_abnormal, 3377)
abnormal_samples = abnormal_samples[r]
abnormal_targets = abnormal_targets[r]
data = np.r_[normal_samples, abnormal_samples]
target = np.r_[normal_targets, abnormal_targets]
if subset == "SF" or subset == "http" or subset == "smtp":
# select all samples with positive logged_in attribute:
s = data[:, 11] == 1
data = np.c_[data[s, :11], data[s, 12:]]
feature_names = feature_names[:11] + feature_names[12:]
target = target[s]
data[:, 0] = np.log((data[:, 0] + 0.1).astype(float, copy=False))
data[:, 4] = np.log((data[:, 4] + 0.1).astype(float, copy=False))
data[:, 5] = np.log((data[:, 5] + 0.1).astype(float, copy=False))
if subset == "http":
s = data[:, 2] == b"http"
data = data[s]
target = target[s]
data = np.c_[data[:, 0], data[:, 4], data[:, 5]]
feature_names = [feature_names[0], feature_names[4], feature_names[5]]
if subset == "smtp":
s = data[:, 2] == b"smtp"
data = data[s]
target = target[s]
data = np.c_[data[:, 0], data[:, 4], data[:, 5]]
feature_names = [feature_names[0], feature_names[4], feature_names[5]]
if subset == "SF":
data = np.c_[data[:, 0], data[:, 2], data[:, 4], data[:, 5]]
feature_names = [
feature_names[0],
feature_names[2],
feature_names[4],
feature_names[5],
]
if shuffle:
data, target = shuffle_method(data, target, random_state=random_state)
fdescr = load_descr("kddcup99.rst")
frame = None
if as_frame:
frame, data, target = _convert_data_dataframe(
"fetch_kddcup99", data, target, feature_names, target_names
)
if return_X_y:
return data, target
return Bunch(
data=data,
target=target,
frame=frame,
target_names=target_names,
feature_names=feature_names,
DESCR=fdescr,
)
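# Illustrative usage sketch (downloads the data on first use):
#
# from sklearn.datasets import fetch_kddcup99
# X, y = fetch_kddcup99(subset="smtp", percent10=True, return_X_y=True)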
def _fetch_brute_kddcup99(data_home=None, download_if_missing=True, percent10=True):
"""Load the kddcup99 dataset, downloading it if necessary.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
        If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
percent10 : bool, default=True
Whether to load only 10 percent of the data.
Returns
-------
dataset : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
data : ndarray of shape (494021, 41)
Each row corresponds to the 41 features in the dataset.
target : ndarray of shape (494021,)
Each value corresponds to one of the 21 attack types or to the
label 'normal.'.
feature_names : list
The names of the dataset columns
target_names: list
The names of the target columns
DESCR : str
Description of the kddcup99 dataset.
"""
data_home = get_data_home(data_home=data_home)
dir_suffix = "-py3"
if percent10:
kddcup_dir = join(data_home, "kddcup99_10" + dir_suffix)
archive = ARCHIVE_10_PERCENT
else:
kddcup_dir = join(data_home, "kddcup99" + dir_suffix)
archive = ARCHIVE
samples_path = join(kddcup_dir, "samples")
targets_path = join(kddcup_dir, "targets")
available = exists(samples_path)
dt = [
("duration", int),
("protocol_type", "S4"),
("service", "S11"),
("flag", "S6"),
("src_bytes", int),
("dst_bytes", int),
("land", int),
("wrong_fragment", int),
("urgent", int),
("hot", int),
("num_failed_logins", int),
("logged_in", int),
("num_compromised", int),
("root_shell", int),
("su_attempted", int),
("num_root", int),
("num_file_creations", int),
("num_shells", int),
("num_access_files", int),
("num_outbound_cmds", int),
("is_host_login", int),
("is_guest_login", int),
("count", int),
("srv_count", int),
("serror_rate", float),
("srv_serror_rate", float),
("rerror_rate", float),
("srv_rerror_rate", float),
("same_srv_rate", float),
("diff_srv_rate", float),
("srv_diff_host_rate", float),
("dst_host_count", int),
("dst_host_srv_count", int),
("dst_host_same_srv_rate", float),
("dst_host_diff_srv_rate", float),
("dst_host_same_src_port_rate", float),
("dst_host_srv_diff_host_rate", float),
("dst_host_serror_rate", float),
("dst_host_srv_serror_rate", float),
("dst_host_rerror_rate", float),
("dst_host_srv_rerror_rate", float),
("labels", "S16"),
]
column_names = [c[0] for c in dt]
target_names = column_names[-1]
feature_names = column_names[:-1]
if available:
try:
X = joblib.load(samples_path)
y = joblib.load(targets_path)
except Exception as e:
raise IOError(
"The cache for fetch_kddcup99 is invalid, please delete "
f"{str(kddcup_dir)} and run the fetch_kddcup99 again"
) from e
elif download_if_missing:
_mkdirp(kddcup_dir)
logger.info("Downloading %s" % archive.url)
_fetch_remote(archive, dirname=kddcup_dir)
DT = np.dtype(dt)
logger.debug("extracting archive")
archive_path = join(kddcup_dir, archive.filename)
file_ = GzipFile(filename=archive_path, mode="r")
Xy = []
for line in file_.readlines():
line = line.decode()
Xy.append(line.replace("\n", "").split(","))
file_.close()
logger.debug("extraction done")
os.remove(archive_path)
Xy = np.asarray(Xy, dtype=object)
for j in range(42):
Xy[:, j] = Xy[:, j].astype(DT[j])
X = Xy[:, :-1]
y = Xy[:, -1]
# XXX bug when compress!=0:
# (error: 'Incorrect data length while decompressing[...] the file
# could be corrupted.')
joblib.dump(X, samples_path, compress=0)
joblib.dump(y, targets_path, compress=0)
else:
raise IOError("Data not found and `download_if_missing` is False")
return Bunch(
data=X,
target=y,
feature_names=feature_names,
target_names=[target_names],
)
def _mkdirp(d):
"""Ensure directory d exists (like mkdir -p on Unix)
No guarantee that the directory is writable.
"""
try:
os.makedirs(d)
except OSError as e:
if e.errno != errno.EEXIST:
raise
| bsd-3-clause |
anntzer/scikit-learn | sklearn/cluster/_mean_shift.py | 9 | 18376 | """Mean shift clustering algorithm.
Mean shift clustering aims to discover *blobs* in a smooth density of
samples. It is a centroid based algorithm, which works by updating candidates
for centroids to be the mean of the points within a given region. These
candidates are then filtered in a post-processing stage to eliminate
near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
"""
# Authors: Conrad Lee <[email protected]>
# Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Martino Sorbaro <[email protected]>
import numpy as np
import warnings
from joblib import Parallel
from numbers import Integral, Real
from collections import defaultdict
from ..utils._param_validation import Interval
from ..utils.validation import check_is_fitted
from ..utils.fixes import delayed
from ..utils import check_random_state, gen_batches, check_array
from ..base import BaseEstimator, ClusterMixin
from ..neighbors import NearestNeighbors
from ..metrics.pairwise import pairwise_distances_argmin
from .._config import config_context
def estimate_bandwidth(X, *, quantile=0.3, n_samples=None, random_state=0, n_jobs=None):
"""Estimate the bandwidth to use with the mean-shift algorithm.
    Note that this function takes time at least quadratic in n_samples. For
    large datasets, it is wise to subsample by setting the n_samples parameter
    to a small value.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input points.
quantile : float, default=0.3
        Should be in the interval [0, 1];
0.5 means that the median of all pairwise distances is used.
n_samples : int, default=None
The number of samples to use. If not given, all samples are used.
random_state : int, RandomState instance, default=None
The generator used to randomly select the samples from input points
for bandwidth estimation. Use an int to make the randomness
deterministic.
See :term:`Glossary <random_state>`.
n_jobs : int, default=None
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Returns
-------
bandwidth : float
The bandwidth parameter.
"""
X = check_array(X)
random_state = check_random_state(random_state)
if n_samples is not None:
idx = random_state.permutation(X.shape[0])[:n_samples]
X = X[idx]
n_neighbors = int(X.shape[0] * quantile)
if n_neighbors < 1: # cannot fit NearestNeighbors with n_neighbors = 0
n_neighbors = 1
nbrs = NearestNeighbors(n_neighbors=n_neighbors, n_jobs=n_jobs)
nbrs.fit(X)
bandwidth = 0.0
for batch in gen_batches(len(X), 500):
d, _ = nbrs.kneighbors(X[batch, :], return_distance=True)
bandwidth += np.max(d, axis=1).sum()
return bandwidth / X.shape[0]
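# Illustrative usage sketch: estimate a bandwidth on a subsample, then cluster
# with it (``X`` is a hypothetical (n_samples, n_features) array).
#
# bw = estimate_bandwidth(X, quantile=0.2, n_samples=500, random_state=0)
# labels = MeanShift(bandwidth=bw).fit_predict(X)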
# separate function for each seed's iterative loop
def _mean_shift_single_seed(my_mean, X, nbrs, max_iter):
# For each seed, climb gradient until convergence or max_iter
bandwidth = nbrs.get_params()["radius"]
stop_thresh = 1e-3 * bandwidth # when mean has converged
completed_iterations = 0
while True:
# Find mean of points within bandwidth
i_nbrs = nbrs.radius_neighbors([my_mean], bandwidth, return_distance=False)[0]
points_within = X[i_nbrs]
if len(points_within) == 0:
break # Depending on seeding strategy this condition may occur
my_old_mean = my_mean # save the old mean
my_mean = np.mean(points_within, axis=0)
# If converged or at max_iter, adds the cluster
if (
np.linalg.norm(my_mean - my_old_mean) < stop_thresh
or completed_iterations == max_iter
):
break
completed_iterations += 1
return tuple(my_mean), len(points_within), completed_iterations
def mean_shift(
X,
*,
bandwidth=None,
seeds=None,
bin_seeding=False,
min_bin_freq=1,
cluster_all=True,
max_iter=300,
n_jobs=None,
):
"""Perform mean shift clustering of data using a flat kernel.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
bandwidth : float, default=None
Kernel bandwidth.
If bandwidth is not given, it is determined using a heuristic based on
the median of all pairwise distances. This will take quadratic time in
the number of samples. The sklearn.cluster.estimate_bandwidth function
can be used to do this more efficiently.
seeds : array-like of shape (n_seeds, n_features) or None
Point used as initial kernel locations. If None and bin_seeding=False,
each data point is used as a seed. If None and bin_seeding=True,
see bin_seeding.
bin_seeding : bool, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : bool, default=True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
max_iter : int, default=300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if has not converged yet.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 0.17
Parallel Execution using *n_jobs*.
Returns
-------
cluster_centers : ndarray of shape (n_clusters, n_features)
Coordinates of cluster centers.
labels : ndarray of shape (n_samples,)
Cluster labels for each point.
Notes
-----
For an example, see :ref:`examples/cluster/plot_mean_shift.py
<sphx_glr_auto_examples_cluster_plot_mean_shift.py>`.
"""
model = MeanShift(
bandwidth=bandwidth,
seeds=seeds,
min_bin_freq=min_bin_freq,
bin_seeding=bin_seeding,
cluster_all=cluster_all,
n_jobs=n_jobs,
max_iter=max_iter,
).fit(X)
return model.cluster_centers_, model.labels_
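# Minimal usage sketch for the functional interface (illustrative only):
#
# import numpy as np
# X = np.array([[1.0, 1.0], [1.2, 0.8], [5.0, 5.0], [5.1, 4.9]])
# centers, labels = mean_shift(X, bandwidth=1.0)
# # two clusters are expected: one near (1.1, 0.9), one near (5.05, 4.95)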
def get_bin_seeds(X, bin_size, min_bin_freq=1):
"""Find seeds for mean_shift.
Finds seeds by first binning data onto a grid whose lines are
spaced bin_size apart, and then choosing those bins with at least
min_bin_freq points.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input points, the same points that will be used in mean_shift.
bin_size : float
Controls the coarseness of the binning. Smaller values lead
to more seeding (which is computationally more expensive). If you're
not sure how to set this, set it to the value of the bandwidth used
in clustering.mean_shift.
min_bin_freq : int, default=1
Only bins with at least min_bin_freq will be selected as seeds.
Raising this value decreases the number of seeds found, which
makes mean_shift computationally cheaper.
Returns
-------
bin_seeds : array-like of shape (n_samples, n_features)
Points used as initial kernel positions in clustering.mean_shift.
"""
if bin_size == 0:
return X
# Bin points
bin_sizes = defaultdict(int)
for point in X:
binned_point = np.round(point / bin_size)
bin_sizes[tuple(binned_point)] += 1
# Select only those bins as seeds which have enough members
bin_seeds = np.array(
[point for point, freq in bin_sizes.items() if freq >= min_bin_freq],
dtype=np.float32,
)
if len(bin_seeds) == len(X):
warnings.warn(
"Binning data failed with provided bin_size=%f, using data points as seeds."
% bin_size
)
return X
bin_seeds = bin_seeds * bin_size
return bin_seeds
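# Worked sketch of the binning above (illustrative): with bin_size=1.0 the
# points [0.1, 0.2], [0.3, 0.1] and [2.9, 3.1] fall into bins (0, 0), (0, 0)
# and (3, 3), so with min_bin_freq=2 only the seed at (0.0, 0.0) survives.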
class MeanShift(ClusterMixin, BaseEstimator):
"""Mean shift clustering using a flat kernel.
Mean shift clustering aims to discover "blobs" in a smooth density of
samples. It is a centroid-based algorithm, which works by updating
candidates for centroids to be the mean of the points within a given
region. These candidates are then filtered in a post-processing stage to
eliminate near-duplicates to form the final set of centroids.
Seeding is performed using a binning technique for scalability.
Read more in the :ref:`User Guide <mean_shift>`.
Parameters
----------
bandwidth : float, default=None
Bandwidth used in the RBF kernel.
If not given, the bandwidth is estimated using
sklearn.cluster.estimate_bandwidth; see the documentation for that
function for hints on scalability (see also the Notes, below).
seeds : array-like of shape (n_samples, n_features), default=None
Seeds used to initialize kernels. If not set,
the seeds are calculated by clustering.get_bin_seeds
with bandwidth as the grid size and default values for
other parameters.
bin_seeding : bool, default=False
If true, initial kernel locations are not locations of all
points, but rather the location of the discretized version of
points, where points are binned onto a grid whose coarseness
corresponds to the bandwidth. Setting this option to True will speed
up the algorithm because fewer seeds will be initialized.
The default value is False.
Ignored if seeds argument is not None.
min_bin_freq : int, default=1
To speed up the algorithm, accept only those bins with at least
min_bin_freq points as seeds.
cluster_all : bool, default=True
If true, then all points are clustered, even those orphans that are
not within any kernel. Orphans are assigned to the nearest kernel.
If false, then orphans are given cluster label -1.
n_jobs : int, default=None
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
max_iter : int, default=300
Maximum number of iterations, per seed point before the clustering
operation terminates (for that seed point), if has not converged yet.
.. versionadded:: 0.22
Attributes
----------
cluster_centers_ : ndarray of shape (n_clusters, n_features)
Coordinates of cluster centers.
labels_ : ndarray of shape (n_samples,)
Labels of each point.
n_iter_ : int
Maximum number of iterations performed on each seed.
.. versionadded:: 0.22
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
KMeans : K-Means clustering.
Notes
-----
Scalability:
Because this implementation uses a flat kernel and
a Ball Tree to look up members of each kernel, the complexity will tend
towards O(T*n*log(n)) in lower dimensions, with n the number of samples
and T the number of points. In higher dimensions the complexity will
tend towards O(T*n^2).
Scalability can be boosted by using fewer seeds, for example by using
a higher value of min_bin_freq in the get_bin_seeds function.
Note that the estimate_bandwidth function is much less scalable than the
mean shift algorithm and will be the bottleneck if it is used.
References
----------
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
Examples
--------
>>> from sklearn.cluster import MeanShift
>>> import numpy as np
>>> X = np.array([[1, 1], [2, 1], [1, 0],
... [4, 7], [3, 5], [3, 6]])
>>> clustering = MeanShift(bandwidth=2).fit(X)
>>> clustering.labels_
array([1, 1, 1, 0, 0, 0])
>>> clustering.predict([[0, 0], [5, 5]])
array([1, 0])
>>> clustering
MeanShift(bandwidth=2)
"""
_parameter_constraints: dict = {
"bandwidth": [Interval(Real, 0, None, closed="neither"), None],
"seeds": ["array-like", None],
"bin_seeding": ["boolean"],
"min_bin_freq": [Interval(Integral, 1, None, closed="left")],
"cluster_all": ["boolean"],
"n_jobs": [Integral, None],
"max_iter": [Interval(Integral, 0, None, closed="left")],
}
def __init__(
self,
*,
bandwidth=None,
seeds=None,
bin_seeding=False,
min_bin_freq=1,
cluster_all=True,
n_jobs=None,
max_iter=300,
):
self.bandwidth = bandwidth
self.seeds = seeds
self.bin_seeding = bin_seeding
self.cluster_all = cluster_all
self.min_bin_freq = min_bin_freq
self.n_jobs = n_jobs
self.max_iter = max_iter
def fit(self, X, y=None):
"""Perform clustering.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Samples to cluster.
y : Ignored
Not used, present for API consistency by convention.
Returns
-------
self : object
Fitted instance.
"""
self._validate_params()
X = self._validate_data(X)
bandwidth = self.bandwidth
if bandwidth is None:
bandwidth = estimate_bandwidth(X, n_jobs=self.n_jobs)
seeds = self.seeds
if seeds is None:
if self.bin_seeding:
seeds = get_bin_seeds(X, bandwidth, self.min_bin_freq)
else:
seeds = X
n_samples, n_features = X.shape
center_intensity_dict = {}
# We use n_jobs=1 because this will be used in nested calls under
        # parallel calls to _mean_shift_single_seed so there is no need
        # for further parallelism.
nbrs = NearestNeighbors(radius=bandwidth, n_jobs=1).fit(X)
# execute iterations on all seeds in parallel
all_res = Parallel(n_jobs=self.n_jobs)(
delayed(_mean_shift_single_seed)(seed, X, nbrs, self.max_iter)
for seed in seeds
)
# copy results in a dictionary
for i in range(len(seeds)):
if all_res[i][1]: # i.e. len(points_within) > 0
center_intensity_dict[all_res[i][0]] = all_res[i][1]
self.n_iter_ = max([x[2] for x in all_res])
if not center_intensity_dict:
# nothing near seeds
raise ValueError(
"No point was within bandwidth=%f of any seed. Try a different seeding"
" strategy or increase the bandwidth."
% bandwidth
)
# POST PROCESSING: remove near duplicate points
# If the distance between two kernels is less than the bandwidth,
# then we have to remove one because it is a duplicate. Remove the
# one with fewer points.
sorted_by_intensity = sorted(
center_intensity_dict.items(),
key=lambda tup: (tup[1], tup[0]),
reverse=True,
)
sorted_centers = np.array([tup[0] for tup in sorted_by_intensity])
unique = np.ones(len(sorted_centers), dtype=bool)
nbrs = NearestNeighbors(radius=bandwidth, n_jobs=self.n_jobs).fit(
sorted_centers
)
for i, center in enumerate(sorted_centers):
if unique[i]:
neighbor_idxs = nbrs.radius_neighbors([center], return_distance=False)[
0
]
unique[neighbor_idxs] = 0
unique[i] = 1 # leave the current point as unique
cluster_centers = sorted_centers[unique]
# ASSIGN LABELS: a point belongs to the cluster that it is closest to
nbrs = NearestNeighbors(n_neighbors=1, n_jobs=self.n_jobs).fit(cluster_centers)
labels = np.zeros(n_samples, dtype=int)
distances, idxs = nbrs.kneighbors(X)
if self.cluster_all:
labels = idxs.flatten()
else:
labels.fill(-1)
bool_selector = distances.flatten() <= bandwidth
labels[bool_selector] = idxs.flatten()[bool_selector]
self.cluster_centers_, self.labels_ = cluster_centers, labels
return self
def predict(self, X):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : array-like of shape (n_samples, n_features)
New data to predict.
Returns
-------
labels : ndarray of shape (n_samples,)
Index of the cluster each sample belongs to.
"""
check_is_fitted(self)
X = self._validate_data(X, reset=False)
with config_context(assume_finite=True):
return pairwise_distances_argmin(X, self.cluster_centers_)
| bsd-3-clause |
anntzer/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 8 | 12631 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
from itertools import product
import numpy as np
from numpy.testing import assert_allclose
import pytest
from pytest import approx
from sklearn.utils import check_random_state
from sklearn.metrics import mean_pinball_loss
from sklearn.ensemble._gb_losses import RegressionLossFunction
from sklearn.ensemble._gb_losses import LeastSquaresError
from sklearn.ensemble._gb_losses import LeastAbsoluteError
from sklearn.ensemble._gb_losses import HuberLossFunction
from sklearn.ensemble._gb_losses import QuantileLossFunction
from sklearn.ensemble._gb_losses import BinomialDeviance
from sklearn.ensemble._gb_losses import MultinomialDeviance
from sklearn.ensemble._gb_losses import ExponentialLoss
from sklearn.ensemble._gb_losses import LOSS_FUNCTIONS
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert bd(np.array([0.0]), np.array([0.0])) == bd(np.array([1.0]), np.array([0.0]))
assert bd(np.array([1.0, 1, 1]), np.array([100.0, 100, 100])) == approx(0)
assert bd(np.array([1.0, 0, 0]), np.array([100.0, -100, -100])) == approx(0)
# check if same results as alternative definition of deviance, from ESLII
# Eq. (10.18): -loglike = log(1 + exp(-2*z*f))
# Note:
# - We use y = {0, 1}, ESL (10.18) uses z in {-1, 1}, hence y=2*y-1
# - ESL 2*f = pred_raw, hence the factor 2 of ESL disappears.
# - Deviance = -2*loglike + .., hence a factor of 2 in front.
def alt_dev(y, raw_pred):
z = 2 * y - 1
return 2 * np.mean(np.log(1 + np.exp(-z * raw_pred)))
test_data = product(
(np.array([0.0, 0, 0]), np.array([1.0, 1, 1])),
(np.array([-5.0, -5, -5]), np.array([3.0, 3, 3])),
)
for datum in test_data:
assert bd(*datum) == approx(alt_dev(*datum))
# check the negative gradient against alternative formula from ESLII
# Note: negative_gradient is half the negative gradient.
def alt_ng(y, raw_pred):
z = 2 * y - 1
return z / (1 + np.exp(z * raw_pred))
for datum in test_data:
assert bd.negative_gradient(*datum) == approx(alt_ng(*datum))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError()
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert loss_wo_sw == approx(loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
y = reg_y
loss = Loss()
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = loss.get_init_raw_predictions(X, init_est)
assert out.shape == (y.shape[0], 1)
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
sw_out = loss.get_init_raw_predictions(X, sw_init_est)
assert sw_out.shape == (y.shape[0], 1)
# check if predictions match
assert_allclose(out, sw_out, rtol=1e-2)
def test_quantile_loss_function():
# Non regression test for the QuantileLossFunction object
# There was a sign problem when evaluating the function
# for negative values of 'ytrue - ypred'
x = np.asarray([-1.0, 0.0, 1.0])
y_found = QuantileLossFunction(0.9)(x, np.zeros_like(x))
y_expected = np.asarray([0.1, 0.0, 0.9]).mean()
np.testing.assert_allclose(y_found, y_expected)
y_found_p = mean_pinball_loss(x, np.zeros_like(x), alpha=0.9)
np.testing.assert_allclose(y_found, y_found_p)
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
y = reg_y
p = reg_y
loss = Loss()
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert_allclose(deviance_wo_w, deviance_w_w)
@pytest.mark.parametrize("n_classes, n_samples", [(3, 100), (5, 57), (7, 13)])
def test_multinomial_deviance(n_classes, n_samples, global_random_seed):
# Check multinomial deviance with and without sample weights.
rng = np.random.RandomState(global_random_seed)
sample_weight = np.ones(n_samples)
y_true = rng.randint(0, n_classes, size=n_samples)
y_pred = np.zeros((n_samples, n_classes), dtype=np.float64)
for klass in range(y_pred.shape[1]):
y_pred[:, klass] = y_true == klass
loss = MultinomialDeviance(n_classes)
loss_wo_sw = loss(y_true, y_pred)
assert loss_wo_sw > 0
loss_w_sw = loss(y_true, y_pred, sample_weight=sample_weight)
assert loss_wo_sw == approx(loss_w_sw)
# Multinomial deviance uses weighted average loss rather than
# weighted sum loss, so we make sure that the value remains the same
    # when we divide the weights by 2.
loss_w_sw = loss(y_true, y_pred, sample_weight=0.5 * sample_weight)
assert loss_wo_sw == approx(loss_w_sw)
def test_mdl_computation_weighted():
raw_predictions = np.array([[1.0, -1.0, -0.1], [-2.0, 1.0, 2.0]])
y_true = np.array([0, 1])
weights = np.array([1, 3])
expected_loss = 1.0909323
# MultinomialDeviance loss computation with weights.
loss = MultinomialDeviance(3)
assert loss(y_true, raw_predictions, weights) == approx(expected_loss)
@pytest.mark.parametrize("n", [0, 1, 2])
def test_mdl_exception(n):
# Check that MultinomialDeviance throws an exception when n_classes <= 2
err_msg = "MultinomialDeviance requires more than 2 classes."
with pytest.raises(ValueError, match=err_msg):
MultinomialDeviance(n)
def test_init_raw_predictions_shapes():
# Make sure get_init_raw_predictions returns float64 arrays with shape
# (n_samples, K) where K is 1 for binary classification and regression, and
# K = n_classes for multiclass classification
rng = np.random.RandomState(0)
n_samples = 100
X = rng.normal(size=(n_samples, 5))
y = rng.normal(size=n_samples)
for loss in (
LeastSquaresError(),
LeastAbsoluteError(),
QuantileLossFunction(),
HuberLossFunction(),
):
init_estimator = loss.init_estimator().fit(X, y)
raw_predictions = loss.get_init_raw_predictions(y, init_estimator)
assert raw_predictions.shape == (n_samples, 1)
assert raw_predictions.dtype == np.float64
y = rng.randint(0, 2, size=n_samples)
for loss in (BinomialDeviance(n_classes=2), ExponentialLoss(n_classes=2)):
init_estimator = loss.init_estimator().fit(X, y)
raw_predictions = loss.get_init_raw_predictions(y, init_estimator)
assert raw_predictions.shape == (n_samples, 1)
assert raw_predictions.dtype == np.float64
for n_classes in range(3, 5):
y = rng.randint(0, n_classes, size=n_samples)
loss = MultinomialDeviance(n_classes=n_classes)
init_estimator = loss.init_estimator().fit(X, y)
raw_predictions = loss.get_init_raw_predictions(y, init_estimator)
assert raw_predictions.shape == (n_samples, n_classes)
assert raw_predictions.dtype == np.float64
def test_init_raw_predictions_values(global_random_seed):
# Make sure the get_init_raw_predictions() returns the expected values for
# each loss.
rng = np.random.RandomState(global_random_seed)
n_samples = 100
X = rng.normal(size=(n_samples, 5))
y = rng.normal(size=n_samples)
# Least squares loss
loss = LeastSquaresError()
init_estimator = loss.init_estimator().fit(X, y)
raw_predictions = loss.get_init_raw_predictions(y, init_estimator)
# Make sure baseline prediction is the mean of all targets
assert_allclose(raw_predictions, y.mean())
# Least absolute and huber loss
for Loss in (LeastAbsoluteError, HuberLossFunction):
loss = Loss()
init_estimator = loss.init_estimator().fit(X, y)
raw_predictions = loss.get_init_raw_predictions(y, init_estimator)
# Make sure baseline prediction is the median of all targets
assert_allclose(raw_predictions, np.median(y))
# Quantile loss
for alpha in (0.1, 0.5, 0.9):
loss = QuantileLossFunction(alpha=alpha)
init_estimator = loss.init_estimator().fit(X, y)
raw_predictions = loss.get_init_raw_predictions(y, init_estimator)
# Make sure baseline prediction is the alpha-quantile of all targets
assert_allclose(raw_predictions, np.percentile(y, alpha * 100))
y = rng.randint(0, 2, size=n_samples)
# Binomial deviance
loss = BinomialDeviance(n_classes=2)
init_estimator = loss.init_estimator().fit(X, y)
# Make sure baseline prediction is equal to link_function(p), where p
# is the proba of the positive class. We want predict_proba() to return p,
# and by definition
# p = inverse_link_function(raw_prediction) = sigmoid(raw_prediction)
# So we want raw_prediction = link_function(p) = log(p / (1 - p))
raw_predictions = loss.get_init_raw_predictions(y, init_estimator)
p = y.mean()
assert_allclose(raw_predictions, np.log(p / (1 - p)))
# Exponential loss
loss = ExponentialLoss(n_classes=2)
init_estimator = loss.init_estimator().fit(X, y)
raw_predictions = loss.get_init_raw_predictions(y, init_estimator)
p = y.mean()
assert_allclose(raw_predictions, 0.5 * np.log(p / (1 - p)))
# Multinomial deviance loss
for n_classes in range(3, 5):
y = rng.randint(0, n_classes, size=n_samples)
loss = MultinomialDeviance(n_classes=n_classes)
init_estimator = loss.init_estimator().fit(X, y)
raw_predictions = loss.get_init_raw_predictions(y, init_estimator)
for k in range(n_classes):
p = (y == k).mean()
assert_allclose(raw_predictions[:, k], np.log(p))
@pytest.mark.parametrize("alpha", [0.4, 0.5, 0.6])
def test_lad_equals_quantiles(global_random_seed, alpha):
# Make sure quantile loss with alpha = .5 is equivalent to LAD
lad = LeastAbsoluteError()
ql = QuantileLossFunction(alpha=alpha)
n_samples = 50
rng = np.random.RandomState(global_random_seed)
raw_predictions = rng.normal(size=(n_samples))
y_true = rng.normal(size=(n_samples))
lad_loss = lad(y_true, raw_predictions)
ql_loss = ql(y_true, raw_predictions)
if alpha == 0.5:
assert lad_loss == approx(2 * ql_loss)
weights = np.linspace(0, 1, n_samples) ** 2
lad_weighted_loss = lad(y_true, raw_predictions, sample_weight=weights)
ql_weighted_loss = ql(y_true, raw_predictions, sample_weight=weights)
if alpha == 0.5:
assert lad_weighted_loss == approx(2 * ql_weighted_loss)
pbl_weighted_loss = mean_pinball_loss(
y_true, raw_predictions, sample_weight=weights, alpha=alpha
)
assert pbl_weighted_loss == approx(ql_weighted_loss)
def test_exponential_loss():
"""Check that we compute the negative gradient of the exponential loss.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/9666
"""
loss = ExponentialLoss(n_classes=2)
y_true = np.array([0])
y_pred = np.array([0])
# we expect to have loss = exp(0) = 1
assert loss(y_true, y_pred) == pytest.approx(1)
# we expect to have negative gradient = -1 * (1 * exp(0)) = -1
assert_allclose(loss.negative_gradient(y_true, y_pred), -1)
| bsd-3-clause |
ssat335/GuiPlotting | ClassifySlowWavesScikit.py | 1 | 1030 | """
Author: Shameer Sathar
"""
import numpy as np
from sklearn import svm
#from SVM import SVM
class ClassifySlowWavesScikit:
def __init__(self):
self.data = []
self.len = 0
self.features = []
def classify_data(self, training_set, events, test_data):
"""
        Classify the test data based on a model trained on the training data.
        :param training_set: training feature values
        :param events: labels corresponding to the training feature values
        :param test_data: feature values to be classified
        :return: predictions for the entire test data set
"""
train_array = np.asarray(training_set)
event_array = np.asarray(events)
test_array = np.asarray(test_data)
clf = svm.SVC()
clf.fit(np.transpose(train_array), np.transpose(event_array))
prediction = clf.predict(np.transpose(test_array))
return prediction
'''
model = SVM(max_iter=10000, kernel_type='linear', C=1.0, epsilon=0.001)
model.fit(np.transpose(train_array), np.transpose(event_array))
return model.predict(np.transpose(test_array))
'''
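# Illustrative usage sketch (argument names are hypothetical; arrays are
# assumed to be shaped (n_features, n_samples), matching the transposes above):
#
# classifier = ClassifySlowWavesScikit()
# predicted = classifier.classify_data(train_feats, train_events, test_feats)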
| mit |
neilhan/tensorflow | tensorflow/contrib/learn/python/learn/estimators/classifier_test.py | 6 | 6312 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Classifier."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.session_bundle import manifest_pb2
def iris_input_fn(num_epochs=None):
iris = tf.contrib.learn.datasets.load_iris()
features = tf.reshape(tf.constant(iris.data), [-1, 4])
if num_epochs:
features = tf.train.limit_epochs(features, num_epochs=num_epochs)
target = tf.reshape(tf.constant(iris.target), [-1])
return features, target
def logistic_model_fn(features, target, unused_mode):
target = tf.one_hot(target, 3, 1, 0)
prediction, loss = tf.contrib.learn.models.logistic_regression_zero_init(
features, target)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=0.1)
return prediction, loss, train_op
def logistic_model_params_fn(features, target, unused_mode, params):
target = tf.one_hot(target, 3, 1, 0)
prediction, loss = tf.contrib.learn.models.logistic_regression_zero_init(
features, target)
train_op = tf.contrib.layers.optimize_loss(
loss, tf.contrib.framework.get_global_step(), optimizer='Adagrad',
learning_rate=params['learning_rate'])
return prediction, loss, train_op
class ClassifierTest(tf.test.TestCase):
def testIrisAll(self):
est = tf.contrib.learn.Classifier(model_fn=logistic_model_fn, n_classes=3)
self._runIrisAll(est)
def testIrisAllWithParams(self):
est = tf.contrib.learn.Classifier(model_fn=logistic_model_params_fn,
n_classes=3,
params={'learning_rate': 0.01})
self._runIrisAll(est)
def testIrisPredictAsIterable(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Classifier(model_fn=logistic_model_fn, n_classes=3)
est.fit(iris.data, iris.target, steps=100)
scores = est.evaluate(x=iris.data, y=iris.target, name='eval')
predictions = list(est.predict(x=iris.data, as_iterable=True))
predictions_proba = list(est.predict_proba(x=iris.data, as_iterable=True))
self.assertEqual(len(predictions), iris.target.shape[0])
self.assertAllEqual(predictions, np.argmax(predictions_proba, axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions)
self.assertAllClose(other_score, scores['accuracy'])
def testIrisInputFn(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Classifier(model_fn=logistic_model_fn, n_classes=3)
est.fit(input_fn=iris_input_fn, steps=100)
est.evaluate(input_fn=iris_input_fn, steps=1, name='eval')
predictions = est.predict(input_fn=iris_input_fn)
self.assertEqual(predictions.shape[0], iris.target.shape[0])
def testIrisPredictInputFnAsIterable(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Classifier(model_fn=logistic_model_fn, n_classes=3)
est.fit(input_fn=iris_input_fn, steps=100)
est.evaluate(input_fn=iris_input_fn, steps=1, name='eval')
predict_input_fn = functools.partial(iris_input_fn, num_epochs=1)
predictions = list(est.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(len(predictions), iris.target.shape[0])
def _runIrisAll(self, est):
iris = tf.contrib.learn.datasets.load_iris()
est.fit(iris.data, iris.target, steps=100)
scores = est.evaluate(x=iris.data, y=iris.target, name='eval')
predictions = est.predict(x=iris.data)
predictions_proba = est.predict_proba(x=iris.data)
self.assertEqual(predictions.shape[0], iris.target.shape[0])
self.assertAllEqual(predictions, np.argmax(predictions_proba, axis=1))
other_score = _sklearn.accuracy_score(iris.target, predictions)
self.assertAllClose(other_score, scores['accuracy'])
def _get_default_signature(self, export_meta_filename):
"""Gets the default signature from the export.meta file."""
with tf.Session():
save = tf.train.import_meta_graph(export_meta_filename)
meta_graph_def = save.export_meta_graph()
collection_def = meta_graph_def.collection_def
signatures_any = collection_def['serving_signatures'].any_list.value
self.assertEquals(len(signatures_any), 1)
signatures = manifest_pb2.Signatures()
signatures_any[0].Unpack(signatures)
default_signature = signatures.default_signature
return default_signature
# Disable this test case until b/31032996 is fixed.
def _testExportMonitorRegressionSignature(self):
iris = tf.contrib.learn.datasets.load_iris()
est = tf.contrib.learn.Classifier(model_fn=logistic_model_fn, n_classes=3)
export_dir = tempfile.mkdtemp() + 'export/'
export_monitor = learn.monitors.ExportMonitor(
every_n_steps=1,
export_dir=export_dir,
exports_to_keep=1,
signature_fn=tf.contrib.learn.classifier.classification_signature_fn)
est.fit(iris.data, iris.target, steps=2, monitors=[export_monitor])
self.assertTrue(tf.gfile.Exists(export_dir))
self.assertFalse(tf.gfile.Exists(export_dir + '00000000/export'))
self.assertTrue(tf.gfile.Exists(export_dir + '00000002/export'))
# Validate the signature
signature = self._get_default_signature(export_dir + '00000002/export.meta')
self.assertTrue(signature.HasField('classification_signature'))
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
pytorch/fairseq | fairseq/distributed/module_proxy_wrapper.py | 1 | 1965 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torch import nn
class ModuleProxyWrapper(nn.Module):
"""
Wrap a DistributedDataParallel module and forward requests for missing
attributes to the module wrapped by DDP (the twice-wrapped module).
Also forward calls to :func:`state_dict` and :func:`load_state_dict`.
Usage::
module.xyz = "hello world"
wrapped_module = DistributedDataParallel(module, **ddp_args)
wrapped_module = ModuleProxyWrapper(wrapped_module)
assert wrapped_module.xyz == "hello world"
assert wrapped_module.state_dict().keys() == module.state_dict().keys()
Args:
module (nn.Module): module to wrap
"""
def __init__(self, module: nn.Module):
super().__init__()
assert hasattr(
module, "module"
), "ModuleProxyWrapper expects input to wrap another module"
self.module = module
def __getattr__(self, name):
"""Forward missing attributes to twice-wrapped module."""
try:
# defer to nn.Module's logic
return super().__getattr__(name)
except AttributeError:
try:
# forward to the once-wrapped module
return getattr(self.module, name)
except AttributeError:
# forward to the twice-wrapped module
return getattr(self.module.module, name)
def state_dict(self, *args, **kwargs):
"""Forward to the twice-wrapped module."""
return self.module.module.state_dict(*args, **kwargs)
def load_state_dict(self, *args, **kwargs):
"""Forward to the twice-wrapped module."""
return self.module.module.load_state_dict(*args, **kwargs)
def forward(self, *args, **kwargs):
return self.module(*args, **kwargs)
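# Usage sketch (illustrative only; `model`, `device_ids` and the surrounding
# distributed setup are placeholders, mirroring the docstring above):
#
#   ddp_model = nn.parallel.DistributedDataParallel(model, device_ids=[0])
#   proxied = ModuleProxyWrapper(ddp_model)
#   state = proxied.state_dict()        # keys match model.state_dict()
#   proxied.load_state_dict(state)      # round-trips through the inner module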
| mit |
GoogleCloudPlatform/python-docs-samples | automl/beta/list_datasets.py | 2 | 2085 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START automl_video_classification_list_datasets_beta]
# [START automl_video_object_tracking_list_datasets_beta]
from google.cloud import automl_v1beta1 as automl
def list_datasets(project_id="YOUR_PROJECT_ID"):
"""List datasets."""
client = automl.AutoMlClient()
# A resource that represents Google Cloud Platform location.
project_location = f"projects/{project_id}/locations/us-central1"
# List all the datasets available in the region.
request = automl.ListDatasetsRequest(parent=project_location, filter="")
response = client.list_datasets(request=request)
print("List of datasets:")
for dataset in response:
print("Dataset name: {}".format(dataset.name))
print("Dataset id: {}".format(dataset.name.split("/")[-1]))
print("Dataset display name: {}".format(dataset.display_name))
print("Dataset create time: {}".format(dataset.create_time))
# [END automl_video_object_tracking_list_datasets_beta]
print(
"Video classification dataset metadata: {}".format(
dataset.video_classification_dataset_metadata
)
)
# [END automl_video_classification_list_datasets_beta]
# [START automl_video_object_tracking_list_datasets_beta]
print(
"Video object tracking dataset metadata: {}".format(
dataset.video_object_tracking_dataset_metadata
)
)
# [END automl_video_object_tracking_list_datasets_beta]
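# Usage sketch (not part of the sample; "my-project-id" is a placeholder):
#   if __name__ == "__main__":
#       list_datasets(project_id="my-project-id")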
| apache-2.0 |
haramoz/RND-ss14 | best_hyper_parameter_estimation.py | 1 | 4781 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
import glob
import csv
import os
from sklearn.svm import SVC
from sklearn.svm import SVR
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
import math
# Utility class to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
Normalize.__init__(self, vmin, vmax, clip)
def __call__(self, value, clip=None):
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
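# Usage sketch (illustrative values only): with vmin=2.0, vmax=100.0 and
# midpoint=12.0, a value at the midpoint maps to the middle of the colormap:
#   norm = MidpointNormalize(vmin=2.0, vmax=100.0, midpoint=12.0)
#   norm(12.0)  # -> 0.5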
##############################################################################
# Load my experiment data
def loadExperimentData():
path = "./tuft_real_data/25June/extractedFeatures/"
list_of_data_files = glob.glob(path+'data/*.csv')
list_of_data_files = sorted(list_of_data_files)
flagInitial = True
for file_name in list_of_data_files:
featureFileName = os.path.splitext(file_name)[0].split("/")[-1]
#print featureFileName
data = np.loadtxt(fname=file_name,delimiter=',')
if flagInitial:
flagInitial = False
trainData = data
else:
trainData = np.vstack((trainData,data))
#For reading the labels
list_of_label_files = glob.glob(path+'labels/*.csv')
list_of_label_files = sorted(list_of_label_files)
flagInitial = True
for file_name in list_of_label_files:
featureFileName = os.path.splitext(file_name)[0].split("/")[-1]
#print featureFileName
labels = np.loadtxt(fname=file_name,delimiter=',')
if flagInitial:
flagInitial = False
trainLabel = labels
else:
trainLabel = np.concatenate((trainLabel,labels),axis=0)
return trainData,trainLabel
##############################################################################
# Load and prepare data set
#
# dataset for grid search
traindata,trainlabel = loadExperimentData()
X = traindata
y = trainlabel
# Dataset for decision function visualization: we only keep the first two
# features in X and sub-sample the dataset to keep only two classes, to
# make it a binary classification problem.
'''X_2d = X[:, :2]
X_2d = X_2d[y > 0]
y_2d = y[y > 0]
y_2d -= 1'''
# It is usually a good idea to scale the data for SVM training.
# We are cheating a bit in this example in scaling all of the data,
# instead of fitting the transformation on the training set and
# just applying it on the test set.
print 1  # leftover progress marker: prints "1" before the scaling step
scaler = StandardScaler()
X = scaler.fit_transform(X)
#X_2d = scaler.fit_transform(X_2d)
##############################################################################
# Train classifiers
#
# For an initial search, a logarithmic grid with basis
# 10 is often helpful. Using a basis of 2, a finer
# tuning can be achieved but at a much higher cost.
C_range = np.logspace(-4, 3, 50)
gamma_range = np.logspace(-4, 3, 50)
print C_range,gamma_range
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(y, n_iter=1, test_size=0.5, random_state=42)
grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid.best_params_, grid.best_score_))
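# Follow-up sketch (not in the original script): with GridSearchCV's default
# refit=True the tuned classifier is available as grid.best_estimator_, or it
# can be rebuilt explicitly, e.g.
#   tuned_svc = SVC(**grid.best_params_).fit(X, y)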
# plot the scores of the grid
# grid_scores_ contains parameter settings and scores
# We extract just the scores
scores = [100*(1 - x[1]) for x in grid.grid_scores_]
scores = np.array(scores).reshape(len(C_range), len(gamma_range))
# Draw a heatmap of the validation misclassification % as a function of gamma and C
#
# The scores are encoded as colors with the hot colormap, which varies from dark
# red to bright yellow. Since this script plots misclassification percentages, a
# custom normalizer (MidpointNormalize with midpoint=12.0) is used so that small
# variations in the interesting range stay visible instead of all low values
# collapsing to the same color.
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
#plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,norm=MidpointNormalize(vmin=2.0, midpoint=12.0))
counter = 0
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), np.round(gamma_range,4), rotation=90)
plt.yticks(np.arange(len(C_range)), np.round(C_range,4))
plt.title('Misclassification %')
plt.show() | gpl-2.0 |
cxhernandez/msmbuilder | setup.py | 1 | 7479 | """MSMBuilder: Statistical models for Biomolecular Dynamics
"""
from __future__ import print_function, absolute_import
DOCLINES = __doc__.split("\n")
import sys
import traceback
import numpy as np
from os.path import join as pjoin
from setuptools import setup, Extension, find_packages
try:
sys.dont_write_bytecode = True
sys.path.insert(0, '.')
from basesetup import write_version_py, CompilerDetection, \
check_dependencies
finally:
sys.dont_write_bytecode = False
try:
import mdtraj
mdtraj_capi = mdtraj.capi()
except (ImportError, AttributeError):
print('=' * 80)
print('MDTraj version 1.1.X or later is required')
print('=' * 80)
traceback.print_exc()
sys.exit(1)
if '--debug' in sys.argv:
sys.argv.remove('--debug')
DEBUG = True
else:
DEBUG = False
if '--disable-openmp' in sys.argv:
sys.argv.remove('--disable-openmp')
DISABLE_OPENMP = True
else:
DISABLE_OPENMP = False
try:
import Cython
from Cython.Distutils import build_ext
if Cython.__version__ < '0.18':
raise ImportError()
except ImportError:
print(
'Cython version 0.18 or later is required. Try "conda install cython"')
sys.exit(1)
# #########################
VERSION = '3.6.0.dev0'
ISRELEASED = False
__version__ = VERSION
# #########################
CLASSIFIERS = """\
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)
Programming Language :: C++
Programming Language :: Python
Development Status :: 5 - Production/Stable
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
"""
if any(cmd in sys.argv for cmd in ('install', 'build', 'develop')):
check_dependencies((
('numpy',),
('scipy',),
('pandas',),
('six',),
('mdtraj',),
('sklearn', 'scikit-learn'),
('numpydoc',),
('tables', 'pytables'),
))
# Where to find extensions
MSMDIR = 'msmbuilder/msm/'
HMMDIR = 'msmbuilder/hmm/'
CLUSTERDIR = 'msmbuilder/cluster/'
compiler = CompilerDetection(DISABLE_OPENMP)
with open('msmbuilder/src/config.pxi', 'w') as f:
f.write('''
DEF DEBUG = {debug}
DEF OPENMP = {openmp}
'''.format(openmp=compiler.openmp_enabled, debug=DEBUG))
extensions = []
extensions.append(
Extension('msmbuilder.example_datasets._muller',
sources=[pjoin('msmbuilder', 'example_datasets', '_muller.pyx')],
include_dirs=[np.get_include()]))
extensions.append(
Extension('msmbuilder.msm._markovstatemodel',
sources=[pjoin(MSMDIR, '_markovstatemodel.pyx'),
pjoin(MSMDIR, 'src/transmat_mle_prinz.c')],
include_dirs=[pjoin(MSMDIR, 'src'), np.get_include()]))
extensions.append(
Extension('msmbuilder.tests.test_cyblas',
sources=['msmbuilder/tests/test_cyblas.pyx'],
include_dirs=['msmbuilder/src', np.get_include()]))
extensions.append(
Extension('msmbuilder.msm._ratematrix',
sources=[pjoin(MSMDIR, '_ratematrix.pyx')],
language='c++',
extra_compile_args=compiler.compiler_args_openmp,
libraries=compiler.compiler_libraries_openmp,
include_dirs=['msmbuilder/src', np.get_include()]))
extensions.append(
Extension('msmbuilder.decomposition._speigh',
sources=[pjoin('msmbuilder', 'decomposition', '_speigh.pyx')],
language='c++',
extra_compile_args=compiler.compiler_args_openmp,
libraries=compiler.compiler_libraries_openmp,
include_dirs=['msmbuilder/src', np.get_include()]))
extensions.append(
Extension('msmbuilder.msm._metzner_mcmc_fast',
sources=[pjoin(MSMDIR, '_metzner_mcmc_fast.pyx'),
pjoin(MSMDIR, 'src/metzner_mcmc.c')],
libraries=compiler.compiler_libraries_openmp,
extra_compile_args=compiler.compiler_args_openmp,
include_dirs=[pjoin(MSMDIR, 'src'), np.get_include()]))
extensions.append(
Extension('msmbuilder.libdistance',
language='c++',
sources=['msmbuilder/libdistance/libdistance.pyx'],
# msvc needs to be told "libtheobald", gcc wants just "theobald"
libraries=['%stheobald' % ('lib' if compiler.msvc else '')],
include_dirs=["msmbuilder/libdistance/src",
mdtraj_capi['include_dir'], np.get_include()],
library_dirs=[mdtraj_capi['lib_dir']],
))
extensions.append(
Extension('msmbuilder.cluster._kmedoids',
language='c++',
sources=[pjoin(CLUSTERDIR, '_kmedoids.pyx'),
pjoin(CLUSTERDIR, 'src', 'kmedoids.cc')],
include_dirs=[np.get_include()]))
# To get debug symbols on Windows, use
# extra_link_args=['/DEBUG']
# extra_compile_args=['/Zi']
extensions.append(
Extension('msmbuilder.hmm.gaussian',
language='c++',
sources=[pjoin(HMMDIR, 'gaussian.pyx'),
pjoin(HMMDIR, 'src/GaussianHMMFitter.cpp')],
libraries=compiler.compiler_libraries_openmp,
extra_compile_args=compiler.compiler_args_sse3
+ compiler.compiler_args_openmp,
include_dirs=[np.get_include(),
HMMDIR,
pjoin(HMMDIR, 'src/include/'),
pjoin(HMMDIR, 'src/')]))
extensions.append(
Extension('msmbuilder.hmm.vonmises',
language='c++',
sources=[pjoin(HMMDIR, 'vonmises.pyx'),
pjoin(HMMDIR, 'src/VonMisesHMMFitter.cpp'),
pjoin(HMMDIR, 'cephes/i0.c'),
pjoin(HMMDIR, 'cephes/chbevl.c')],
libraries=compiler.compiler_libraries_openmp,
extra_compile_args=compiler.compiler_args_sse3
+ compiler.compiler_args_openmp,
include_dirs=[np.get_include(),
HMMDIR,
pjoin(HMMDIR, 'src/include/'),
pjoin(HMMDIR, 'src/'),
pjoin(HMMDIR, 'cephes/')]))
write_version_py(VERSION, ISRELEASED, filename='msmbuilder/version.py')
setup(name='msmbuilder',
author='Robert McGibbon',
author_email='[email protected]',
description=DOCLINES[0],
long_description="\n".join(DOCLINES[2:]),
version=__version__,
url='https://github.com/msmbuilder/msmbuilder',
platforms=['Linux', 'Mac OS-X', 'Unix'],
classifiers=CLASSIFIERS.splitlines(),
packages=find_packages(),
package_data={
'msmbuilder.tests': ['workflows/*'],
'msmbuilder': ['project_templates/*.*',
'project_templates/*/*',
'io_templates/*',
],
},
entry_points={'console_scripts':
['msmb = msmbuilder.scripts.msmb:main']},
zip_safe=False,
ext_modules=extensions,
cmdclass={'build_ext': build_ext})
| lgpl-2.1 |
matthewzimmer/carnd-behavioral-cloning | basic.py | 1 | 3875 | import os
import csv
import shutil
import pandas as pd
import tensorflow as tf
import numpy as np
from keras.applications import VGG16
from keras.layers import Dense, Flatten, Dropout, ELU
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
import math
import cv2
import pickle
from scipy import misc, random
from sklearn.model_selection import train_test_split
from training import TrainTrackA, CommaAI, SimpleConvnet, Nvidia, Udacity, Basic
from zimpy.camera_preprocessor import preprocess_image, predict_images
from zimpy.generators.csv_image_provider import batch_generator, load_image
from zimpy.serializers.trained_data_serializer import TrainedDataSerializer
flags = tf.app.flags
FLAGS = flags.FLAGS
# command line flags
flags.DEFINE_integer('epochs', 2, "The number of epochs.")
flags.DEFINE_integer('batch_size', 32, "The batch size.")
flags.DEFINE_integer('samples_per_epoch', None, "The number of samples per epoch during training.")
flags.DEFINE_boolean('use_weights', False, "Whether to use prior trained weights.")
flags.DEFINE_float('lr', 0.0001, "Optimizer learning rate.")
train_samples_seen = []
X_train, y_train, X_val, y_val = None, None, None, None
img_rows, img_cols = None, None
def move_training_images(classifier):
drive_log_path = './driving_log.csv'
img_path = './IMG'
shutil.move(drive_log_path, drive_log_path + '_' + classifier.uuid)
shutil.move(img_path, img_path + '_' + classifier.uuid)
# os.remove(drive_log_path)
def load_track_csv():
X_train, y_train = [], []
# Only look at latest driving_log.csv
drive_log_path = './driving_log.csv'
if os.path.isfile(drive_log_path):
df = pd.read_csv(drive_log_path)
headers = list(df.columns.values)
print(headers)
for index, row in df.iterrows():
c = row['center'].strip()
l = row['left'].strip()
r = row['right'].strip()
a = float(row['steering'])
if os.path.isfile(c):
# casts absolute path to relative to remain env agnostic
l, c, r = [('IMG/' + os.path.split(file_path)[1]) for file_path in (l, c, r)]
# single string in memory
x = '{}:{}:{}'.format(l, c, r)
X_train.append(x)
y_train.append(a)
# Split some of the training data into a validation dataset
X_train, X_val, y_train, y_val = train_test_split(
X_train,
y_train,
test_size=0.15,
random_state=0)
X_train, y_train, X_val, y_val = np.array(X_train), np.array(y_train, dtype=np.float32), np.array(X_val), np.array(
y_val, dtype=np.float32)
return X_train, y_train, X_val, y_val
def main(_):
output_shape = (40, 80, 3)
X_train, y_train, X_val, y_val = load_track_csv()
print('population: ', len(X_train))
# train model
clf = Basic()
model = clf.get_model(input_shape=output_shape, output_shape=output_shape, use_weights=FLAGS.use_weights,
learning_rate=FLAGS.lr)
samples_per_epoch = len(X_train)
if FLAGS.samples_per_epoch is not None:
print('overriding samples per epoch from {} to {}'.format(samples_per_epoch, FLAGS.samples_per_epoch))
samples_per_epoch = FLAGS.samples_per_epoch
history = model.fit_generator(
batch_generator(X=X_train, Y=y_train, label='train set', num_epochs=FLAGS.epochs, flip_images=True,
batch_size=FLAGS.batch_size,
output_shape=output_shape),
nb_epoch=FLAGS.epochs,
samples_per_epoch=samples_per_epoch,
validation_data=None,
verbose=2)
print(history.history)
clf.save()
# move_training_images(clf)
# parses flags and calls the `main` function above
if __name__ == '__main__':
tf.app.run()
| mit |
kirangonella/BuildingMachineLearningSystemsWithPython | ch02/stump.py | 24 | 1604 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from sklearn.datasets import load_iris
data = load_iris()
features = data.data
labels = data.target_names[data.target]
is_setosa = (labels == 'setosa')
features = features[~is_setosa]
labels = labels[~is_setosa]
is_virginica = (labels == 'virginica')
# Initialize to a value that is worse than any possible test
best_acc = -1.0
# Loop over all the features
for fi in range(features.shape[1]):
# Test every possible threshold value for feature fi
thresh = features[:, fi].copy()
# Test them in order
thresh.sort()
for t in thresh:
# Generate predictions using t as a threshold
pred = (features[:, fi] > t)
# Accuracy is the fraction of predictions that match reality
acc = (pred == is_virginica).mean()
# We test whether negating the test is a better threshold:
acc_neg = ((~pred) == is_virginica).mean()
if acc_neg > acc:
acc = acc_neg
negated = True
else:
negated = False
# If this is better than previous best, then this is now the new best:
if acc > best_acc:
best_acc = acc
best_fi = fi
best_t = t
best_is_negated = negated
print('Best threshold is {0} on feature {1} (index {2}), which achieves accuracy of {3:.1%}.'.format(
best_t, data.feature_names[best_fi], best_fi, best_acc))
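# Prediction sketch (not part of the original example): the learned stump can be
# applied to a new measurement vector `x` using the values found above, e.g.
#   def stump_predict(x):
#       above = x[best_fi] > best_t
#       return (not above) if best_is_negated else above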
| mit |
anntzer/scikit-learn | sklearn/datasets/tests/test_lfw.py | 8 | 7493 | """This test for the LFW requires medium-size data downloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
import pytest
from functools import partial
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils._testing import assert_array_equal
from sklearn.datasets.tests.test_common import check_return_X_y
SCIKIT_LEARN_DATA = None
SCIKIT_LEARN_EMPTY_DATA = None
LFW_HOME = None
FAKE_NAMES = [
"Abdelatif_Smith",
"Abhati_Kepler",
"Camara_Alvaro",
"Chen_Dupont",
"John_Lee",
"Lin_Bauman",
"Onur_Lopez",
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
Image = pytest.importorskip("PIL.Image")
global SCIKIT_LEARN_DATA, SCIKIT_LEARN_EMPTY_DATA, LFW_HOME
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, "lfw_home")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, "lfw_funneled", name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + "_%04d.jpg" % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
img = Image.fromarray(uniface.astype(np.uint8))
img.save(file_path)
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, "lfw_funneled", ".test.swp"), "wb") as f:
f.write(b"Text file to be ignored by the dataset loader.")
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, "pairsDevTrain.txt"), "wb") as f:
f.write(b"10\n")
more_than_two = [name for name, count in counts.items() if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(("%s\t%d\t%d\n" % (name, first, second)).encode())
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(
(
"%s\t%d\t%s\t%d\n"
% (first_name, first_index, second_name, second_index)
).encode()
)
with open(os.path.join(LFW_HOME, "pairsDevTest.txt"), "wb") as f:
f.write(b"Fake place holder that won't be tested")
with open(os.path.join(LFW_HOME, "pairs.txt"), "wb") as f:
f.write(b"Fake place holder that won't be tested")
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
def test_load_empty_lfw_people():
with pytest.raises(IOError):
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(
data_home=SCIKIT_LEARN_DATA, min_faces_per_person=3, download_if_missing=False
)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert lfw_people.images.shape == (10, 62, 47)
assert lfw_people.data.shape == (10, 2914)
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ["Abdelatif Smith", "Abhati Kepler", "Onur Lopez"]
assert_array_equal(lfw_people.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion, and with no limit on the number of pictures per person
lfw_people = fetch_lfw_people(
data_home=SCIKIT_LEARN_DATA,
resize=None,
slice_=None,
color=True,
download_if_missing=False,
)
assert lfw_people.images.shape == (17, 250, 250, 3)
assert lfw_people.DESCR.startswith(".. _labeled_faces_in_the_wild_dataset:")
# the ids and class names are the same as previously
assert_array_equal(
lfw_people.target, [0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2]
)
assert_array_equal(
lfw_people.target_names,
[
"Abdelatif Smith",
"Abhati Kepler",
"Camara Alvaro",
"Chen Dupont",
"John Lee",
"Lin Bauman",
"Onur Lopez",
],
)
# test return_X_y option
fetch_func = partial(
fetch_lfw_people,
data_home=SCIKIT_LEARN_DATA,
resize=None,
slice_=None,
color=True,
download_if_missing=False,
)
check_return_X_y(lfw_people, fetch_func)
def test_load_fake_lfw_people_too_restrictive():
with pytest.raises(ValueError):
fetch_lfw_people(
data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=100,
download_if_missing=False,
)
def test_load_empty_lfw_pairs():
with pytest.raises(IOError):
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(
data_home=SCIKIT_LEARN_DATA, download_if_missing=False
)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert lfw_pairs_train.pairs.shape == (10, 2, 62, 47)
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ["Different persons", "Same person"]
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
    # It is possible to ask for the original data without any cropping or color
    # conversion
lfw_pairs_train = fetch_lfw_pairs(
data_home=SCIKIT_LEARN_DATA,
resize=None,
slice_=None,
color=True,
download_if_missing=False,
)
assert lfw_pairs_train.pairs.shape == (10, 2, 250, 250, 3)
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
assert lfw_pairs_train.DESCR.startswith(".. _labeled_faces_in_the_wild_dataset:")
| bsd-3-clause |
anntzer/scikit-learn | examples/feature_selection/plot_feature_selection_pipeline.py | 8 | 2757 | """
==================
Pipeline ANOVA SVM
==================
This example shows how feature selection can be easily integrated within
a machine learning pipeline.
We also show that you can easily inspect part of the pipeline.
"""
# %%
# We will start by generating a binary classification dataset. Subsequently, we
# will divide the dataset into two subsets.
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
X, y = make_classification(
n_features=20,
n_informative=3,
n_redundant=0,
n_classes=2,
n_clusters_per_class=2,
random_state=42,
)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# %%
# A common mistake with feature selection is to search for a subset of
# discriminative features on the full dataset instead of only on the
# training set. Using a scikit-learn :func:`~sklearn.pipeline.Pipeline`
# prevents such a mistake.
#
# Here, we will demonstrate how to build a pipeline where the first step will
# be the feature selection.
#
# When calling `fit` on the training data, a subset of features will be selected
# and the indices of these selected features will be stored. The feature selector
# will subsequently reduce the number of features, and pass this subset to the
# classifier which will be trained.
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC
anova_filter = SelectKBest(f_classif, k=3)
clf = LinearSVC()
anova_svm = make_pipeline(anova_filter, clf)
anova_svm.fit(X_train, y_train)
# %%
# Once the training is complete, we can predict on new unseen samples. In this
# case, the feature selector will only select the most discriminative features
# based on the information stored during training. Then, the data will be
# passed to the classifier which will make the prediction.
#
# Here, we show the final metrics via a classification report.
from sklearn.metrics import classification_report
y_pred = anova_svm.predict(X_test)
print(classification_report(y_test, y_pred))
# %%
# Be aware that you can inspect a step in the pipeline. For instance, we might
# be interested in the parameters of the classifier. Since we selected
# three features, we expect to have three coefficients.
anova_svm[-1].coef_
# %%
# However, we do not know which features were selected from the original
# dataset. We could proceed in several ways. Here, we will invert the
# transformation of these coefficients to get information about the original
# space.
anova_svm[:-1].inverse_transform(anova_svm[-1].coef_)
# %%
# We can see that the features with non-zero coefficients are the features
# selected by the first step.
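# %%
# As an additional sketch (the ANOVA step is a
# :class:`~sklearn.feature_selection.SelectKBest`, which provides
# ``get_support``), the indices of the selected features can also be read
# directly from the selector.
anova_svm[0].get_support(indices=True)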
| bsd-3-clause |
anntzer/scikit-learn | sklearn/tests/test_naive_bayes.py | 8 | 34518 | import re
import numpy as np
import scipy.sparse
import pytest
import warnings
from scipy.special import logsumexp
from sklearn.datasets import load_digits, load_iris
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_allclose
from sklearn.naive_bayes import GaussianNB, BernoulliNB
from sklearn.naive_bayes import MultinomialNB, ComplementNB
from sklearn.naive_bayes import CategoricalNB
DISCRETE_NAIVE_BAYES_CLASSES = [BernoulliNB, CategoricalNB, ComplementNB, MultinomialNB]
ALL_NAIVE_BAYES_CLASSES = DISCRETE_NAIVE_BAYES_CLASSES + [GaussianNB]
msg = "The default value for `force_alpha` will change"
pytestmark = pytest.mark.filterwarnings(f"ignore:{msg}:FutureWarning")
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
with pytest.raises(
ValueError, match="The target label.* in y do not exist in the initial classes"
):
GaussianNB().partial_fit(X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0, clf.class_prior_, 8)
clf = GaussianNB().fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB."""
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.var_, clf_sw.var_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.var_, clf2.var_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.var_, clf_sw.var_)
def test_gnb_neg_priors():
"""Test whether an error is raised in case of negative priors"""
clf = GaussianNB(priors=np.array([-1.0, 2.0]))
msg = "Priors must be non-negative"
with pytest.raises(ValueError, match=msg):
clf.fit(X, y)
def test_gnb_priors():
"""Test whether the class prior override is properly used"""
clf = GaussianNB(priors=np.array([0.3, 0.7])).fit(X, y)
assert_array_almost_equal(
clf.predict_proba([[-0.1, -0.1]]),
np.array([[0.825303662161683, 0.174696337838317]]),
8,
)
assert_array_almost_equal(clf.class_prior_, np.array([0.3, 0.7]))
def test_gnb_priors_sum_isclose():
    # Test whether the class prior sum is properly checked for closeness to 1
X = np.array(
[
[-1, -1],
[-2, -1],
[-3, -2],
[-4, -5],
[-5, -4],
[1, 1],
[2, 1],
[3, 2],
[4, 4],
[5, 5],
]
)
priors = np.array([0.08, 0.14, 0.03, 0.16, 0.11, 0.16, 0.07, 0.14, 0.11, 0.0])
Y = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
clf = GaussianNB(priors=priors)
# smoke test for issue #9633
clf.fit(X, Y)
def test_gnb_wrong_nb_priors():
"""Test whether an error is raised if the number of prior is different
from the number of class"""
clf = GaussianNB(priors=np.array([0.25, 0.25, 0.25, 0.25]))
msg = "Number of priors must match number of classes"
with pytest.raises(ValueError, match=msg):
clf.fit(X, y)
def test_gnb_prior_greater_one():
"""Test if an error is raised if the sum of prior greater than one"""
clf = GaussianNB(priors=np.array([2.0, 1.0]))
msg = "The sum of the priors should be 1"
with pytest.raises(ValueError, match=msg):
clf.fit(X, y)
def test_gnb_prior_large_bias():
"""Test if good prediction when class prior favor largely one class"""
clf = GaussianNB(priors=np.array([0.01, 0.99]))
clf.fit(X, y)
assert clf.predict([[-0.1, -0.1]]) == np.array([2])
def test_gnb_check_update_with_no_data():
"""Test when the partial fit is called without any data"""
# Create an empty array
prev_points = 100
mean = 0.0
var = 1.0
x_empty = np.empty((0, X.shape[1]))
tmean, tvar = GaussianNB._update_mean_variance(prev_points, mean, var, x_empty)
assert tmean == mean
assert tvar == var
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.var_, clf_pf.var_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.var_, clf_pf2.var_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_gnb_naive_bayes_scale_invariance():
# Scaling the data should not change the prediction results
iris = load_iris()
X, y = iris.data, iris.target
labels = [GaussianNB().fit(f * X, y).predict(f * X) for f in [1e-10, 1, 1e10]]
assert_array_equal(labels[0], labels[1])
assert_array_equal(labels[1], labels[2])
@pytest.mark.parametrize("DiscreteNaiveBayes", DISCRETE_NAIVE_BAYES_CLASSES)
def test_discretenb_prior(DiscreteNaiveBayes):
# Test whether class priors are properly set.
clf = DiscreteNaiveBayes().fit(X2, y2)
assert_array_almost_equal(
np.log(np.array([2, 2, 2]) / 6.0), clf.class_log_prior_, 8
)
@pytest.mark.parametrize("DiscreteNaiveBayes", DISCRETE_NAIVE_BAYES_CLASSES)
def test_discretenb_partial_fit(DiscreteNaiveBayes):
clf1 = DiscreteNaiveBayes()
clf1.fit([[0, 1], [1, 0], [1, 1]], [0, 1, 1])
clf2 = DiscreteNaiveBayes()
clf2.partial_fit([[0, 1], [1, 0], [1, 1]], [0, 1, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
if DiscreteNaiveBayes is CategoricalNB:
for i in range(len(clf1.category_count_)):
assert_array_equal(clf1.category_count_[i], clf2.category_count_[i])
else:
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = DiscreteNaiveBayes()
# all categories have to appear in the first partial fit
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
clf3.partial_fit([[1, 1]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
if DiscreteNaiveBayes is CategoricalNB:
# the categories for each feature of CategoricalNB are mapped to an
# index chronologically with each call of partial fit and therefore
# the category_count matrices cannot be compared for equality
for i in range(len(clf1.category_count_)):
assert_array_equal(
clf1.category_count_[i].shape, clf3.category_count_[i].shape
)
assert_array_equal(
np.sum(clf1.category_count_[i], axis=1),
np.sum(clf3.category_count_[i], axis=1),
)
# assert category 0 occurs 1x in the first class and 0x in the 2nd
# class
assert_array_equal(clf1.category_count_[0][0], np.array([1, 0]))
# assert category 1 occurs 0x in the first class and 2x in the 2nd
# class
assert_array_equal(clf1.category_count_[0][1], np.array([0, 2]))
# assert category 0 occurs 0x in the first class and 1x in the 2nd
# class
assert_array_equal(clf1.category_count_[1][0], np.array([0, 1]))
# assert category 1 occurs 1x in the first class and 1x in the 2nd
# class
assert_array_equal(clf1.category_count_[1][1], np.array([1, 1]))
else:
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
@pytest.mark.parametrize("NaiveBayes", ALL_NAIVE_BAYES_CLASSES)
def test_NB_partial_fit_no_first_classes(NaiveBayes):
# classes is required for first call to partial fit
with pytest.raises(
ValueError, match="classes must be passed on the first call to partial_fit."
):
NaiveBayes().partial_fit(X2, y2)
# check consistency of consecutive classes values
clf = NaiveBayes()
clf.partial_fit(X2, y2, classes=np.unique(y2))
with pytest.raises(
ValueError, match="is not the same as on last call to partial_fit"
):
clf.partial_fit(X2, y2, classes=np.arange(42))
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for DiscreteNaiveBayes, X in zip(
[BernoulliNB, MultinomialNB], [X_bernoulli, X_multinomial]
):
clf = DiscreteNaiveBayes().fit(X, y)
assert clf.predict(X[-1:]) == 2
assert clf.predict_proba([X[0]]).shape == (1, 2)
assert_array_almost_equal(
clf.predict_proba(X[:2]).sum(axis=1), np.array([1.0, 1.0]), 6
)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for DiscreteNaiveBayes, X in zip(
[BernoulliNB, MultinomialNB], [X_bernoulli, X_multinomial]
):
clf = DiscreteNaiveBayes().fit(X, y)
assert clf.predict_proba(X[0:1]).shape == (1, 3)
assert clf.predict_proba(X[:2]).shape == (2, 3)
assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
@pytest.mark.parametrize("DiscreteNaiveBayes", DISCRETE_NAIVE_BAYES_CLASSES)
def test_discretenb_uniform_prior(DiscreteNaiveBayes):
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
clf = DiscreteNaiveBayes()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_almost_equal(prior, np.array([0.5, 0.5]))
@pytest.mark.parametrize("DiscreteNaiveBayes", DISCRETE_NAIVE_BAYES_CLASSES)
def test_discretenb_provide_prior(DiscreteNaiveBayes):
# Test whether discrete NB classes use provided prior
clf = DiscreteNaiveBayes(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_almost_equal(prior, np.array([0.5, 0.5]))
# Inconsistent number of classes with prior
msg = "Number of priors must match number of classes"
with pytest.raises(ValueError, match=msg):
clf.fit([[0], [1], [2]], [0, 1, 2])
msg = "is not the same as on last call to partial_fit"
with pytest.raises(ValueError, match=msg):
clf.partial_fit([[0], [1]], [0, 1], classes=[0, 1, 1])
@pytest.mark.parametrize("DiscreteNaiveBayes", DISCRETE_NAIVE_BAYES_CLASSES)
def test_discretenb_provide_prior_with_partial_fit(DiscreteNaiveBayes):
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415
)
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = DiscreteNaiveBayes(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = DiscreteNaiveBayes(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1, classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(
clf_full.class_log_prior_, clf_partial.class_log_prior_
)
@pytest.mark.parametrize("DiscreteNaiveBayes", DISCRETE_NAIVE_BAYES_CLASSES)
def test_discretenb_sample_weight_multiclass(DiscreteNaiveBayes):
# check shape consistency for number of samples at fit time
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float64)
sample_weight /= sample_weight.sum()
clf = DiscreteNaiveBayes().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = DiscreteNaiveBayes()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2], sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
@pytest.mark.parametrize("DiscreteNaiveBayes", DISCRETE_NAIVE_BAYES_CLASSES)
@pytest.mark.parametrize("use_partial_fit", [False, True])
@pytest.mark.parametrize("train_on_single_class_y", [False, True])
def test_discretenb_degenerate_one_class_case(
DiscreteNaiveBayes,
use_partial_fit,
train_on_single_class_y,
):
# Most array attributes of a discrete naive Bayes classifier should have a
# first-axis length equal to the number of classes. Exceptions include:
# ComplementNB.feature_all_, CategoricalNB.n_categories_.
# Confirm that this is the case for binary problems and the degenerate
# case of a single class in the training set, when fitting with `fit` or
# `partial_fit`.
# Non-regression test for handling degenerate one-class case:
# https://github.com/scikit-learn/scikit-learn/issues/18974
X = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
y = [1, 1, 2]
if train_on_single_class_y:
X = X[:-1]
y = y[:-1]
classes = sorted(list(set(y)))
num_classes = len(classes)
clf = DiscreteNaiveBayes()
if use_partial_fit:
clf.partial_fit(X, y, classes=classes)
else:
clf.fit(X, y)
assert clf.predict(X[:1]) == y[0]
# Check that attributes have expected first-axis lengths
attribute_names = [
"classes_",
"class_count_",
"class_log_prior_",
"feature_count_",
"feature_log_prob_",
]
for attribute_name in attribute_names:
attribute = getattr(clf, attribute_name, None)
if attribute is None:
# CategoricalNB has no feature_count_ attribute
continue
if isinstance(attribute, np.ndarray):
assert attribute.shape[0] == num_classes
else:
# CategoricalNB.feature_log_prob_ is a list of arrays
for element in attribute:
assert element.shape[0] == num_classes
@pytest.mark.parametrize("kind", ("dense", "sparse"))
def test_mnnb(kind):
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
if kind == "dense":
X = X2
elif kind == "sparse":
X = scipy.sparse.csr_matrix(X2)
# Check the ability to predict the learning set.
clf = MultinomialNB()
msg = "Negative values in data passed to"
with pytest.raises(ValueError, match=msg):
clf.fit(-X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def test_mnb_prior_unobserved_targets():
# test smoothing of prior for yet unobserved targets
# Create toy training data
X = np.array([[0, 1], [1, 0]])
y = np.array([0, 1])
clf = MultinomialNB()
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
clf.partial_fit(X, y, classes=[0, 1, 2])
assert clf.predict([[0, 1]]) == 0
assert clf.predict([[1, 0]]) == 1
assert clf.predict([[1, 1]]) == 0
# add a training example with previously unobserved class
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
clf.partial_fit([[1, 1]], [2])
assert clf.predict([[0, 1]]) == 0
assert clf.predict([[1, 0]]) == 1
assert clf.predict([[1, 1]]) == 2
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array(
[[1, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 1, 0, 1, 0, 0], [0, 1, 1, 0, 0, 1]]
)
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
# Fit BernoulliBN w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array(
[
[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1 / 3.0, 2 / 3.0, 2 / 3.0, 1 / 3.0, 1 / 3.0, 2 / 3.0],
]
)
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([[0, 1, 1, 0, 0, 1]])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999, 0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
def test_bnb_feature_log_prob():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_almost_equal(clf.feature_log_prob_, (num - denom))
def test_cnb():
# Tests ComplementNB when alpha=1.0 for the toy example in Manning,
# Raghavan, and Schuetze's "Introduction to Information Retrieval" book:
# https://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo.
X = np.array(
[[1, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 1, 0, 1, 0, 0], [0, 1, 1, 0, 0, 1]]
)
# Classes are China (0), Japan (1).
Y = np.array([0, 0, 0, 1])
# Check that weights are correct. See steps 4-6 in Table 4 of
# Rennie et al. (2003).
theta = np.array(
[
[
(0 + 1) / (3 + 6),
(1 + 1) / (3 + 6),
(1 + 1) / (3 + 6),
(0 + 1) / (3 + 6),
(0 + 1) / (3 + 6),
(1 + 1) / (3 + 6),
],
[
(1 + 1) / (6 + 6),
(3 + 1) / (6 + 6),
(0 + 1) / (6 + 6),
(1 + 1) / (6 + 6),
(1 + 1) / (6 + 6),
(0 + 1) / (6 + 6),
],
]
)
weights = np.zeros(theta.shape)
normed_weights = np.zeros(theta.shape)
for i in range(2):
weights[i] = -np.log(theta[i])
normed_weights[i] = weights[i] / weights[i].sum()
# Verify inputs are nonnegative.
clf = ComplementNB(alpha=1.0)
msg = re.escape("Negative values in data passed to ComplementNB (input X)")
with pytest.raises(ValueError, match=msg):
clf.fit(-X, Y)
clf.fit(X, Y)
# Check that counts/weights are correct.
feature_count = np.array([[1, 3, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1]])
assert_array_equal(clf.feature_count_, feature_count)
class_count = np.array([3, 1])
assert_array_equal(clf.class_count_, class_count)
feature_all = np.array([1, 4, 1, 1, 1, 1])
assert_array_equal(clf.feature_all_, feature_all)
assert_array_almost_equal(clf.feature_log_prob_, weights)
clf = ComplementNB(alpha=1.0, norm=True)
clf.fit(X, Y)
assert_array_almost_equal(clf.feature_log_prob_, normed_weights)
def test_categoricalnb():
# Check the ability to predict the training set.
clf = CategoricalNB()
y_pred = clf.fit(X2, y2).predict(X2)
assert_array_equal(y_pred, y2)
X3 = np.array([[1, 4], [2, 5]])
y3 = np.array([1, 2])
clf = CategoricalNB(alpha=1, fit_prior=False)
clf.fit(X3, y3)
assert_array_equal(clf.n_categories_, np.array([3, 6]))
# Check error is raised for X with negative entries
X = np.array([[0, -1]])
y = np.array([1])
error_msg = re.escape("Negative values in data passed to CategoricalNB (input X)")
with pytest.raises(ValueError, match=error_msg):
clf.predict(X)
with pytest.raises(ValueError, match=error_msg):
clf.fit(X, y)
# Test alpha
X3_test = np.array([[2, 5]])
# alpha=1 increases the count of all categories by one so the final
# probability for each category is not 50/50 but 1/3 to 2/3
bayes_numerator = np.array([[1 / 3 * 1 / 3, 2 / 3 * 2 / 3]])
bayes_denominator = bayes_numerator.sum()
assert_array_almost_equal(
clf.predict_proba(X3_test), bayes_numerator / bayes_denominator
)
# Assert category_count has counted all features
assert len(clf.category_count_) == X3.shape[1]
# Check sample_weight
X = np.array([[0, 0], [0, 1], [0, 0], [1, 1]])
y = np.array([1, 1, 2, 2])
clf = CategoricalNB(alpha=1, fit_prior=False)
clf.fit(X, y)
assert_array_equal(clf.predict(np.array([[0, 0]])), np.array([1]))
assert_array_equal(clf.n_categories_, np.array([2, 2]))
for factor in [1.0, 0.3, 5, 0.0001]:
X = np.array([[0, 0], [0, 1], [0, 0], [1, 1]])
y = np.array([1, 1, 2, 2])
sample_weight = np.array([1, 1, 10, 0.1]) * factor
clf = CategoricalNB(alpha=1, fit_prior=False)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(np.array([[0, 0]])), np.array([2]))
assert_array_equal(clf.n_categories_, np.array([2, 2]))
@pytest.mark.parametrize(
"min_categories, exp_X1_count, exp_X2_count, new_X, exp_n_categories_",
[
# check min_categories with int > observed categories
(
3,
np.array([[2, 0, 0], [1, 1, 0]]),
np.array([[1, 1, 0], [1, 1, 0]]),
np.array([[0, 2]]),
np.array([3, 3]),
),
# check with list input
(
[3, 4],
np.array([[2, 0, 0], [1, 1, 0]]),
np.array([[1, 1, 0, 0], [1, 1, 0, 0]]),
np.array([[0, 3]]),
np.array([3, 4]),
),
# check min_categories with min less than actual
(
[
1,
np.array([[2, 0], [1, 1]]),
np.array([[1, 1], [1, 1]]),
np.array([[0, 1]]),
np.array([2, 2]),
]
),
],
)
def test_categoricalnb_with_min_categories(
min_categories, exp_X1_count, exp_X2_count, new_X, exp_n_categories_
):
X_n_categories = np.array([[0, 0], [0, 1], [0, 0], [1, 1]])
y_n_categories = np.array([1, 1, 2, 2])
expected_prediction = np.array([1])
clf = CategoricalNB(alpha=1, fit_prior=False, min_categories=min_categories)
clf.fit(X_n_categories, y_n_categories)
X1_count, X2_count = clf.category_count_
assert_array_equal(X1_count, exp_X1_count)
assert_array_equal(X2_count, exp_X2_count)
predictions = clf.predict(new_X)
assert_array_equal(predictions, expected_prediction)
assert_array_equal(clf.n_categories_, exp_n_categories_)
@pytest.mark.parametrize(
"min_categories, error_msg",
[
([[3, 2], [2, 4]], "'min_categories' should have shape"),
],
)
def test_categoricalnb_min_categories_errors(min_categories, error_msg):
X = np.array([[0, 0], [0, 1], [0, 0], [1, 1]])
y = np.array([1, 1, 2, 2])
clf = CategoricalNB(alpha=1, fit_prior=False, min_categories=min_categories)
with pytest.raises(ValueError, match=error_msg):
clf.fit(X, y)
def test_alpha():
# Setting alpha=0 should not output nan results when p(x_i|y_j)=0 is a case
X = np.array([[1, 0], [1, 1]])
y = np.array([0, 1])
nb = BernoulliNB(alpha=0.0)
msg = "alpha too small will result in numeric errors, setting alpha = 1.0e-10"
with pytest.warns(UserWarning, match=msg):
nb.partial_fit(X, y, classes=[0, 1])
with pytest.warns(UserWarning, match=msg):
nb.fit(X, y)
prob = np.array([[1, 0], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
nb = MultinomialNB(alpha=0.0)
with pytest.warns(UserWarning, match=msg):
nb.partial_fit(X, y, classes=[0, 1])
with pytest.warns(UserWarning, match=msg):
nb.fit(X, y)
prob = np.array([[2.0 / 3, 1.0 / 3], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
nb = CategoricalNB(alpha=0.0)
with pytest.warns(UserWarning, match=msg):
nb.fit(X, y)
prob = np.array([[1.0, 0.0], [0.0, 1.0]])
assert_array_almost_equal(nb.predict_proba(X), prob)
# Test sparse X
X = scipy.sparse.csr_matrix(X)
nb = BernoulliNB(alpha=0.0)
with pytest.warns(UserWarning, match=msg):
nb.fit(X, y)
prob = np.array([[1, 0], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
nb = MultinomialNB(alpha=0.0)
with pytest.warns(UserWarning, match=msg):
nb.fit(X, y)
prob = np.array([[2.0 / 3, 1.0 / 3], [0, 1]])
assert_array_almost_equal(nb.predict_proba(X), prob)
def test_alpha_vector():
X = np.array([[1, 0], [1, 1]])
y = np.array([0, 1])
# Setting alpha=np.array with same length
# as number of features should be fine
alpha = np.array([1, 2])
nb = MultinomialNB(alpha=alpha)
nb.partial_fit(X, y, classes=[0, 1])
    # Test that feature probabilities use pseudo-counts (alpha)
feature_prob = np.array([[1 / 2, 1 / 2], [2 / 5, 3 / 5]])
assert_array_almost_equal(nb.feature_log_prob_, np.log(feature_prob))
# Test predictions
prob = np.array([[5 / 9, 4 / 9], [25 / 49, 24 / 49]])
assert_array_almost_equal(nb.predict_proba(X), prob)
# Test alpha non-negative
alpha = np.array([1.0, -0.1])
m_nb = MultinomialNB(alpha=alpha)
expected_msg = "All values in alpha must be greater than 0."
with pytest.raises(ValueError, match=expected_msg):
m_nb.fit(X, y)
# Test that too small pseudo-counts are replaced
ALPHA_MIN = 1e-10
alpha = np.array([ALPHA_MIN / 2, 0.5])
m_nb = MultinomialNB(alpha=alpha)
m_nb.partial_fit(X, y, classes=[0, 1])
assert_array_almost_equal(m_nb._check_alpha(), [ALPHA_MIN, 0.5], decimal=12)
# Test correct dimensions
alpha = np.array([1.0, 2.0, 3.0])
m_nb = MultinomialNB(alpha=alpha)
expected_msg = "When alpha is an array, it should contains `n_features`"
with pytest.raises(ValueError, match=expected_msg):
m_nb.fit(X, y)
def test_check_accuracy_on_digits():
    # Non-regression test to make sure that any further refactoring / optimization
    # of the NB models does not harm the performance on a slightly non-linearly
    # separable dataset
X, y = load_digits(return_X_y=True)
binary_3v8 = np.logical_or(y == 3, y == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert scores.mean() > 0.86
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert scores.mean() > 0.94
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert scores.mean() > 0.83
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert scores.mean() > 0.92
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert scores.mean() > 0.77
scores = cross_val_score(GaussianNB(var_smoothing=0.1), X, y, cv=10)
assert scores.mean() > 0.89
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert scores.mean() > 0.86
# TODO(1.4): Remove
@pytest.mark.parametrize("Estimator", DISCRETE_NAIVE_BAYES_CLASSES)
@pytest.mark.parametrize("alpha", [1, [0.1, 1e-11], 1e-12])
def test_force_alpha_deprecation(Estimator, alpha):
if Estimator is CategoricalNB and isinstance(alpha, list):
pytest.skip("CategoricalNB does not support array-like alpha values.")
X = np.array([[1, 2], [3, 4]])
y = np.array([1, 0])
alpha_min = 1e-10
msg = "The default value for `force_alpha` will change to `True`"
est = Estimator(alpha=alpha)
est_force = Estimator(alpha=alpha, force_alpha=True)
if np.min(alpha) < alpha_min:
with pytest.warns(FutureWarning, match=msg):
est.fit(X, y)
else:
est.fit(X, y)
est_force.fit(X, y)
def test_check_alpha():
"""The provided value for alpha must only be
used if alpha < _ALPHA_MIN and force_alpha is True.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/10772
"""
_ALPHA_MIN = 1e-10
b = BernoulliNB(alpha=0, force_alpha=True)
assert b._check_alpha() == 0
alphas = np.array([0.0, 1.0])
b = BernoulliNB(alpha=alphas, force_alpha=True)
    # We manually set `n_features_in_` so that `_check_alpha` does not raise
b.n_features_in_ = alphas.shape[0]
assert_array_equal(b._check_alpha(), alphas)
msg = (
"alpha too small will result in numeric errors, setting alpha = %.1e"
% _ALPHA_MIN
)
b = BernoulliNB(alpha=0, force_alpha=False)
with pytest.warns(UserWarning, match=msg):
assert b._check_alpha() == _ALPHA_MIN
b = BernoulliNB(alpha=0)
with pytest.warns(UserWarning, match=msg):
assert b._check_alpha() == _ALPHA_MIN
b = BernoulliNB(alpha=alphas, force_alpha=False)
    # We manually set `n_features_in_` so that `_check_alpha` does not raise
b.n_features_in_ = alphas.shape[0]
with pytest.warns(UserWarning, match=msg):
assert_array_equal(b._check_alpha(), np.array([_ALPHA_MIN, 1.0]))
@pytest.mark.parametrize("Estimator", ALL_NAIVE_BAYES_CLASSES)
def test_predict_joint_proba(Estimator):
est = Estimator().fit(X2, y2)
jll = est.predict_joint_log_proba(X2)
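    # By Bayes' rule, log P(y | x) = log P(x, y) - log P(x), where
    # log P(x) is the logsumexp of the joint log-likelihood over classes.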
log_prob_x = logsumexp(jll, axis=1)
log_prob_x_y = jll - np.atleast_2d(log_prob_x).T
assert_allclose(est.predict_log_proba(X2), log_prob_x_y)
| bsd-3-clause |
anntzer/scikit-learn | sklearn/ensemble/_voting.py | 9 | 22265 | """
Soft Voting/Majority Rule classifier and Voting regressor.
This module contains:
- A Soft Voting/Majority Rule classifier for classification estimators.
- A Voting regressor for regression estimators.
"""
# Authors: Sebastian Raschka <[email protected]>,
# Gilles Louppe <[email protected]>,
# Ramil Nugmanov <[email protected]>
# Mohamed Ali Jamaoui <[email protected]>
#
# License: BSD 3 clause
from abc import abstractmethod
from numbers import Integral
import numpy as np
from joblib import Parallel
from ..base import ClassifierMixin
from ..base import RegressorMixin
from ..base import TransformerMixin
from ..base import clone
from ._base import _fit_single_estimator
from ._base import _BaseHeterogeneousEnsemble
from ..preprocessing import LabelEncoder
from ..utils import Bunch
from ..utils.metaestimators import available_if
from ..utils.validation import check_is_fitted
from ..utils.validation import _check_feature_names_in
from ..utils.multiclass import check_classification_targets
from ..utils.validation import column_or_1d
from ..utils._param_validation import StrOptions
from ..exceptions import NotFittedError
from ..utils._estimator_html_repr import _VisualBlock
from ..utils.fixes import delayed
class _BaseVoting(TransformerMixin, _BaseHeterogeneousEnsemble):
"""Base class for voting.
Warning: This class should not be used directly. Use derived classes
instead.
"""
_parameter_constraints: dict = {
"estimators": [list],
"weights": ["array-like", None],
"n_jobs": [None, Integral],
"verbose": ["verbose"],
}
def _log_message(self, name, idx, total):
if not self.verbose:
return None
return f"({idx} of {total}) Processing {name}"
@property
def _weights_not_none(self):
"""Get the weights of not `None` estimators."""
if self.weights is None:
return None
return [w for est, w in zip(self.estimators, self.weights) if est[1] != "drop"]
def _predict(self, X):
"""Collect results from clf.predict calls."""
return np.asarray([est.predict(X) for est in self.estimators_]).T
@abstractmethod
def fit(self, X, y, sample_weight=None):
"""Get common fit operations."""
names, clfs = self._validate_estimators()
if self.weights is not None and len(self.weights) != len(self.estimators):
raise ValueError(
"Number of `estimators` and weights must be equal; got"
f" {len(self.weights)} weights, {len(self.estimators)} estimators"
)
self.estimators_ = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_single_estimator)(
clone(clf),
X,
y,
sample_weight=sample_weight,
message_clsname="Voting",
message=self._log_message(names[idx], idx + 1, len(clfs)),
)
for idx, clf in enumerate(clfs)
if clf != "drop"
)
self.named_estimators_ = Bunch()
# Uses 'drop' as placeholder for dropped estimators
est_iter = iter(self.estimators_)
for name, est in self.estimators:
current_est = est if est == "drop" else next(est_iter)
self.named_estimators_[name] = current_est
if hasattr(current_est, "feature_names_in_"):
self.feature_names_in_ = current_est.feature_names_in_
return self
def fit_transform(self, X, y=None, **fit_params):
"""Return class labels or probabilities for each estimator.
Return predictions for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix, dataframe} of shape \
(n_samples, n_features)
Input samples.
y : ndarray of shape (n_samples,), default=None
Target values (None for unsupervised transformations).
**fit_params : dict
Additional fit parameters.
Returns
-------
        X_new : ndarray of shape (n_samples, n_features_new)
Transformed array.
"""
return super().fit_transform(X, y, **fit_params)
@property
def n_features_in_(self):
"""Number of features seen during :term:`fit`."""
        # For consistency with other estimators we raise an AttributeError so
# that hasattr() fails if the estimator isn't fitted.
try:
check_is_fitted(self)
except NotFittedError as nfe:
raise AttributeError(
"{} object has no n_features_in_ attribute.".format(
self.__class__.__name__
)
) from nfe
return self.estimators_[0].n_features_in_
def _sk_visual_block_(self):
names, estimators = zip(*self.estimators)
return _VisualBlock("parallel", estimators, names=names)
def _more_tags(self):
return {"preserves_dtype": []}
class VotingClassifier(ClassifierMixin, _BaseVoting):
"""Soft Voting/Majority Rule classifier for unfitted estimators.
Read more in the :ref:`User Guide <voting_classifier>`.
.. versionadded:: 0.17
Parameters
----------
estimators : list of (str, estimator) tuples
Invoking the ``fit`` method on the ``VotingClassifier`` will fit clones
of those original estimators that will be stored in the class attribute
``self.estimators_``. An estimator can be set to ``'drop'`` using
:meth:`set_params`.
.. versionchanged:: 0.21
``'drop'`` is accepted. Using None was deprecated in 0.22 and
support was removed in 0.24.
voting : {'hard', 'soft'}, default='hard'
If 'hard', uses predicted class labels for majority rule voting.
Else if 'soft', predicts the class label based on the argmax of
the sums of the predicted probabilities, which is recommended for
an ensemble of well-calibrated classifiers.
weights : array-like of shape (n_classifiers,), default=None
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted class labels (`hard` voting) or class probabilities
before averaging (`soft` voting). Uses uniform weights if `None`.
n_jobs : int, default=None
The number of jobs to run in parallel for ``fit``.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionadded:: 0.18
flatten_transform : bool, default=True
        Affects shape of transform output only when voting='soft'.
If voting='soft' and flatten_transform=True, transform method returns
matrix with shape (n_samples, n_classifiers * n_classes). If
flatten_transform=False, it returns
(n_classifiers, n_samples, n_classes).
verbose : bool, default=False
If True, the time elapsed while fitting will be printed as it
is completed.
.. versionadded:: 0.23
Attributes
----------
estimators_ : list of classifiers
The collection of fitted sub-estimators as defined in ``estimators``
that are not 'drop'.
named_estimators_ : :class:`~sklearn.utils.Bunch`
Attribute to access any fitted sub-estimators by name.
.. versionadded:: 0.20
le_ : :class:`~sklearn.preprocessing.LabelEncoder`
Transformer used to encode the labels during fit and decode during
prediction.
classes_ : ndarray of shape (n_classes,)
The classes labels.
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying classifier exposes such an attribute when fit.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Only defined if the
underlying estimators expose such an attribute when fit.
.. versionadded:: 1.0
See Also
--------
VotingRegressor : Prediction voting regressor.
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.ensemble import RandomForestClassifier, VotingClassifier
>>> clf1 = LogisticRegression(multi_class='multinomial', random_state=1)
>>> clf2 = RandomForestClassifier(n_estimators=50, random_state=1)
>>> clf3 = GaussianNB()
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> eclf1 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)], voting='hard')
>>> eclf1 = eclf1.fit(X, y)
>>> print(eclf1.predict(X))
[1 1 1 2 2 2]
>>> np.array_equal(eclf1.named_estimators_.lr.predict(X),
... eclf1.named_estimators_['lr'].predict(X))
True
>>> eclf2 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft')
>>> eclf2 = eclf2.fit(X, y)
>>> print(eclf2.predict(X))
[1 1 1 2 2 2]
To drop an estimator, :meth:`set_params` can be used to remove it. Here we
dropped one of the estimators, resulting in 2 fitted estimators:
>>> eclf2 = eclf2.set_params(lr='drop')
>>> eclf2 = eclf2.fit(X, y)
>>> len(eclf2.estimators_)
2
Setting `flatten_transform=True` with `voting='soft'` flattens output shape of
`transform`:
>>> eclf3 = VotingClassifier(estimators=[
... ('lr', clf1), ('rf', clf2), ('gnb', clf3)],
... voting='soft', weights=[2,1,1],
... flatten_transform=True)
>>> eclf3 = eclf3.fit(X, y)
>>> print(eclf3.predict(X))
[1 1 1 2 2 2]
>>> print(eclf3.transform(X).shape)
(6, 6)
"""
_parameter_constraints: dict = {
**_BaseVoting._parameter_constraints,
"voting": [StrOptions({"hard", "soft"})],
"flatten_transform": ["boolean"],
}
def __init__(
self,
estimators,
*,
voting="hard",
weights=None,
n_jobs=None,
flatten_transform=True,
verbose=False,
):
super().__init__(estimators=estimators)
self.voting = voting
self.weights = weights
self.n_jobs = n_jobs
self.flatten_transform = flatten_transform
self.verbose = verbose
def fit(self, X, y, sample_weight=None):
"""Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Note that this is supported only if all underlying estimators
support sample weights.
.. versionadded:: 0.18
Returns
-------
self : object
Returns the instance itself.
"""
self._validate_params()
check_classification_targets(y)
if isinstance(y, np.ndarray) and len(y.shape) > 1 and y.shape[1] > 1:
raise NotImplementedError(
"Multilabel and multi-output classification is not supported."
)
self.le_ = LabelEncoder().fit(y)
self.classes_ = self.le_.classes_
transformed_y = self.le_.transform(y)
return super().fit(X, transformed_y, sample_weight)
def predict(self, X):
"""Predict class labels for X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
Returns
-------
maj : array-like of shape (n_samples,)
Predicted class labels.
"""
check_is_fitted(self)
if self.voting == "soft":
maj = np.argmax(self.predict_proba(X), axis=1)
else: # 'hard' voting
predictions = self._predict(X)
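            # Weighted majority vote: for each sample, np.bincount accumulates
            # the (optionally weighted) votes per encoded class label and
            # np.argmax picks the winning class.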
maj = np.apply_along_axis(
lambda x: np.argmax(np.bincount(x, weights=self._weights_not_none)),
axis=1,
arr=predictions,
)
maj = self.le_.inverse_transform(maj)
return maj
def _collect_probas(self, X):
"""Collect results from clf.predict calls."""
return np.asarray([clf.predict_proba(X) for clf in self.estimators_])
def _check_voting(self):
if self.voting == "hard":
raise AttributeError(
f"predict_proba is not available when voting={repr(self.voting)}"
)
return True
@available_if(_check_voting)
def predict_proba(self, X):
"""Compute probabilities of possible outcomes for samples in X.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
Returns
-------
avg : array-like of shape (n_samples, n_classes)
Weighted average probability for each class per sample.
"""
check_is_fitted(self)
avg = np.average(
self._collect_probas(X), axis=0, weights=self._weights_not_none
)
return avg
def transform(self, X):
"""Return class labels or probabilities for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
Returns
-------
probabilities_or_labels
If `voting='soft'` and `flatten_transform=True`:
returns ndarray of shape (n_samples, n_classifiers * n_classes),
being class probabilities calculated by each classifier.
            If `voting='soft'` and `flatten_transform=False`:
ndarray of shape (n_classifiers, n_samples, n_classes)
If `voting='hard'`:
ndarray of shape (n_samples, n_classifiers), being
class labels predicted by each classifier.
"""
check_is_fitted(self)
if self.voting == "soft":
probas = self._collect_probas(X)
if not self.flatten_transform:
return probas
return np.hstack(probas)
else:
return self._predict(X)
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Not used, present here for API consistency by convention.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
if self.voting == "soft" and not self.flatten_transform:
raise ValueError(
"get_feature_names_out is not supported when `voting='soft'` and "
"`flatten_transform=False`"
)
_check_feature_names_in(self, input_features, generate_names=False)
class_name = self.__class__.__name__.lower()
active_names = [name for name, est in self.estimators if est != "drop"]
if self.voting == "hard":
return np.asarray(
[f"{class_name}_{name}" for name in active_names], dtype=object
)
# voting == "soft"
n_classes = len(self.classes_)
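        # Illustrative example of the naming scheme built below: with
        # estimators ("lr", "rf") and two classes, the output is
        # ["votingclassifier_lr0", "votingclassifier_lr1",
        #  "votingclassifier_rf0", "votingclassifier_rf1"].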
names_out = [
f"{class_name}_{name}{i}" for name in active_names for i in range(n_classes)
]
return np.asarray(names_out, dtype=object)
class VotingRegressor(RegressorMixin, _BaseVoting):
"""Prediction voting regressor for unfitted estimators.
A voting regressor is an ensemble meta-estimator that fits several base
regressors, each on the whole dataset. Then it averages the individual
predictions to form a final prediction.
Read more in the :ref:`User Guide <voting_regressor>`.
.. versionadded:: 0.21
Parameters
----------
estimators : list of (str, estimator) tuples
Invoking the ``fit`` method on the ``VotingRegressor`` will fit clones
of those original estimators that will be stored in the class attribute
``self.estimators_``. An estimator can be set to ``'drop'`` using
:meth:`set_params`.
.. versionchanged:: 0.21
``'drop'`` is accepted. Using None was deprecated in 0.22 and
support was removed in 0.24.
weights : array-like of shape (n_regressors,), default=None
Sequence of weights (`float` or `int`) to weight the occurrences of
predicted values before averaging. Uses uniform weights if `None`.
n_jobs : int, default=None
The number of jobs to run in parallel for ``fit``.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
verbose : bool, default=False
If True, the time elapsed while fitting will be printed as it
is completed.
.. versionadded:: 0.23
Attributes
----------
estimators_ : list of regressors
The collection of fitted sub-estimators as defined in ``estimators``
that are not 'drop'.
named_estimators_ : :class:`~sklearn.utils.Bunch`
Attribute to access any fitted sub-estimators by name.
.. versionadded:: 0.20
n_features_in_ : int
Number of features seen during :term:`fit`. Only defined if the
underlying regressor exposes such an attribute when fit.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Only defined if the
underlying estimators expose such an attribute when fit.
.. versionadded:: 1.0
See Also
--------
VotingClassifier : Soft Voting/Majority Rule classifier.
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import LinearRegression
>>> from sklearn.ensemble import RandomForestRegressor
>>> from sklearn.ensemble import VotingRegressor
>>> from sklearn.neighbors import KNeighborsRegressor
>>> r1 = LinearRegression()
>>> r2 = RandomForestRegressor(n_estimators=10, random_state=1)
>>> r3 = KNeighborsRegressor()
>>> X = np.array([[1, 1], [2, 4], [3, 9], [4, 16], [5, 25], [6, 36]])
>>> y = np.array([2, 6, 12, 20, 30, 42])
>>> er = VotingRegressor([('lr', r1), ('rf', r2), ('r3', r3)])
>>> print(er.fit(X, y).predict(X))
[ 6.8... 8.4... 12.5... 17.8... 26... 34...]
In the following example, we drop the `'lr'` estimator with
:meth:`~VotingRegressor.set_params` and fit the remaining two estimators:
>>> er = er.set_params(lr='drop')
>>> er = er.fit(X, y)
>>> len(er.estimators_)
2
"""
def __init__(self, estimators, *, weights=None, n_jobs=None, verbose=False):
super().__init__(estimators=estimators)
self.weights = weights
self.n_jobs = n_jobs
self.verbose = verbose
def fit(self, X, y, sample_weight=None):
"""Fit the estimators.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target values.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights. If None, then samples are equally weighted.
Note that this is supported only if all underlying estimators
support sample weights.
Returns
-------
self : object
Fitted estimator.
"""
self._validate_params()
y = column_or_1d(y, warn=True)
return super().fit(X, y, sample_weight)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the estimators in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
Returns
-------
y : ndarray of shape (n_samples,)
The predicted values.
"""
check_is_fitted(self)
return np.average(self._predict(X), axis=1, weights=self._weights_not_none)
def transform(self, X):
"""Return predictions for X for each estimator.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples.
Returns
-------
predictions : ndarray of shape (n_samples, n_classifiers)
Values predicted by each regressor.
"""
check_is_fitted(self)
return self._predict(X)
def get_feature_names_out(self, input_features=None):
"""Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Not used, present here for API consistency by convention.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
"""
_check_feature_names_in(self, input_features, generate_names=False)
class_name = self.__class__.__name__.lower()
return np.asarray(
[f"{class_name}_{name}" for name, est in self.estimators if est != "drop"],
dtype=object,
)
| bsd-3-clause |
anntzer/scikit-learn | examples/model_selection/plot_randomized_search.py | 13 | 3040 | """
=========================================================================
Comparing randomized search and grid search for hyperparameter estimation
=========================================================================
Compare randomized search and grid search for optimizing hyperparameters of a
linear SVM with SGD training.
All parameters that influence the learning are searched simultaneously
(except for the number of estimators, which poses a time / quality tradeoff).
The randomized search and the grid search explore exactly the same space of
parameters. The resulting parameter settings are quite similar, while the run
time for the randomized search is drastically lower.
The performance may be slightly worse for the randomized search; this is
likely due to a noise effect and would not carry over to a held-out test set.
Note that in practice, one would not search over this many different parameters
simultaneously using grid search, but pick only the ones deemed most important.
"""
import numpy as np
from time import time
import scipy.stats as stats
from sklearn.utils.fixes import loguniform
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.datasets import load_digits
from sklearn.linear_model import SGDClassifier
# get some data
X, y = load_digits(return_X_y=True, n_class=3)
# build a classifier
clf = SGDClassifier(loss="hinge", penalty="elasticnet", fit_intercept=True)
# Utility function to report best scores
def report(results, n_top=3):
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results["rank_test_score"] == i)
for candidate in candidates:
print("Model with rank: {0}".format(i))
print(
"Mean validation score: {0:.3f} (std: {1:.3f})".format(
results["mean_test_score"][candidate],
results["std_test_score"][candidate],
)
)
print("Parameters: {0}".format(results["params"][candidate]))
print("")
# specify parameters and distributions to sample from
param_dist = {
"average": [True, False],
"l1_ratio": stats.uniform(0, 1),
"alpha": loguniform(1e-2, 1e0),
}
# run randomized search
n_iter_search = 15
random_search = RandomizedSearchCV(
clf, param_distributions=param_dist, n_iter=n_iter_search
)
start = time()
random_search.fit(X, y)
print(
"RandomizedSearchCV took %.2f seconds for %d candidates parameter settings."
% ((time() - start), n_iter_search)
)
report(random_search.cv_results_)
# use a full grid over all parameters
param_grid = {
"average": [True, False],
"l1_ratio": np.linspace(0, 1, num=10),
"alpha": np.power(10, np.arange(-2, 1, dtype=float)),
}
# run grid search
grid_search = GridSearchCV(clf, param_grid=param_grid)
start = time()
grid_search.fit(X, y)
print(
"GridSearchCV took %.2f seconds for %d candidate parameter settings."
% (time() - start, len(grid_search.cv_results_["params"]))
)
report(grid_search.cv_results_)
| bsd-3-clause |
zvelo/msg | python/msg_pb2_grpc.py | 1 | 3554 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
import msg_pb2 as msg__pb2
class APIv1Stub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Query = channel.unary_unary(
'/zvelo.msg.APIv1/Query',
request_serializer=msg__pb2.QueryRequests.SerializeToString,
response_deserializer=msg__pb2.QueryReplies.FromString,
)
self.Result = channel.unary_unary(
'/zvelo.msg.APIv1/Result',
request_serializer=msg__pb2.RequestID.SerializeToString,
response_deserializer=msg__pb2.QueryResult.FromString,
)
self.Suggest = channel.unary_unary(
'/zvelo.msg.APIv1/Suggest',
request_serializer=msg__pb2.Suggestion.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.Stream = channel.unary_stream(
'/zvelo.msg.APIv1/Stream',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=msg__pb2.QueryResult.FromString,
)
class APIv1Servicer(object):
# missing associated documentation comment in .proto file
pass
def Query(self, request, context):
"""Create new query
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Result(self, request, context):
"""Results of active or unexpired query
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Suggest(self, request, context):
"""Suggest new datasets for a URL
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Stream(self, request, context):
"""Stream returns all QueryResult messages processed by zveloAPI
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_APIv1Servicer_to_server(servicer, server):
rpc_method_handlers = {
'Query': grpc.unary_unary_rpc_method_handler(
servicer.Query,
request_deserializer=msg__pb2.QueryRequests.FromString,
response_serializer=msg__pb2.QueryReplies.SerializeToString,
),
'Result': grpc.unary_unary_rpc_method_handler(
servicer.Result,
request_deserializer=msg__pb2.RequestID.FromString,
response_serializer=msg__pb2.QueryResult.SerializeToString,
),
'Suggest': grpc.unary_unary_rpc_method_handler(
servicer.Suggest,
request_deserializer=msg__pb2.Suggestion.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'Stream': grpc.unary_stream_rpc_method_handler(
servicer.Stream,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=msg__pb2.QueryResult.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'zvelo.msg.APIv1', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
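if __name__ == '__main__':
    # Minimal client-side usage sketch; this block is not part of the
    # generated stub code. The server address below is a placeholder
    # assumption, and the empty QueryRequests message is illustrative only;
    # a real request would populate the fields defined in msg.proto.
    channel = grpc.insecure_channel('localhost:50051')
    stub = APIv1Stub(channel)
    replies = stub.Query(msg__pb2.QueryRequests())
    print(replies)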
| mit |
anntzer/scikit-learn | sklearn/tests/test_calibration.py | 9 | 39439 | # Authors: Alexandre Gramfort <[email protected]>
# License: BSD 3 clause
import pytest
import numpy as np
from numpy.testing import assert_allclose
from scipy import sparse
from sklearn.base import BaseEstimator, clone
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import LeaveOneOut, train_test_split
from sklearn.utils._testing import (
assert_array_almost_equal,
assert_almost_equal,
assert_array_equal,
)
from sklearn.utils.extmath import softmax
from sklearn.exceptions import NotFittedError
from sklearn.datasets import make_classification, make_blobs, load_iris
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import KFold, cross_val_predict
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import (
RandomForestClassifier,
VotingClassifier,
)
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.isotonic import IsotonicRegression
from sklearn.feature_extraction import DictVectorizer
from sklearn.impute import SimpleImputer
from sklearn.metrics import brier_score_loss
from sklearn.calibration import (
_CalibratedClassifier,
_SigmoidCalibration,
_sigmoid_calibration,
CalibratedClassifierCV,
CalibrationDisplay,
calibration_curve,
)
from sklearn.utils._mocking import CheckingClassifier
from sklearn.utils._testing import _convert_container
N_SAMPLES = 200
@pytest.fixture(scope="module")
def data():
X, y = make_classification(n_samples=N_SAMPLES, n_features=6, random_state=42)
return X, y
@pytest.mark.parametrize("method", ["sigmoid", "isotonic"])
@pytest.mark.parametrize("ensemble", [True, False])
def test_calibration(data, method, ensemble):
# Test calibration objects with isotonic and sigmoid
n_samples = N_SAMPLES // 2
X, y = data
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB(force_alpha=True).fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
cal_clf = CalibratedClassifierCV(clf, cv=y.size + 1, ensemble=ensemble)
with pytest.raises(ValueError):
cal_clf.fit(X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [
(X_train, X_test),
(sparse.csr_matrix(X_train), sparse.csr_matrix(X_test)),
]:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=5, ensemble=ensemble)
# Note that this fit overwrites the fit on the entire training
# set
cal_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_cal_clf = cal_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert brier_score_loss(y_test, prob_pos_clf) > brier_score_loss(
y_test, prob_pos_cal_clf
)
# Check invariance against relabeling [0, 1] -> [1, 2]
cal_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_cal_clf, prob_pos_cal_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
cal_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_cal_clf, prob_pos_cal_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
cal_clf.fit(this_X_train, (y_train + 1) % 2, sample_weight=sw_train)
prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_cal_clf, 1 - prob_pos_cal_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert brier_score_loss(y_test, prob_pos_clf) > brier_score_loss(
(y_test + 1) % 2, prob_pos_cal_clf_relabeled
)
def test_calibration_default_estimator(data):
# Check estimator default is LinearSVC
X, y = data
calib_clf = CalibratedClassifierCV(cv=2)
calib_clf.fit(X, y)
base_est = calib_clf.calibrated_classifiers_[0].estimator
assert isinstance(base_est, LinearSVC)
@pytest.mark.parametrize("ensemble", [True, False])
def test_calibration_cv_splitter(data, ensemble):
# Check when `cv` is a CV splitter
X, y = data
splits = 5
kfold = KFold(n_splits=splits)
calib_clf = CalibratedClassifierCV(cv=kfold, ensemble=ensemble)
assert isinstance(calib_clf.cv, KFold)
assert calib_clf.cv.n_splits == splits
calib_clf.fit(X, y)
expected_n_clf = splits if ensemble else 1
assert len(calib_clf.calibrated_classifiers_) == expected_n_clf
@pytest.mark.parametrize("method", ["sigmoid", "isotonic"])
@pytest.mark.parametrize("ensemble", [True, False])
def test_sample_weight(data, method, ensemble):
n_samples = N_SAMPLES // 2
X, y = data
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(estimator, method=method, ensemble=ensemble)
calibrated_clf.fit(X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
# different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert diff > 0.1
@pytest.mark.parametrize("method", ["sigmoid", "isotonic"])
@pytest.mark.parametrize("ensemble", [True, False])
def test_parallel_execution(data, method, ensemble):
"""Test parallel calibration"""
X, y = data
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
estimator = LinearSVC(random_state=42)
cal_clf_parallel = CalibratedClassifierCV(
estimator, method=method, n_jobs=2, ensemble=ensemble
)
cal_clf_parallel.fit(X_train, y_train)
probs_parallel = cal_clf_parallel.predict_proba(X_test)
cal_clf_sequential = CalibratedClassifierCV(
estimator, method=method, n_jobs=1, ensemble=ensemble
)
cal_clf_sequential.fit(X_train, y_train)
probs_sequential = cal_clf_sequential.predict_proba(X_test)
assert_allclose(probs_parallel, probs_sequential)
@pytest.mark.parametrize("method", ["sigmoid", "isotonic"])
@pytest.mark.parametrize("ensemble", [True, False])
# increase the number of RNG seeds to assess the statistical stability of this
# test:
@pytest.mark.parametrize("seed", range(2))
def test_calibration_multiclass(method, ensemble, seed):
def multiclass_brier(y_true, proba_pred, n_classes):
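        # Mean squared difference between one-hot encoded targets and the
        # predicted probabilities, i.e. a multiclass Brier score.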
Y_onehot = np.eye(n_classes)[y_true]
return np.sum((Y_onehot - proba_pred) ** 2) / Y_onehot.shape[0]
# Test calibration for multiclass with classifier that implements
# only decision function.
clf = LinearSVC(random_state=7)
X, y = make_blobs(
n_samples=500, n_features=100, random_state=seed, centers=10, cluster_std=15.0
)
# Use an unbalanced dataset by collapsing 8 clusters into one class
# to make the naive calibration based on a softmax more unlikely
# to work.
y[y > 2] = 2
n_classes = np.unique(y).shape[0]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
cal_clf = CalibratedClassifierCV(clf, method=method, cv=5, ensemble=ensemble)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
# Check probabilities sum to 1
assert_allclose(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that the dataset is not too trivial, otherwise it's hard
# to get interesting calibration data during the internal
# cross-validation loop.
assert 0.65 < clf.score(X_test, y_test) < 0.95
# Check that the accuracy of the calibrated model is never degraded
# too much compared to the original classifier.
assert cal_clf.score(X_test, y_test) > 0.95 * clf.score(X_test, y_test)
# Check that Brier loss of calibrated classifier is smaller than
# loss obtained by naively turning OvR decision function to
# probabilities via a softmax
uncalibrated_brier = multiclass_brier(
y_test, softmax(clf.decision_function(X_test)), n_classes=n_classes
)
calibrated_brier = multiclass_brier(y_test, probas, n_classes=n_classes)
assert calibrated_brier < 1.1 * uncalibrated_brier
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
clf = RandomForestClassifier(n_estimators=30, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
uncalibrated_brier = multiclass_brier(y_test, clf_probs, n_classes=n_classes)
cal_clf = CalibratedClassifierCV(clf, method=method, cv=5, ensemble=ensemble)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
calibrated_brier = multiclass_brier(y_test, cal_clf_probs, n_classes=n_classes)
assert calibrated_brier < 1.1 * uncalibrated_brier
def test_calibration_zero_probability():
# Test an edge case where _CalibratedClassifier avoids numerical errors
# in the multiclass normalization step if all the calibrators output
# are zero all at once for a given sample and instead fallback to uniform
# probabilities.
class ZeroCalibrator:
# This function is called from _CalibratedClassifier.predict_proba.
def predict(self, X):
return np.zeros(X.shape[0])
X, y = make_blobs(
n_samples=50, n_features=10, random_state=7, centers=10, cluster_std=15.0
)
clf = DummyClassifier().fit(X, y)
calibrator = ZeroCalibrator()
cal_clf = _CalibratedClassifier(
estimator=clf, calibrators=[calibrator], classes=clf.classes_
)
probas = cal_clf.predict_proba(X)
# Check that all probabilities are uniformly 1. / clf.n_classes_
assert_allclose(probas, 1.0 / clf.n_classes_)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6, random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = (
X[n_samples : 2 * n_samples],
y[n_samples : 2 * n_samples],
sample_weight[n_samples : 2 * n_samples],
)
X_test, y_test = X[2 * n_samples :], y[2 * n_samples :]
# Naive-Bayes
clf = MultinomialNB(force_alpha=True)
# Check error if clf not prefit
unfit_clf = CalibratedClassifierCV(clf, cv="prefit")
with pytest.raises(NotFittedError):
unfit_clf.fit(X_calib, y_calib)
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [
(X_calib, X_test),
(sparse.csr_matrix(X_calib), sparse.csr_matrix(X_test)),
]:
for method in ["isotonic", "sigmoid"]:
cal_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
cal_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = cal_clf.predict_proba(this_X_test)
y_pred = cal_clf.predict(this_X_test)
prob_pos_cal_clf = y_prob[:, 1]
assert_array_equal(y_pred, np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert brier_score_loss(y_test, prob_pos_clf) > brier_score_loss(
y_test, prob_pos_cal_clf
)
@pytest.mark.parametrize("method", ["sigmoid", "isotonic"])
def test_calibration_ensemble_false(data, method):
# Test that `ensemble=False` is the same as using predictions from
# `cross_val_predict` to train calibrator.
X, y = data
clf = LinearSVC(random_state=7)
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3, ensemble=False)
cal_clf.fit(X, y)
cal_probas = cal_clf.predict_proba(X)
# Get probas manually
unbiased_preds = cross_val_predict(clf, X, y, cv=3, method="decision_function")
if method == "isotonic":
calibrator = IsotonicRegression(out_of_bounds="clip")
else:
calibrator = _SigmoidCalibration()
calibrator.fit(unbiased_preds, y)
# Use `clf` fit on all data
clf.fit(X, y)
clf_df = clf.decision_function(X)
manual_probas = calibrator.predict(clf_df)
assert_allclose(cal_probas[:, 1], manual_probas)
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm, _sigmoid_calibration(exF, exY), 3)
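    # Platt scaling maps a decision value f to a probability via
    # 1 / (1 + exp(A * f + B)), using the (A, B) coefficients fitted above.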
lin_prob = 1.0 / (1.0 + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
with pytest.raises(ValueError):
_SigmoidCalibration().fit(np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0.0, 0.1, 0.2, 0.8, 0.9, 1.0])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
assert len(prob_true) == len(prob_pred)
assert len(prob_true) == 2
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
# Probabilities outside [0, 1] should not be accepted at all.
with pytest.raises(ValueError):
calibration_curve([1], [-0.1])
# test that quantiles work as expected
y_true2 = np.array([0, 0, 0, 0, 1, 1])
y_pred2 = np.array([0.0, 0.1, 0.2, 0.5, 0.9, 1.0])
prob_true_quantile, prob_pred_quantile = calibration_curve(
y_true2, y_pred2, n_bins=2, strategy="quantile"
)
assert len(prob_true_quantile) == len(prob_pred_quantile)
assert len(prob_true_quantile) == 2
assert_almost_equal(prob_true_quantile, [0, 2 / 3])
assert_almost_equal(prob_pred_quantile, [0.1, 0.8])
# Check that error is raised when invalid strategy is selected
with pytest.raises(ValueError):
calibration_curve(y_true2, y_pred2, strategy="percentile")
# TODO(1.3): Remove this test.
def test_calibration_curve_with_unnormalized_proba():
"""Tests the `normalize` parameter of `calibration_curve`"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0.0, 0.1, 0.2, 0.8, 0.9, 1.0])
# Ensure `normalize` == False raises a FutureWarning.
with pytest.warns(FutureWarning):
calibration_curve(y_true, y_pred, n_bins=2, normalize=False)
# Ensure `normalize` == True raises a FutureWarning and behaves as expected.
with pytest.warns(FutureWarning):
prob_true_unnormalized, prob_pred_unnormalized = calibration_curve(
y_true, y_pred * 2, n_bins=2, normalize=True
)
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
@pytest.mark.parametrize("ensemble", [True, False])
def test_calibration_nan_imputer(ensemble):
"""Test that calibration can accept nan"""
X, y = make_classification(
n_samples=10, n_features=2, n_informative=2, n_redundant=0, random_state=42
)
X[0, 0] = np.nan
clf = Pipeline(
[("imputer", SimpleImputer()), ("rf", RandomForestClassifier(n_estimators=1))]
)
clf_c = CalibratedClassifierCV(clf, cv=2, method="isotonic", ensemble=ensemble)
clf_c.fit(X, y)
clf_c.predict(X)
@pytest.mark.parametrize("ensemble", [True, False])
def test_calibration_prob_sum(ensemble):
# Test that sum of probabilities is 1. A non-regression test for
# issue #7796
num_classes = 2
X, y = make_classification(n_samples=10, n_features=5, n_classes=num_classes)
clf = LinearSVC(C=1.0, random_state=7)
clf_prob = CalibratedClassifierCV(
clf, method="sigmoid", cv=LeaveOneOut(), ensemble=ensemble
)
clf_prob.fit(X, y)
probs = clf_prob.predict_proba(X)
assert_array_almost_equal(probs.sum(axis=1), np.ones(probs.shape[0]))
@pytest.mark.parametrize("ensemble", [True, False])
def test_calibration_less_classes(ensemble):
# Test to check calibration works fine when train set in a test-train
# split does not contain all classes
# Since this test uses LOO, at each iteration train set will not contain a
# class label
X = np.random.randn(10, 5)
y = np.arange(10)
clf = LinearSVC(C=1.0, random_state=7)
cal_clf = CalibratedClassifierCV(
clf, method="sigmoid", cv=LeaveOneOut(), ensemble=ensemble
)
cal_clf.fit(X, y)
for i, calibrated_classifier in enumerate(cal_clf.calibrated_classifiers_):
proba = calibrated_classifier.predict_proba(X)
if ensemble:
# Check that the unobserved class has proba=0
assert_array_equal(proba[:, i], np.zeros(len(y)))
# Check for all other classes proba>0
assert np.all(proba[:, :i] > 0)
assert np.all(proba[:, i + 1 :] > 0)
else:
# Check `proba` are all 1/n_classes
assert np.allclose(proba, 1 / proba.shape[0])
@pytest.mark.parametrize(
"X",
[
np.random.RandomState(42).randn(15, 5, 2),
np.random.RandomState(42).randn(15, 5, 2, 6),
],
)
def test_calibration_accepts_ndarray(X):
"""Test that calibration accepts n-dimensional arrays as input"""
y = [1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0]
class MockTensorClassifier(BaseEstimator):
"""A toy estimator that accepts tensor inputs"""
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def decision_function(self, X):
# toy decision function that just needs to have the right shape:
return X.reshape(X.shape[0], -1).sum(axis=1)
calibrated_clf = CalibratedClassifierCV(MockTensorClassifier())
# we should be able to fit this classifier with no error
calibrated_clf.fit(X, y)
@pytest.fixture
def dict_data():
dict_data = [
{"state": "NY", "age": "adult"},
{"state": "TX", "age": "adult"},
{"state": "VT", "age": "child"},
]
text_labels = [1, 0, 1]
return dict_data, text_labels
@pytest.fixture
def dict_data_pipeline(dict_data):
X, y = dict_data
pipeline_prefit = Pipeline(
[("vectorizer", DictVectorizer()), ("clf", RandomForestClassifier())]
)
return pipeline_prefit.fit(X, y)
def test_calibration_dict_pipeline(dict_data, dict_data_pipeline):
"""Test that calibration works in prefit pipeline with transformer
`X` is not array-like, sparse matrix or dataframe at the start.
See https://github.com/scikit-learn/scikit-learn/issues/8710
Also test it can predict without running into validation errors.
See https://github.com/scikit-learn/scikit-learn/issues/19637
"""
X, y = dict_data
clf = dict_data_pipeline
calib_clf = CalibratedClassifierCV(clf, cv="prefit")
calib_clf.fit(X, y)
# Check attributes are obtained from fitted estimator
assert_array_equal(calib_clf.classes_, clf.classes_)
# Neither the pipeline nor the calibration meta-estimator
    # exposes the n_features_in_ check on this kind of data.
assert not hasattr(clf, "n_features_in_")
assert not hasattr(calib_clf, "n_features_in_")
# Ensure that no error is thrown with predict and predict_proba
calib_clf.predict(X)
calib_clf.predict_proba(X)
@pytest.mark.parametrize(
"clf, cv",
[
pytest.param(LinearSVC(C=1), 2),
pytest.param(LinearSVC(C=1), "prefit"),
],
)
def test_calibration_attributes(clf, cv):
# Check that `n_features_in_` and `classes_` attributes created properly
X, y = make_classification(n_samples=10, n_features=5, n_classes=2, random_state=7)
if cv == "prefit":
clf = clf.fit(X, y)
calib_clf = CalibratedClassifierCV(clf, cv=cv)
calib_clf.fit(X, y)
if cv == "prefit":
assert_array_equal(calib_clf.classes_, clf.classes_)
assert calib_clf.n_features_in_ == clf.n_features_in_
else:
classes = LabelEncoder().fit(y).classes_
assert_array_equal(calib_clf.classes_, classes)
assert calib_clf.n_features_in_ == X.shape[1]
def test_calibration_inconsistent_prefit_n_features_in():
# Check that `n_features_in_` from prefit base estimator
# is consistent with training set
X, y = make_classification(n_samples=10, n_features=5, n_classes=2, random_state=7)
clf = LinearSVC(C=1).fit(X, y)
calib_clf = CalibratedClassifierCV(clf, cv="prefit")
msg = "X has 3 features, but LinearSVC is expecting 5 features as input."
with pytest.raises(ValueError, match=msg):
calib_clf.fit(X[:, :3], y)
def test_calibration_votingclassifier():
# Check that `CalibratedClassifier` works with `VotingClassifier`.
# The method `predict_proba` from `VotingClassifier` is dynamically
# defined via a property that only works when voting="soft".
X, y = make_classification(n_samples=10, n_features=5, n_classes=2, random_state=7)
vote = VotingClassifier(
estimators=[("lr" + str(i), LogisticRegression()) for i in range(3)],
voting="soft",
)
vote.fit(X, y)
calib_clf = CalibratedClassifierCV(estimator=vote, cv="prefit")
# smoke test: should not raise an error
calib_clf.fit(X, y)
@pytest.fixture(scope="module")
def iris_data():
return load_iris(return_X_y=True)
@pytest.fixture(scope="module")
def iris_data_binary(iris_data):
X, y = iris_data
return X[y < 2], y[y < 2]
def test_calibration_display_validation(pyplot, iris_data, iris_data_binary):
X, y = iris_data
X_binary, y_binary = iris_data_binary
reg = LinearRegression().fit(X, y)
msg = "'estimator' should be a fitted classifier"
with pytest.raises(ValueError, match=msg):
CalibrationDisplay.from_estimator(reg, X, y)
clf = LinearSVC().fit(X, y)
msg = "response method predict_proba is not defined in"
with pytest.raises(ValueError, match=msg):
CalibrationDisplay.from_estimator(clf, X, y)
clf = LogisticRegression()
with pytest.raises(NotFittedError):
CalibrationDisplay.from_estimator(clf, X, y)
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
def test_calibration_display_non_binary(pyplot, iris_data, constructor_name):
X, y = iris_data
clf = DecisionTreeClassifier()
clf.fit(X, y)
y_prob = clf.predict_proba(X)
if constructor_name == "from_estimator":
msg = "to be a binary classifier, but got"
with pytest.raises(ValueError, match=msg):
CalibrationDisplay.from_estimator(clf, X, y)
else:
msg = "y should be a 1d array, got an array of shape"
with pytest.raises(ValueError, match=msg):
CalibrationDisplay.from_predictions(y, y_prob)
@pytest.mark.parametrize("n_bins", [5, 10])
@pytest.mark.parametrize("strategy", ["uniform", "quantile"])
def test_calibration_display_compute(pyplot, iris_data_binary, n_bins, strategy):
# Ensure `CalibrationDisplay.from_predictions` and `calibration_curve`
# compute the same results. Also checks attributes of the
# CalibrationDisplay object.
X, y = iris_data_binary
lr = LogisticRegression().fit(X, y)
viz = CalibrationDisplay.from_estimator(
lr, X, y, n_bins=n_bins, strategy=strategy, alpha=0.8
)
y_prob = lr.predict_proba(X)[:, 1]
prob_true, prob_pred = calibration_curve(
y, y_prob, n_bins=n_bins, strategy=strategy
)
assert_allclose(viz.prob_true, prob_true)
assert_allclose(viz.prob_pred, prob_pred)
assert_allclose(viz.y_prob, y_prob)
assert viz.estimator_name == "LogisticRegression"
# cannot fail thanks to pyplot fixture
import matplotlib as mpl # noqa
assert isinstance(viz.line_, mpl.lines.Line2D)
assert viz.line_.get_alpha() == 0.8
assert isinstance(viz.ax_, mpl.axes.Axes)
assert isinstance(viz.figure_, mpl.figure.Figure)
assert viz.ax_.get_xlabel() == "Mean predicted probability (Positive class: 1)"
assert viz.ax_.get_ylabel() == "Fraction of positives (Positive class: 1)"
expected_legend_labels = ["LogisticRegression", "Perfectly calibrated"]
legend_labels = viz.ax_.get_legend().get_texts()
assert len(legend_labels) == len(expected_legend_labels)
for labels in legend_labels:
assert labels.get_text() in expected_legend_labels
def test_plot_calibration_curve_pipeline(pyplot, iris_data_binary):
# Ensure pipelines are supported by CalibrationDisplay.from_estimator
X, y = iris_data_binary
clf = make_pipeline(StandardScaler(), LogisticRegression())
clf.fit(X, y)
viz = CalibrationDisplay.from_estimator(clf, X, y)
expected_legend_labels = [viz.estimator_name, "Perfectly calibrated"]
legend_labels = viz.ax_.get_legend().get_texts()
assert len(legend_labels) == len(expected_legend_labels)
for labels in legend_labels:
assert labels.get_text() in expected_legend_labels
@pytest.mark.parametrize(
"name, expected_label", [(None, "_line1"), ("my_est", "my_est")]
)
def test_calibration_display_default_labels(pyplot, name, expected_label):
prob_true = np.array([0, 1, 1, 0])
prob_pred = np.array([0.2, 0.8, 0.8, 0.4])
y_prob = np.array([])
viz = CalibrationDisplay(prob_true, prob_pred, y_prob, estimator_name=name)
viz.plot()
expected_legend_labels = [] if name is None else [name]
expected_legend_labels.append("Perfectly calibrated")
legend_labels = viz.ax_.get_legend().get_texts()
assert len(legend_labels) == len(expected_legend_labels)
for labels in legend_labels:
assert labels.get_text() in expected_legend_labels
def test_calibration_display_label_class_plot(pyplot):
# Checks that when instantiating `CalibrationDisplay` class then calling
# `plot`, `self.estimator_name` is the one given in `plot`
prob_true = np.array([0, 1, 1, 0])
prob_pred = np.array([0.2, 0.8, 0.8, 0.4])
y_prob = np.array([])
name = "name one"
viz = CalibrationDisplay(prob_true, prob_pred, y_prob, estimator_name=name)
assert viz.estimator_name == name
name = "name two"
viz.plot(name=name)
expected_legend_labels = [name, "Perfectly calibrated"]
legend_labels = viz.ax_.get_legend().get_texts()
assert len(legend_labels) == len(expected_legend_labels)
for labels in legend_labels:
assert labels.get_text() in expected_legend_labels
@pytest.mark.parametrize("constructor_name", ["from_estimator", "from_predictions"])
def test_calibration_display_name_multiple_calls(
constructor_name, pyplot, iris_data_binary
):
# Check that the `name` used when calling
# `CalibrationDisplay.from_predictions` or
# `CalibrationDisplay.from_estimator` is used when multiple
# `CalibrationDisplay.viz.plot()` calls are made.
X, y = iris_data_binary
clf_name = "my hand-crafted name"
clf = LogisticRegression().fit(X, y)
y_prob = clf.predict_proba(X)[:, 1]
constructor = getattr(CalibrationDisplay, constructor_name)
params = (clf, X, y) if constructor_name == "from_estimator" else (y, y_prob)
viz = constructor(*params, name=clf_name)
assert viz.estimator_name == clf_name
pyplot.close("all")
viz.plot()
expected_legend_labels = [clf_name, "Perfectly calibrated"]
legend_labels = viz.ax_.get_legend().get_texts()
assert len(legend_labels) == len(expected_legend_labels)
for labels in legend_labels:
assert labels.get_text() in expected_legend_labels
pyplot.close("all")
clf_name = "another_name"
viz.plot(name=clf_name)
assert len(legend_labels) == len(expected_legend_labels)
for labels in legend_labels:
assert labels.get_text() in expected_legend_labels
def test_calibration_display_ref_line(pyplot, iris_data_binary):
# Check that `ref_line` only appears once
X, y = iris_data_binary
lr = LogisticRegression().fit(X, y)
dt = DecisionTreeClassifier().fit(X, y)
viz = CalibrationDisplay.from_estimator(lr, X, y)
viz2 = CalibrationDisplay.from_estimator(dt, X, y, ax=viz.ax_)
labels = viz2.ax_.get_legend_handles_labels()[1]
assert labels.count("Perfectly calibrated") == 1
@pytest.mark.parametrize("dtype_y_str", [str, object])
def test_calibration_curve_pos_label_error_str(dtype_y_str):
"""Check error message when a `pos_label` is not specified with `str` targets."""
rng = np.random.RandomState(42)
y1 = np.array(["spam"] * 3 + ["eggs"] * 2, dtype=dtype_y_str)
y2 = rng.randint(0, 2, size=y1.size)
err_msg = (
"y_true takes value in {'eggs', 'spam'} and pos_label is not "
"specified: either make y_true take value in {0, 1} or {-1, 1} or "
"pass pos_label explicitly"
)
with pytest.raises(ValueError, match=err_msg):
calibration_curve(y1, y2)
@pytest.mark.parametrize("dtype_y_str", [str, object])
def test_calibration_curve_pos_label(dtype_y_str):
"""Check the behaviour when passing explicitly `pos_label`."""
y_true = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1])
classes = np.array(["spam", "egg"], dtype=dtype_y_str)
y_true_str = classes[y_true]
y_pred = np.array([0.1, 0.2, 0.3, 0.4, 0.65, 0.7, 0.8, 0.9, 1.0])
# default case
prob_true, _ = calibration_curve(y_true, y_pred, n_bins=4)
assert_allclose(prob_true, [0, 0.5, 1, 1])
# if `y_true` contains `str`, then `pos_label` is required
prob_true, _ = calibration_curve(y_true_str, y_pred, n_bins=4, pos_label="egg")
assert_allclose(prob_true, [0, 0.5, 1, 1])
prob_true, _ = calibration_curve(y_true, 1 - y_pred, n_bins=4, pos_label=0)
assert_allclose(prob_true, [0, 0, 0.5, 1])
prob_true, _ = calibration_curve(y_true_str, 1 - y_pred, n_bins=4, pos_label="spam")
assert_allclose(prob_true, [0, 0, 0.5, 1])
@pytest.mark.parametrize("pos_label, expected_pos_label", [(None, 1), (0, 0), (1, 1)])
def test_calibration_display_pos_label(
pyplot, iris_data_binary, pos_label, expected_pos_label
):
"""Check the behaviour of `pos_label` in the `CalibrationDisplay`."""
X, y = iris_data_binary
lr = LogisticRegression().fit(X, y)
viz = CalibrationDisplay.from_estimator(lr, X, y, pos_label=pos_label)
y_prob = lr.predict_proba(X)[:, expected_pos_label]
prob_true, prob_pred = calibration_curve(y, y_prob, pos_label=pos_label)
assert_allclose(viz.prob_true, prob_true)
assert_allclose(viz.prob_pred, prob_pred)
assert_allclose(viz.y_prob, y_prob)
assert (
viz.ax_.get_xlabel()
== f"Mean predicted probability (Positive class: {expected_pos_label})"
)
assert (
viz.ax_.get_ylabel()
== f"Fraction of positives (Positive class: {expected_pos_label})"
)
expected_legend_labels = [lr.__class__.__name__, "Perfectly calibrated"]
legend_labels = viz.ax_.get_legend().get_texts()
assert len(legend_labels) == len(expected_legend_labels)
for labels in legend_labels:
assert labels.get_text() in expected_legend_labels
@pytest.mark.parametrize("method", ["sigmoid", "isotonic"])
@pytest.mark.parametrize("ensemble", [True, False])
def test_calibrated_classifier_cv_double_sample_weights_equivalence(method, ensemble):
"""Check that passing repeating twice the dataset `X` is equivalent to
passing a `sample_weight` with a factor 2."""
X, y = load_iris(return_X_y=True)
# Scale the data to avoid any convergence issue
X = StandardScaler().fit_transform(X)
# Only use 2 classes
X, y = X[:100], y[:100]
sample_weight = np.ones_like(y) * 2
# Interlace the data such that a 2-fold cross-validation will be equivalent
# to using the original dataset with a sample weights of 2
X_twice = np.zeros((X.shape[0] * 2, X.shape[1]), dtype=X.dtype)
X_twice[::2, :] = X
X_twice[1::2, :] = X
y_twice = np.zeros(y.shape[0] * 2, dtype=y.dtype)
y_twice[::2] = y
y_twice[1::2] = y
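    # Illustrative note: after this interlacing, every CV fold of
    # (X_twice, y_twice) contains the same distinct samples as the corresponding
    # fold of (X, y), each appearing twice, which is exactly the effect that a
    # sample_weight of 2 emulates.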
estimator = LogisticRegression()
calibrated_clf_without_weights = CalibratedClassifierCV(
estimator,
method=method,
ensemble=ensemble,
cv=2,
)
calibrated_clf_with_weights = clone(calibrated_clf_without_weights)
calibrated_clf_with_weights.fit(X, y, sample_weight=sample_weight)
calibrated_clf_without_weights.fit(X_twice, y_twice)
# Check that the underlying fitted estimators have the same coefficients
for est_with_weights, est_without_weights in zip(
calibrated_clf_with_weights.calibrated_classifiers_,
calibrated_clf_without_weights.calibrated_classifiers_,
):
assert_allclose(
est_with_weights.estimator.coef_,
est_without_weights.estimator.coef_,
)
# Check that the predictions are the same
y_pred_with_weights = calibrated_clf_with_weights.predict_proba(X)
y_pred_without_weights = calibrated_clf_without_weights.predict_proba(X)
assert_allclose(y_pred_with_weights, y_pred_without_weights)
@pytest.mark.parametrize("fit_params_type", ["list", "array"])
def test_calibration_with_fit_params(fit_params_type, data):
"""Tests that fit_params are passed to the underlying base estimator.
Non-regression test for:
https://github.com/scikit-learn/scikit-learn/issues/12384
"""
X, y = data
fit_params = {
"a": _convert_container(y, fit_params_type),
"b": _convert_container(y, fit_params_type),
}
clf = CheckingClassifier(expected_fit_params=["a", "b"])
pc_clf = CalibratedClassifierCV(clf)
pc_clf.fit(X, y, **fit_params)
@pytest.mark.parametrize(
"sample_weight",
[
[1.0] * N_SAMPLES,
np.ones(N_SAMPLES),
],
)
def test_calibration_with_sample_weight_base_estimator(sample_weight, data):
"""Tests that sample_weight is passed to the underlying base
estimator.
"""
X, y = data
clf = CheckingClassifier(expected_sample_weight=True)
pc_clf = CalibratedClassifierCV(clf)
pc_clf.fit(X, y, sample_weight=sample_weight)
def test_calibration_without_sample_weight_base_estimator(data):
"""Check that even if the estimator doesn't support
sample_weight, fitting with sample_weight still works.
There should be a warning, since the sample_weight is not passed
on to the estimator.
"""
X, y = data
sample_weight = np.ones_like(y)
class ClfWithoutSampleWeight(CheckingClassifier):
def fit(self, X, y, **fit_params):
assert "sample_weight" not in fit_params
return super().fit(X, y, **fit_params)
clf = ClfWithoutSampleWeight()
pc_clf = CalibratedClassifierCV(clf)
with pytest.warns(UserWarning):
pc_clf.fit(X, y, sample_weight=sample_weight)
def test_calibration_with_fit_params_inconsistent_length(data):
"""fit_params having different length than data should raise the
correct error message.
"""
X, y = data
fit_params = {"a": y[:5]}
clf = CheckingClassifier(expected_fit_params=fit_params)
pc_clf = CalibratedClassifierCV(clf)
msg = (
r"Found input variables with inconsistent numbers of "
r"samples: \[" + str(N_SAMPLES) + r", 5\]"
)
with pytest.raises(ValueError, match=msg):
pc_clf.fit(X, y, **fit_params)
@pytest.mark.parametrize("method", ["sigmoid", "isotonic"])
@pytest.mark.parametrize("ensemble", [True, False])
def test_calibrated_classifier_cv_zeros_sample_weights_equivalence(method, ensemble):
"""Check that passing removing some sample from the dataset `X` is
equivalent to passing a `sample_weight` with a factor 0."""
X, y = load_iris(return_X_y=True)
# Scale the data to avoid any convergence issue
X = StandardScaler().fit_transform(X)
# Only use 2 classes and select samples such that 2-fold cross-validation
# split will lead to an equivalence with a `sample_weight` of 0
X = np.vstack((X[:40], X[50:90]))
y = np.hstack((y[:40], y[50:90]))
sample_weight = np.zeros_like(y)
sample_weight[::2] = 1
estimator = LogisticRegression()
calibrated_clf_without_weights = CalibratedClassifierCV(
estimator,
method=method,
ensemble=ensemble,
cv=2,
)
calibrated_clf_with_weights = clone(calibrated_clf_without_weights)
calibrated_clf_with_weights.fit(X, y, sample_weight=sample_weight)
calibrated_clf_without_weights.fit(X[::2], y[::2])
# Check that the underlying fitted estimators have the same coefficients
for est_with_weights, est_without_weights in zip(
calibrated_clf_with_weights.calibrated_classifiers_,
calibrated_clf_without_weights.calibrated_classifiers_,
):
assert_allclose(
est_with_weights.estimator.coef_,
est_without_weights.estimator.coef_,
)
# Check that the predictions are the same
y_pred_with_weights = calibrated_clf_with_weights.predict_proba(X)
y_pred_without_weights = calibrated_clf_without_weights.predict_proba(X)
assert_allclose(y_pred_with_weights, y_pred_without_weights)
# TODO(1.4): Remove
def test_calibrated_classifier_error_base_estimator(data):
"""Check that we raise an error is a user set both `base_estimator` and
`estimator`."""
calibrated_classifier = CalibratedClassifierCV(
base_estimator=LogisticRegression(), estimator=LogisticRegression()
)
with pytest.raises(ValueError, match="Both `base_estimator` and `estimator`"):
calibrated_classifier.fit(*data)
# TODO(1.4): Remove
def test_calibrated_classifier_deprecation_base_estimator(data):
"""Check that we raise a warning regarding the deprecation of
`base_estimator`."""
calibrated_classifier = CalibratedClassifierCV(base_estimator=LogisticRegression())
warn_msg = "`base_estimator` was renamed to `estimator`"
with pytest.warns(FutureWarning, match=warn_msg):
calibrated_classifier.fit(*data)
| bsd-3-clause |
anntzer/scikit-learn | examples/cluster/plot_optics.py | 11 | 3572 | """
===================================
Demo of OPTICS clustering algorithm
===================================
.. currentmodule:: sklearn
Finds core samples of high density and expands clusters from them.
This example uses data that is generated so that the clusters have
different densities.
The :class:`~cluster.OPTICS` is first used with its Xi cluster detection
method, and then setting specific thresholds on the reachability, which
corresponds to :class:`~cluster.DBSCAN`. We can see that the different
clusters of OPTICS's Xi method can be recovered with different choices of
thresholds in DBSCAN.
"""
# Authors: Shane Grigsby <[email protected]>
# Adrin Jalali <[email protected]>
# License: BSD 3 clause
from sklearn.cluster import OPTICS, cluster_optics_dbscan
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
# Generate sample data
np.random.seed(0)
n_points_per_cluster = 250
C1 = [-5, -2] + 0.8 * np.random.randn(n_points_per_cluster, 2)
C2 = [4, -1] + 0.1 * np.random.randn(n_points_per_cluster, 2)
C3 = [1, -2] + 0.2 * np.random.randn(n_points_per_cluster, 2)
C4 = [-2, 3] + 0.3 * np.random.randn(n_points_per_cluster, 2)
C5 = [3, -2] + 1.6 * np.random.randn(n_points_per_cluster, 2)
C6 = [5, 6] + 2 * np.random.randn(n_points_per_cluster, 2)
X = np.vstack((C1, C2, C3, C4, C5, C6))
clust = OPTICS(min_samples=50, xi=0.05, min_cluster_size=0.05)
# Run the fit
clust.fit(X)
labels_050 = cluster_optics_dbscan(
reachability=clust.reachability_,
core_distances=clust.core_distances_,
ordering=clust.ordering_,
eps=0.5,
)
labels_200 = cluster_optics_dbscan(
reachability=clust.reachability_,
core_distances=clust.core_distances_,
ordering=clust.ordering_,
eps=2,
)
space = np.arange(len(X))
reachability = clust.reachability_[clust.ordering_]
labels = clust.labels_[clust.ordering_]
plt.figure(figsize=(10, 7))
G = gridspec.GridSpec(2, 3)
ax1 = plt.subplot(G[0, :])
ax2 = plt.subplot(G[1, 0])
ax3 = plt.subplot(G[1, 1])
ax4 = plt.subplot(G[1, 2])
# Reachability plot
colors = ["g.", "r.", "b.", "y.", "c."]
for klass, color in zip(range(0, 5), colors):
Xk = space[labels == klass]
Rk = reachability[labels == klass]
ax1.plot(Xk, Rk, color, alpha=0.3)
ax1.plot(space[labels == -1], reachability[labels == -1], "k.", alpha=0.3)
ax1.plot(space, np.full_like(space, 2.0, dtype=float), "k-", alpha=0.5)
ax1.plot(space, np.full_like(space, 0.5, dtype=float), "k-.", alpha=0.5)
ax1.set_ylabel("Reachability (epsilon distance)")
ax1.set_title("Reachability Plot")
# OPTICS
colors = ["g.", "r.", "b.", "y.", "c."]
for klass, color in zip(range(0, 5), colors):
Xk = X[clust.labels_ == klass]
ax2.plot(Xk[:, 0], Xk[:, 1], color, alpha=0.3)
ax2.plot(X[clust.labels_ == -1, 0], X[clust.labels_ == -1, 1], "k+", alpha=0.1)
ax2.set_title("Automatic Clustering\nOPTICS")
# DBSCAN at 0.5
colors = ["g", "greenyellow", "olive", "r", "b", "c"]
for klass, color in zip(range(0, 6), colors):
Xk = X[labels_050 == klass]
ax3.plot(Xk[:, 0], Xk[:, 1], color, alpha=0.3, marker=".")
ax3.plot(X[labels_050 == -1, 0], X[labels_050 == -1, 1], "k+", alpha=0.1)
ax3.set_title("Clustering at 0.5 epsilon cut\nDBSCAN")
# DBSCAN at 2.
colors = ["g.", "m.", "y.", "c."]
for klass, color in zip(range(0, 4), colors):
Xk = X[labels_200 == klass]
ax4.plot(Xk[:, 0], Xk[:, 1], color, alpha=0.3)
ax4.plot(X[labels_200 == -1, 0], X[labels_200 == -1, 1], "k+", alpha=0.1)
ax4.set_title("Clustering at 2.0 epsilon cut\nDBSCAN")
plt.tight_layout()
plt.show()
| bsd-3-clause |
pytorch/fairseq | fairseq/data/audio/speech_to_text_dataset.py | 1 | 19717 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import csv
import io
import logging
import re
from collections import defaultdict
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Optional
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.data import ConcatDataset, Dictionary, FairseqDataset, ResamplingDataset
from fairseq.data import data_utils as fairseq_data_utils
from fairseq.data.audio.audio_utils import (
FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS,
get_fbank,
get_waveform,
is_npy_data,
is_sf_audio_data,
parse_path,
read_from_stored_zip,
)
from fairseq.data.audio.data_cfg import S2TDataConfig
from fairseq.data.audio.feature_transforms import CompositeAudioFeatureTransform
logger = logging.getLogger(__name__)
def get_features_from_npy_or_audio(path):
ext = Path(path).suffix
if ext not in FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS:
raise ValueError(f'Unsupported file format for "{path}"')
return np.load(path) if ext == ".npy" else get_fbank(path)
def get_features_or_waveform_from_stored_zip(
path,
byte_offset,
byte_size,
need_waveform=False,
use_sample_rate=None,
):
assert path.endswith(".zip")
data = read_from_stored_zip(path, byte_offset, byte_size)
f = io.BytesIO(data)
if is_npy_data(data):
features_or_waveform = np.load(f)
elif is_sf_audio_data(data):
features_or_waveform = (
get_waveform(f, always_2d=False, output_sample_rate=use_sample_rate)[0]
if need_waveform
else get_fbank(f)
)
else:
raise ValueError(f'Unknown file format for "{path}"')
return features_or_waveform
def get_features_or_waveform(path: str, need_waveform=False, use_sample_rate=None):
"""Get speech features from .npy file or waveform from .wav/.flac file.
The file may be inside an uncompressed ZIP file and is accessed via byte
offset and length.
Args:
path (str): File path in the format of "<.npy/.wav/.flac path>" or
"<zip path>:<byte offset>:<byte length>".
need_waveform (bool): return waveform instead of features.
use_sample_rate (int): change sample rate for the input wave file
Returns:
features_or_waveform (numpy.ndarray): speech features or waveform.
"""
_path, slice_ptr = parse_path(path)
if len(slice_ptr) == 0:
if need_waveform:
return get_waveform(
_path, always_2d=False, output_sample_rate=use_sample_rate
)[0]
return get_features_from_npy_or_audio(_path)
elif len(slice_ptr) == 2:
features_or_waveform = get_features_or_waveform_from_stored_zip(
_path,
slice_ptr[0],
slice_ptr[1],
need_waveform=need_waveform,
use_sample_rate=use_sample_rate,
)
else:
raise ValueError(f"Invalid path: {path}")
return features_or_waveform
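    # Example usage (illustrative sketch; the paths below are hypothetical):
    #
    #   feats = get_features_or_waveform("/data/feats/utt1.npy")
    #   wav = get_features_or_waveform("/data/audio/utt1.flac", need_waveform=True,
    #                                  use_sample_rate=16000)
    #   # entry packed inside an uncompressed ZIP, addressed as <zip>:<offset>:<length>
    #   feats = get_features_or_waveform("/data/feats.zip:1024:2048")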
def _collate_frames(
frames: List[torch.Tensor], is_audio_input: bool = False
) -> torch.Tensor:
"""
Convert a list of 2D frames into a padded 3D tensor
Args:
        frames (list): list of 2D frames of size L[i]*f_dim, where L[i] is the
            length of the i-th frame and f_dim is the static feature dimension
Returns:
3D tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]
"""
max_len = max(frame.size(0) for frame in frames)
if is_audio_input:
out = frames[0].new_zeros((len(frames), max_len))
else:
out = frames[0].new_zeros((len(frames), max_len, frames[0].size(1)))
for i, v in enumerate(frames):
out[i, : v.size(0)] = v
return out
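    # Illustrative sketch of the padding behaviour (hypothetical shapes): two
    # feature matrices of lengths 3 and 2 with f_dim=4 are packed into a
    # (2, 3, 4) tensor, the shorter one being zero-padded along the time axis:
    #
    #   frames = [torch.rand(3, 4), torch.rand(2, 4)]
    #   batch = _collate_frames(frames)      # batch.shape == torch.Size([2, 3, 4])
    #   bool((batch[1, 2] == 0).all())       # True: the padded time step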
@dataclass
class SpeechToTextDatasetItem(object):
index: int
source: torch.Tensor
target: Optional[torch.Tensor] = None
speaker_id: Optional[int] = None
class SpeechToTextDataset(FairseqDataset):
LANG_TAG_TEMPLATE = "<lang:{}>"
def __init__(
self,
split: str,
is_train_split: bool,
cfg: S2TDataConfig,
audio_paths: List[str],
n_frames: List[int],
src_texts: Optional[List[str]] = None,
tgt_texts: Optional[List[str]] = None,
speakers: Optional[List[str]] = None,
src_langs: Optional[List[str]] = None,
tgt_langs: Optional[List[str]] = None,
ids: Optional[List[str]] = None,
tgt_dict: Optional[Dictionary] = None,
pre_tokenizer=None,
bpe_tokenizer=None,
n_frames_per_step=1,
speaker_to_id=None,
append_eos=True,
):
self.split, self.is_train_split = split, is_train_split
self.cfg = cfg
self.audio_paths, self.n_frames = audio_paths, n_frames
self.n_samples = len(audio_paths)
assert len(n_frames) == self.n_samples > 0
assert src_texts is None or len(src_texts) == self.n_samples
assert tgt_texts is None or len(tgt_texts) == self.n_samples
assert speakers is None or len(speakers) == self.n_samples
assert src_langs is None or len(src_langs) == self.n_samples
assert tgt_langs is None or len(tgt_langs) == self.n_samples
assert ids is None or len(ids) == self.n_samples
assert (tgt_dict is None and tgt_texts is None) or (
tgt_dict is not None and tgt_texts is not None
)
self.src_texts, self.tgt_texts = src_texts, tgt_texts
self.src_langs, self.tgt_langs = src_langs, tgt_langs
self.speakers = speakers
self.tgt_dict = tgt_dict
self.check_tgt_lang_tag()
self.ids = ids
self.shuffle = cfg.shuffle if is_train_split else False
self.feature_transforms = CompositeAudioFeatureTransform.from_config_dict(
self.cfg.get_feature_transforms(split, is_train_split)
)
self.pre_tokenizer = pre_tokenizer
self.bpe_tokenizer = bpe_tokenizer
self.n_frames_per_step = n_frames_per_step
self.speaker_to_id = speaker_to_id
self.tgt_lens = self.get_tgt_lens_and_check_oov()
self.append_eos = append_eos
logger.info(self.__repr__())
def get_tgt_lens_and_check_oov(self):
if self.tgt_texts is None:
return [0 for _ in range(self.n_samples)]
tgt_lens = []
n_tokens, n_oov_tokens = 0, 0
for i in range(self.n_samples):
tokenized = self.get_tokenized_tgt_text(i).split(" ")
oov_tokens = [
t
for t in tokenized
if self.tgt_dict.index(t) == self.tgt_dict.unk_index
]
n_tokens += len(tokenized)
n_oov_tokens += len(oov_tokens)
tgt_lens.append(len(tokenized))
logger.info(f"'{self.split}' has {n_oov_tokens / n_tokens * 100:.2f}% OOV")
return tgt_lens
def __repr__(self):
return (
self.__class__.__name__
+ f'(split="{self.split}", n_samples={self.n_samples:_}, '
f"prepend_tgt_lang_tag={self.cfg.prepend_tgt_lang_tag}, "
f"shuffle={self.shuffle}, transforms={self.feature_transforms}, "
f"n_frames_per_step={self.n_frames_per_step}"
)
@classmethod
def is_lang_tag(cls, token):
pattern = cls.LANG_TAG_TEMPLATE.replace("{}", "(.*)")
return re.match(pattern, token)
def check_tgt_lang_tag(self):
if self.cfg.prepend_tgt_lang_tag:
assert self.tgt_langs is not None and self.tgt_dict is not None
tgt_lang_tags = [
self.LANG_TAG_TEMPLATE.format(t) for t in set(self.tgt_langs)
]
assert all(t in self.tgt_dict for t in tgt_lang_tags)
@classmethod
def tokenize(cls, tokenizer, text: str):
return text if tokenizer is None else tokenizer.encode(text)
def get_tokenized_tgt_text(self, index: int):
text = self.tokenize(self.pre_tokenizer, self.tgt_texts[index])
text = self.tokenize(self.bpe_tokenizer, text)
return text
def pack_frames(self, feature: torch.Tensor):
if self.n_frames_per_step == 1:
return feature
n_packed_frames = feature.shape[0] // self.n_frames_per_step
feature = feature[: self.n_frames_per_step * n_packed_frames]
return feature.reshape(n_packed_frames, -1)
@classmethod
def get_lang_tag_idx(cls, lang: str, dictionary: Dictionary):
lang_tag_idx = dictionary.index(cls.LANG_TAG_TEMPLATE.format(lang))
assert lang_tag_idx != dictionary.unk()
return lang_tag_idx
def _get_source_audio(self, index: int) -> torch.Tensor:
source = get_features_or_waveform(
self.audio_paths[index],
need_waveform=self.cfg.use_audio_input,
use_sample_rate=self.cfg.use_sample_rate,
)
if self.cfg.use_audio_input:
source = torch.from_numpy(source).float()
if self.cfg.standardize_audio:
with torch.no_grad():
source = F.layer_norm(source, source.shape)
else:
if self.feature_transforms is not None:
source = self.feature_transforms(source)
source = torch.from_numpy(source).float()
return source
def __getitem__(self, index: int) -> SpeechToTextDatasetItem:
source = self._get_source_audio(index)
source = self.pack_frames(source)
target = None
if self.tgt_texts is not None:
tokenized = self.get_tokenized_tgt_text(index)
target = self.tgt_dict.encode_line(
tokenized, add_if_not_exist=False, append_eos=self.append_eos
).long()
if self.cfg.prepend_tgt_lang_tag:
lang_tag_idx = self.get_lang_tag_idx(
self.tgt_langs[index], self.tgt_dict
)
target = torch.cat((torch.LongTensor([lang_tag_idx]), target), 0)
if self.cfg.prepend_bos_and_append_tgt_lang_tag:
bos = torch.LongTensor([self.tgt_dict.bos()])
lang_tag_idx = self.get_lang_tag_idx(self.tgt_langs[index], self.tgt_dict)
assert lang_tag_idx != self.tgt_dict.unk()
lang_tag_idx = torch.LongTensor([lang_tag_idx])
target = torch.cat((bos, target, lang_tag_idx), 0)
speaker_id = None
if self.speaker_to_id is not None:
speaker_id = self.speaker_to_id[self.speakers[index]]
return SpeechToTextDatasetItem(
index=index, source=source, target=target, speaker_id=speaker_id
)
def __len__(self):
return self.n_samples
def collater(
self, samples: List[SpeechToTextDatasetItem], return_order: bool = False
) -> Dict:
if len(samples) == 0:
return {}
indices = torch.tensor([x.index for x in samples], dtype=torch.long)
frames = _collate_frames([x.source for x in samples], self.cfg.use_audio_input)
# sort samples by descending number of frames
n_frames = torch.tensor([x.source.size(0) for x in samples], dtype=torch.long)
n_frames, order = n_frames.sort(descending=True)
indices = indices.index_select(0, order)
frames = frames.index_select(0, order)
target, target_lengths = None, None
prev_output_tokens = None
ntokens = None
if self.tgt_texts is not None:
target = fairseq_data_utils.collate_tokens(
[x.target for x in samples],
self.tgt_dict.pad(),
self.tgt_dict.eos(),
left_pad=False,
move_eos_to_beginning=False,
)
target = target.index_select(0, order)
target_lengths = torch.tensor(
[x.target.size(0) for x in samples], dtype=torch.long
).index_select(0, order)
prev_output_tokens = fairseq_data_utils.collate_tokens(
[x.target for x in samples],
self.tgt_dict.pad(),
eos_idx=None,
left_pad=False,
move_eos_to_beginning=True,
)
prev_output_tokens = prev_output_tokens.index_select(0, order)
ntokens = sum(x.target.size(0) for x in samples)
speaker = None
if self.speaker_to_id is not None:
speaker = (
torch.tensor([s.speaker_id for s in samples], dtype=torch.long)
.index_select(0, order)
.view(-1, 1)
)
net_input = {
"src_tokens": frames,
"src_lengths": n_frames,
"prev_output_tokens": prev_output_tokens,
}
out = {
"id": indices,
"net_input": net_input,
"speaker": speaker,
"target": target,
"target_lengths": target_lengths,
"ntokens": ntokens,
"nsentences": len(samples),
}
if return_order:
out["order"] = order
return out
def num_tokens(self, index):
return self.n_frames[index]
def size(self, index):
return self.n_frames[index], self.tgt_lens[index]
@property
def sizes(self):
return np.array(self.n_frames)
@property
def can_reuse_epoch_itr_across_epochs(self):
return True
def ordered_indices(self):
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
# first by descending order of # of frames then by original/random order
order.append([-n for n in self.n_frames])
return np.lexsort(order)
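        # Illustrative note (hypothetical values): np.lexsort uses its *last* key
        # as the primary sort key, so with n_frames = [10, 30, 30] and a shuffled
        # order [2, 0, 1] the result is [1, 2, 0]: indices are sorted by descending
        # frame count first, and the shuffled/arange order only breaks ties.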
def prefetch(self, indices):
        raise NotImplementedError
class SpeechToTextDatasetCreator(object):
# mandatory columns
KEY_ID, KEY_AUDIO, KEY_N_FRAMES = "id", "audio", "n_frames"
KEY_TGT_TEXT = "tgt_text"
# optional columns
KEY_SPEAKER, KEY_SRC_TEXT = "speaker", "src_text"
KEY_SRC_LANG, KEY_TGT_LANG = "src_lang", "tgt_lang"
# default values
DEFAULT_SPEAKER = DEFAULT_SRC_TEXT = DEFAULT_LANG = ""
@classmethod
def _from_list(
cls,
split_name: str,
is_train_split,
samples: List[Dict],
cfg: S2TDataConfig,
tgt_dict,
pre_tokenizer,
bpe_tokenizer,
n_frames_per_step,
speaker_to_id,
) -> SpeechToTextDataset:
audio_root = Path(cfg.audio_root)
ids = [s[cls.KEY_ID] for s in samples]
audio_paths = [(audio_root / s[cls.KEY_AUDIO]).as_posix() for s in samples]
n_frames = [int(s[cls.KEY_N_FRAMES]) for s in samples]
tgt_texts = [s[cls.KEY_TGT_TEXT] for s in samples]
src_texts = [s.get(cls.KEY_SRC_TEXT, cls.DEFAULT_SRC_TEXT) for s in samples]
speakers = [s.get(cls.KEY_SPEAKER, cls.DEFAULT_SPEAKER) for s in samples]
src_langs = [s.get(cls.KEY_SRC_LANG, cls.DEFAULT_LANG) for s in samples]
tgt_langs = [s.get(cls.KEY_TGT_LANG, cls.DEFAULT_LANG) for s in samples]
return SpeechToTextDataset(
split_name,
is_train_split,
cfg,
audio_paths,
n_frames,
src_texts=src_texts,
tgt_texts=tgt_texts,
speakers=speakers,
src_langs=src_langs,
tgt_langs=tgt_langs,
ids=ids,
tgt_dict=tgt_dict,
pre_tokenizer=pre_tokenizer,
bpe_tokenizer=bpe_tokenizer,
n_frames_per_step=n_frames_per_step,
speaker_to_id=speaker_to_id,
)
@classmethod
def get_size_ratios(
cls, datasets: List[SpeechToTextDataset], alpha: float = 1.0
) -> List[float]:
"""Size ratios for temperature-based sampling
(https://arxiv.org/abs/1907.05019)"""
id_to_lp, lp_to_sz = {}, defaultdict(int)
for ds in datasets:
lang_pairs = {f"{s}->{t}" for s, t in zip(ds.src_langs, ds.tgt_langs)}
assert len(lang_pairs) == 1
lang_pair = list(lang_pairs)[0]
id_to_lp[ds.split] = lang_pair
lp_to_sz[lang_pair] += sum(ds.n_frames)
sz_sum = sum(v for v in lp_to_sz.values())
lp_to_prob = {k: v / sz_sum for k, v in lp_to_sz.items()}
lp_to_tgt_prob = {k: v**alpha for k, v in lp_to_prob.items()}
prob_sum = sum(v for v in lp_to_tgt_prob.values())
lp_to_tgt_prob = {k: v / prob_sum for k, v in lp_to_tgt_prob.items()}
lp_to_sz_ratio = {
k: (lp_to_tgt_prob[k] * sz_sum) / v for k, v in lp_to_sz.items()
}
size_ratio = [lp_to_sz_ratio[id_to_lp[ds.split]] for ds in datasets]
p_formatted = {
k: f"{lp_to_prob[k]:.3f}->{lp_to_tgt_prob[k]:.3f}" for k in lp_to_sz
}
logger.info(f"sampling probability balancing: {p_formatted}")
sr_formatted = {ds.split: f"{r:.3f}" for ds, r in zip(datasets, size_ratio)}
logger.info(f"balanced sampling size ratio: {sr_formatted}")
return size_ratio
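        # Worked example (hypothetical sizes): with two language pairs holding 900
        # and 100 frames, the empirical probabilities are 0.9 and 0.1. For
        # alpha=0.5 the tempered probabilities become
        #   0.9**0.5 / (0.9**0.5 + 0.1**0.5) ~= 0.75   and   ~= 0.25,
        # so the returned size ratios are (0.75 * 1000) / 900 ~= 0.83 and
        # (0.25 * 1000) / 100 = 2.5, i.e. the low-resource pair gets upsampled.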
@classmethod
def _load_samples_from_tsv(cls, root: str, split: str):
tsv_path = Path(root) / f"{split}.tsv"
if not tsv_path.is_file():
raise FileNotFoundError(f"Dataset not found: {tsv_path}")
with open(tsv_path) as f:
reader = csv.DictReader(
f,
delimiter="\t",
quotechar=None,
doublequote=False,
lineterminator="\n",
quoting=csv.QUOTE_NONE,
)
samples = [dict(e) for e in reader]
if len(samples) == 0:
raise ValueError(f"Empty manifest: {tsv_path}")
return samples
@classmethod
def _from_tsv(
cls,
root: str,
cfg: S2TDataConfig,
split: str,
tgt_dict,
is_train_split: bool,
pre_tokenizer,
bpe_tokenizer,
n_frames_per_step,
speaker_to_id,
) -> SpeechToTextDataset:
samples = cls._load_samples_from_tsv(root, split)
return cls._from_list(
split,
is_train_split,
samples,
cfg,
tgt_dict,
pre_tokenizer,
bpe_tokenizer,
n_frames_per_step,
speaker_to_id,
)
@classmethod
def from_tsv(
cls,
root: str,
cfg: S2TDataConfig,
splits: str,
tgt_dict,
pre_tokenizer,
bpe_tokenizer,
is_train_split: bool,
epoch: int,
seed: int,
n_frames_per_step: int = 1,
speaker_to_id=None,
) -> SpeechToTextDataset:
datasets = [
cls._from_tsv(
root,
cfg,
split,
tgt_dict,
is_train_split,
pre_tokenizer,
bpe_tokenizer,
n_frames_per_step,
speaker_to_id,
)
for split in splits.split(",")
]
if is_train_split and len(datasets) > 1 and cfg.sampling_alpha != 1.0:
# temperature-based sampling
size_ratios = cls.get_size_ratios(datasets, alpha=cfg.sampling_alpha)
datasets = [
ResamplingDataset(
d, size_ratio=r, seed=seed, epoch=epoch, replace=(r >= 1.0)
)
for r, d in zip(size_ratios, datasets)
]
return ConcatDataset(datasets) if len(datasets) > 1 else datasets[0]
| mit |
florian-f/sklearn | examples/cluster/plot_kmeans_digits.py | 4 | 4494 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import pylab as pl
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
      '    time  inertia    homo   compl  v-meas     ARI     AMI  silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
    print('% 9s   %.2fs    %i   %.3f   %.3f   %.3f   %.3f   %.3f    %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # point in the mesh [x_min, x_max] x [y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each point of the mesh.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
pl.figure(1)
pl.clf()
pl.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=pl.cm.Paired,
aspect='auto', origin='lower')
pl.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
pl.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
pl.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
pl.xlim(x_min, x_max)
pl.ylim(y_min, y_max)
pl.xticks(())
pl.yticks(())
pl.show()
| bsd-3-clause |
florian-f/sklearn | sklearn/decomposition/tests/test_factor_analysis.py | 7 | 1674 | # Author: Christian Osendorfer <[email protected]>
# Alexandre Gramfort <[email protected]>
# Licence: BSD3
import numpy as np
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_almost_equal
from sklearn.decomposition import FactorAnalysis
def test_factor_analysis():
"""Test FactorAnalysis ability to recover the data covariance structure
"""
rng = np.random.RandomState(0)
n_samples, n_features, n_components = 20, 5, 3
# Some random settings for the generative model
W = rng.randn(n_components, n_features)
    # latent variables of dim 3, 20 of them
h = rng.randn(n_samples, n_components)
# using gamma to model different noise variance
# per component
noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
# generate observations
# wlog, mean is 0
X = np.dot(h, W) + noise
fa = FactorAnalysis(n_components=n_components)
fa.fit(X)
X_t = fa.transform(X)
assert_true(X_t.shape == (n_samples, n_components))
assert_almost_equal(fa.loglike_[-1], fa.score(X).sum())
# Make log likelihood increases at each iteration
assert_true(np.all(np.diff(fa.loglike_) > 0.))
# Sample Covariance
scov = np.cov(X, rowvar=0., bias=1.)
# Model Covariance
mcov = fa.get_covariance()
diff = np.sum(np.abs(scov - mcov)) / W.size
assert_true(diff < 0.1, "Mean absolute difference is %f" % diff)
fa = FactorAnalysis(n_components=n_components,
noise_variance_init=np.ones(n_features))
assert_raises(ValueError, fa.fit, X[:, :2])
| bsd-3-clause |
anntzer/scikit-learn | sklearn/linear_model/tests/test_huber.py | 17 | 7463 | # Authors: Manoj Kumar [email protected]
# License: BSD 3 clause
import numpy as np
from scipy import optimize, sparse
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.datasets import make_regression
from sklearn.linear_model import HuberRegressor, LinearRegression, SGDRegressor, Ridge
from sklearn.linear_model._huber import _huber_loss_and_gradient
def make_regression_with_outliers(n_samples=50, n_features=20):
rng = np.random.RandomState(0)
# Generate data with outliers by replacing 10% of the samples with noise.
X, y = make_regression(
n_samples=n_samples, n_features=n_features, random_state=0, noise=0.05
)
# Replace 10% of the sample with noise.
num_noise = int(0.1 * n_samples)
random_samples = rng.randint(0, n_samples, num_noise)
X[random_samples, :] = 2.0 * rng.normal(0, 1, (num_noise, X.shape[1]))
return X, y
def test_huber_equals_lr_for_high_epsilon():
    # Test that HuberRegressor matches LinearRegression for large epsilon
X, y = make_regression_with_outliers()
lr = LinearRegression()
lr.fit(X, y)
huber = HuberRegressor(epsilon=1e3, alpha=0.0)
huber.fit(X, y)
assert_almost_equal(huber.coef_, lr.coef_, 3)
assert_almost_equal(huber.intercept_, lr.intercept_, 2)
def test_huber_max_iter():
X, y = make_regression_with_outliers()
huber = HuberRegressor(max_iter=1)
huber.fit(X, y)
assert huber.n_iter_ == huber.max_iter
def test_huber_gradient():
# Test that the gradient calculated by _huber_loss_and_gradient is correct
rng = np.random.RandomState(1)
X, y = make_regression_with_outliers()
sample_weight = rng.randint(1, 3, (y.shape[0]))
def loss_func(x, *args):
return _huber_loss_and_gradient(x, *args)[0]
def grad_func(x, *args):
return _huber_loss_and_gradient(x, *args)[1]
# Check using optimize.check_grad that the gradients are equal.
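    # (optimize.check_grad returns the 2-norm of the difference between the
    # analytical gradient and a finite-difference approximation, so the assert
    # below requires that norm to match ~1e-6 to 4 decimal places, i.e. the
    # discrepancy is at most about 1.5e-4.)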
for _ in range(5):
# Check for both fit_intercept and otherwise.
for n_features in [X.shape[1] + 1, X.shape[1] + 2]:
w = rng.randn(n_features)
w[-1] = np.abs(w[-1])
grad_same = optimize.check_grad(
loss_func, grad_func, w, X, y, 0.01, 0.1, sample_weight
)
assert_almost_equal(grad_same, 1e-6, 4)
def test_huber_sample_weights():
    # Test sample_weights implementation in HuberRegressor
X, y = make_regression_with_outliers()
huber = HuberRegressor()
huber.fit(X, y)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
# Rescale coefs before comparing with assert_array_almost_equal to make
# sure that the number of decimal places used is somewhat insensitive to
# the amplitude of the coefficients and therefore to the scale of the
# data and the regularization parameter
scale = max(np.mean(np.abs(huber.coef_)), np.mean(np.abs(huber.intercept_)))
huber.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
assert_array_almost_equal(huber.intercept_ / scale, huber_intercept / scale)
X, y = make_regression_with_outliers(n_samples=5, n_features=20)
X_new = np.vstack((X, np.vstack((X[1], X[1], X[3]))))
y_new = np.concatenate((y, [y[1]], [y[1]], [y[3]]))
huber.fit(X_new, y_new)
huber_coef = huber.coef_
huber_intercept = huber.intercept_
sample_weight = np.ones(X.shape[0])
sample_weight[1] = 3
sample_weight[3] = 2
huber.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(huber.coef_ / scale, huber_coef / scale)
assert_array_almost_equal(huber.intercept_ / scale, huber_intercept / scale)
# Test sparse implementation with sample weights.
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor()
huber_sparse.fit(X_csr, y, sample_weight=sample_weight)
assert_array_almost_equal(huber_sparse.coef_ / scale, huber_coef / scale)
def test_huber_sparse():
X, y = make_regression_with_outliers()
huber = HuberRegressor(alpha=0.1)
huber.fit(X, y)
X_csr = sparse.csr_matrix(X)
huber_sparse = HuberRegressor(alpha=0.1)
huber_sparse.fit(X_csr, y)
assert_array_almost_equal(huber_sparse.coef_, huber.coef_)
assert_array_equal(huber.outliers_, huber_sparse.outliers_)
def test_huber_scaling_invariant():
# Test that outliers filtering is scaling independent.
X, y = make_regression_with_outliers()
huber = HuberRegressor(fit_intercept=False, alpha=0.0)
huber.fit(X, y)
n_outliers_mask_1 = huber.outliers_
assert not np.all(n_outliers_mask_1)
huber.fit(X, 2.0 * y)
n_outliers_mask_2 = huber.outliers_
assert_array_equal(n_outliers_mask_2, n_outliers_mask_1)
huber.fit(2.0 * X, 2.0 * y)
n_outliers_mask_3 = huber.outliers_
assert_array_equal(n_outliers_mask_3, n_outliers_mask_1)
def test_huber_and_sgd_same_results():
    # Test that they converge to the same coefficients for the same parameters
X, y = make_regression_with_outliers(n_samples=10, n_features=2)
# Fit once to find out the scale parameter. Scale down X and y by scale
# so that the scale parameter is optimized to 1.0
huber = HuberRegressor(fit_intercept=False, alpha=0.0, epsilon=1.35)
huber.fit(X, y)
X_scale = X / huber.scale_
y_scale = y / huber.scale_
huber.fit(X_scale, y_scale)
assert_almost_equal(huber.scale_, 1.0, 3)
sgdreg = SGDRegressor(
alpha=0.0,
loss="huber",
shuffle=True,
random_state=0,
max_iter=10000,
fit_intercept=False,
epsilon=1.35,
tol=None,
)
sgdreg.fit(X_scale, y_scale)
assert_array_almost_equal(huber.coef_, sgdreg.coef_, 1)
def test_huber_warm_start():
X, y = make_regression_with_outliers()
huber_warm = HuberRegressor(alpha=1.0, max_iter=10000, warm_start=True, tol=1e-1)
huber_warm.fit(X, y)
huber_warm_coef = huber_warm.coef_.copy()
huber_warm.fit(X, y)
# SciPy performs the tol check after doing the coef updates, so
# these would be almost same but not equal.
assert_array_almost_equal(huber_warm.coef_, huber_warm_coef, 1)
assert huber_warm.n_iter_ == 0
def test_huber_better_r2_score():
    # Test that Huber gives a better r2 score than Ridge on the non-outliers
X, y = make_regression_with_outliers()
huber = HuberRegressor(alpha=0.01)
huber.fit(X, y)
linear_loss = np.dot(X, huber.coef_) + huber.intercept_ - y
mask = np.abs(linear_loss) < huber.epsilon * huber.scale_
huber_score = huber.score(X[mask], y[mask])
huber_outlier_score = huber.score(X[~mask], y[~mask])
# The Ridge regressor should be influenced by the outliers and hence
# give a worse score on the non-outliers as compared to the huber
# regressor.
ridge = Ridge(alpha=0.01)
ridge.fit(X, y)
ridge_score = ridge.score(X[mask], y[mask])
ridge_outlier_score = ridge.score(X[~mask], y[~mask])
assert huber_score > ridge_score
# The huber model should also fit poorly on the outliers.
assert ridge_outlier_score > huber_outlier_score
def test_huber_bool():
# Test that it does not crash with bool data
X, y = make_regression(n_samples=200, n_features=2, noise=4.0, random_state=0)
X_bool = X > 0
HuberRegressor().fit(X_bool, y)
| bsd-3-clause |
darshanthaker/nupic | examples/opf/experiments/anomaly/temporal/noisy_saw/description.py | 32 | 14305 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
import os

from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
# at each step 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': { 'f': { 'clipInput': True,
'fieldname': u'f',
'maxval': 520,
'minval': 0,
'n': 500,
'name': u'f',
'type': 'ScalarEncoder',
'w': 21}},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
        # Valid keys are the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses. At initialization time, we will
# choose potentialPct * (2*potentialRadius+1)^2
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
# Classifier implementation selection.
'implementation': 'cpp',
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.0001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupic/cluster/database/StreamDef.json.
#
'dataset' : { u'info': u'cerebro_dummy',
u'streams': [ { u'columns': [u'*'],
u'info': u'test data',
u'source': u'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'),
}
],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
'iterationCount' : -1,
# A dictionary containing all the supplementary parameters for inference
"inferenceArgs":None,
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'f', metric='aae', inferenceElement='prediction', params={'window': 1000}),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*nupicScore.*'],
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| agpl-3.0 |