  1. """Kernel Principal Components Analysis."""
  2. # Author: Mathieu Blondel <mathieu@mblondel.org>
  3. # Sylvain Marie <sylvain.marie@schneider-electric.com>
  4. # License: BSD 3 clause
  5. from numbers import Integral, Real
  6. import numpy as np
  7. from scipy import linalg
  8. from scipy.linalg import eigh
  9. from scipy.sparse.linalg import eigsh
  10. from ..base import (
  11. BaseEstimator,
  12. ClassNamePrefixFeaturesOutMixin,
  13. TransformerMixin,
  14. _fit_context,
  15. )
  16. from ..exceptions import NotFittedError
  17. from ..metrics.pairwise import pairwise_kernels
  18. from ..preprocessing import KernelCenterer
  19. from ..utils._arpack import _init_arpack_v0
  20. from ..utils._param_validation import Interval, StrOptions
  21. from ..utils.extmath import _randomized_eigsh, svd_flip
  22. from ..utils.validation import (
  23. _check_psd_eigenvalues,
  24. check_is_fitted,
  25. )


class KernelPCA(ClassNamePrefixFeaturesOutMixin, TransformerMixin, BaseEstimator):
    """Kernel Principal component analysis (KPCA) [1]_.

    Non-linear dimensionality reduction through the use of kernels (see
    :ref:`metrics`).

    It uses the :func:`scipy.linalg.eigh` LAPACK implementation of the full SVD
    or the :func:`scipy.sparse.linalg.eigsh` ARPACK implementation of the
    truncated SVD, depending on the shape of the input data and the number of
    components to extract. It can also use a randomized truncated SVD by the
    method proposed in [3]_, see `eigen_solver`.

    Read more in the :ref:`User Guide <kernel_PCA>`.

    Parameters
    ----------
    n_components : int, default=None
        Number of components. If None, all non-zero components are kept.

    kernel : {'linear', 'poly', 'rbf', 'sigmoid', 'cosine', 'precomputed'} \
            or callable, default='linear'
        Kernel used for PCA.

    gamma : float, default=None
        Kernel coefficient for rbf, poly and sigmoid kernels. Ignored by other
        kernels. If ``gamma`` is ``None``, then it is set to ``1/n_features``.

    degree : int, default=3
        Degree for poly kernels. Ignored by other kernels.

    coef0 : float, default=1
        Independent term in poly and sigmoid kernels.
        Ignored by other kernels.

    kernel_params : dict, default=None
        Parameters (keyword arguments) and
        values for kernel passed as callable object.
        Ignored by other kernels.

    alpha : float, default=1.0
        Hyperparameter of the ridge regression that learns the
        inverse transform (when fit_inverse_transform=True).

    fit_inverse_transform : bool, default=False
        Learn the inverse transform for non-precomputed kernels
        (i.e. learn to find the pre-image of a point). This method is based
        on [2]_.

    eigen_solver : {'auto', 'dense', 'arpack', 'randomized'}, \
            default='auto'
        Select eigensolver to use. If `n_components` is much
        less than the number of training samples, randomized (or arpack to a
        smaller extent) may be more efficient than the dense eigensolver.
        Randomized SVD is performed according to the method of Halko et al.
        [3]_.

        auto :
            the solver is selected by a default policy based on n_samples
            (the number of training samples) and `n_components`:
            if the number of components to extract is less than 10 (strict)
            and the number of samples is more than 200 (strict), the 'arpack'
            method is enabled. Otherwise the exact full eigenvalue
            decomposition is computed and optionally truncated afterwards
            ('dense' method).
        dense :
            run exact full eigenvalue decomposition calling the standard
            LAPACK solver via `scipy.linalg.eigh`, and select the components
            by postprocessing.
        arpack :
            run SVD truncated to n_components calling ARPACK solver using
            `scipy.sparse.linalg.eigsh`. It requires strictly
            0 < n_components < n_samples.
        randomized :
            run randomized SVD by the method of Halko et al. [3]_. The current
            implementation selects eigenvalues based on their modulus;
            therefore using this method can lead to unexpected results if the
            kernel is not positive semi-definite. See also [4]_.

        .. versionchanged:: 1.0
           `'randomized'` was added.

    tol : float, default=0
        Convergence tolerance for arpack.
        If 0, optimal value will be chosen by arpack.

    max_iter : int, default=None
        Maximum number of iterations for arpack.
        If None, optimal value will be chosen by arpack.

    iterated_power : int >= 0, or 'auto', default='auto'
        Number of iterations for the power method computed when
        `eigen_solver == 'randomized'`. When 'auto', it is set to 7 when
        `n_components < 0.1 * min(X.shape)`, otherwise it is set to 4.

        .. versionadded:: 1.0

    remove_zero_eig : bool, default=False
        If True, then all components with zero eigenvalues are removed, so
        that the number of components in the output may be < n_components
        (and sometimes even zero due to numerical instability).
        When n_components is None, this parameter is ignored and components
        with zero eigenvalues are removed regardless.

    random_state : int, RandomState instance or None, default=None
        Used when ``eigen_solver`` == 'arpack' or 'randomized'. Pass an int
        for reproducible results across multiple function calls.
        See :term:`Glossary <random_state>`.

        .. versionadded:: 0.18

    copy_X : bool, default=True
        If True, input X is copied and stored by the model in the `X_fit_`
        attribute. If no further changes will be done to X, setting
        `copy_X=False` saves memory by storing a reference.

        .. versionadded:: 0.18

    n_jobs : int, default=None
        The number of parallel jobs to run.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.

        .. versionadded:: 0.18

    Attributes
    ----------
    eigenvalues_ : ndarray of shape (n_components,)
        Eigenvalues of the centered kernel matrix in decreasing order.
        If `n_components` and `remove_zero_eig` are not set,
        then all values are stored.

    eigenvectors_ : ndarray of shape (n_samples, n_components)
        Eigenvectors of the centered kernel matrix. If `n_components` and
        `remove_zero_eig` are not set, then all components are stored.

    dual_coef_ : ndarray of shape (n_samples, n_features)
        Inverse transform matrix. Only available when
        ``fit_inverse_transform`` is True.

    X_transformed_fit_ : ndarray of shape (n_samples, n_components)
        Projection of the fitted data on the kernel principal components.
        Only available when ``fit_inverse_transform`` is True.

    X_fit_ : ndarray of shape (n_samples, n_features)
        The data used to fit the model. If `copy_X=False`, then `X_fit_` is
        a reference. This attribute is used for the calls to transform.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    gamma_ : float
        Kernel coefficient for rbf, poly and sigmoid kernels. When `gamma`
        is explicitly provided, this is just the same as `gamma`. When `gamma`
        is `None`, this is the actual value of the kernel coefficient.

        .. versionadded:: 1.3

    See Also
    --------
    FastICA : A fast algorithm for Independent Component Analysis.
    IncrementalPCA : Incremental Principal Component Analysis.
    NMF : Non-Negative Matrix Factorization.
    PCA : Principal Component Analysis.
    SparsePCA : Sparse Principal Component Analysis.
    TruncatedSVD : Dimensionality reduction using truncated SVD.

    References
    ----------
    .. [1] `Schölkopf, Bernhard, Alexander Smola, and Klaus-Robert Müller.
       "Kernel principal component analysis."
       International conference on artificial neural networks.
       Springer, Berlin, Heidelberg, 1997.
       <https://people.eecs.berkeley.edu/~wainwrig/stat241b/scholkopf_kernel.pdf>`_

    .. [2] `Bakır, Gökhan H., Jason Weston, and Bernhard Schölkopf.
       "Learning to find pre-images."
       Advances in neural information processing systems 16 (2004): 449-456.
       <https://papers.nips.cc/paper/2003/file/ac1ad983e08ad3304a97e147f522747e-Paper.pdf>`_

    .. [3] :arxiv:`Halko, Nathan, Per-Gunnar Martinsson, and Joel A. Tropp.
       "Finding structure with randomness: Probabilistic algorithms for
       constructing approximate matrix decompositions."
       SIAM review 53.2 (2011): 217-288. <0909.4061>`

    .. [4] `Martinsson, Per-Gunnar, Vladimir Rokhlin, and Mark Tygert.
       "A randomized algorithm for the decomposition of matrices."
       Applied and Computational Harmonic Analysis 30.1 (2011): 47-68.
       <https://www.sciencedirect.com/science/article/pii/S1063520310000242>`_

    Examples
    --------
    >>> from sklearn.datasets import load_digits
    >>> from sklearn.decomposition import KernelPCA
    >>> X, _ = load_digits(return_X_y=True)
    >>> transformer = KernelPCA(n_components=7, kernel='linear')
    >>> X_transformed = transformer.fit_transform(X)
    >>> X_transformed.shape
    (1797, 7)
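
    A non-linear kernel with a learned approximate inverse mapping (a minimal
    sketch: the reconstruction is approximate and data-dependent, so only the
    shape of the recovered pre-images is checked here):

    >>> transformer = KernelPCA(n_components=7, kernel='rbf',
    ...                         fit_inverse_transform=True)
    >>> X_back = transformer.inverse_transform(transformer.fit_transform(X))
    >>> X_back.shape
    (1797, 64)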
  191. """

    _parameter_constraints: dict = {
        "n_components": [
            Interval(Integral, 1, None, closed="left"),
            None,
        ],
        "kernel": [
            StrOptions({"linear", "poly", "rbf", "sigmoid", "cosine", "precomputed"}),
            callable,
        ],
        "gamma": [
            Interval(Real, 0, None, closed="left"),
            None,
        ],
        "degree": [Interval(Integral, 0, None, closed="left")],
        "coef0": [Interval(Real, None, None, closed="neither")],
        "kernel_params": [dict, None],
        "alpha": [Interval(Real, 0, None, closed="left")],
        "fit_inverse_transform": ["boolean"],
        "eigen_solver": [StrOptions({"auto", "dense", "arpack", "randomized"})],
        "tol": [Interval(Real, 0, None, closed="left")],
        "max_iter": [
            Interval(Integral, 1, None, closed="left"),
            None,
        ],
        "iterated_power": [
            Interval(Integral, 0, None, closed="left"),
            StrOptions({"auto"}),
        ],
        "remove_zero_eig": ["boolean"],
        "random_state": ["random_state"],
        "copy_X": ["boolean"],
        "n_jobs": [None, Integral],
    }

    def __init__(
        self,
        n_components=None,
        *,
        kernel="linear",
        gamma=None,
        degree=3,
        coef0=1,
        kernel_params=None,
        alpha=1.0,
        fit_inverse_transform=False,
        eigen_solver="auto",
        tol=0,
        max_iter=None,
        iterated_power="auto",
        remove_zero_eig=False,
        random_state=None,
        copy_X=True,
        n_jobs=None,
    ):
        self.n_components = n_components
        self.kernel = kernel
        self.kernel_params = kernel_params
        self.gamma = gamma
        self.degree = degree
        self.coef0 = coef0
        self.alpha = alpha
        self.fit_inverse_transform = fit_inverse_transform
        self.eigen_solver = eigen_solver
        self.tol = tol
        self.max_iter = max_iter
        self.iterated_power = iterated_power
        self.remove_zero_eig = remove_zero_eig
        self.random_state = random_state
        self.n_jobs = n_jobs
        self.copy_X = copy_X

    def _get_kernel(self, X, Y=None):
        if callable(self.kernel):
            params = self.kernel_params or {}
        else:
            params = {"gamma": self.gamma_, "degree": self.degree, "coef0": self.coef0}
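        # filter_params=True lets pairwise_kernels drop any entry in `params`
        # that the selected string kernel does not accept (e.g. `degree` is
        # ignored by the rbf kernel)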
        return pairwise_kernels(
            X, Y, metric=self.kernel, filter_params=True, n_jobs=self.n_jobs, **params
        )

    def _fit_transform(self, K):
        """Fit the model using the kernel matrix K."""
        # center kernel
        K = self._centerer.fit_transform(K)

        # adjust n_components according to user inputs
        if self.n_components is None:
            n_components = K.shape[0]  # use all dimensions
        else:
            n_components = min(K.shape[0], self.n_components)

        # compute eigenvectors
        if self.eigen_solver == "auto":
            if K.shape[0] > 200 and n_components < 10:
                eigen_solver = "arpack"
            else:
                eigen_solver = "dense"
        else:
            eigen_solver = self.eigen_solver

        if eigen_solver == "dense":
            # Note: subset_by_index specifies the indices of smallest/largest to return
            self.eigenvalues_, self.eigenvectors_ = eigh(
                K, subset_by_index=(K.shape[0] - n_components, K.shape[0] - 1)
            )
        elif eigen_solver == "arpack":
            v0 = _init_arpack_v0(K.shape[0], self.random_state)
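            # which="LA" requests the largest algebraic eigenvalues; for a
            # (numerically) PSD kernel these are also the largest in magnitude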
            self.eigenvalues_, self.eigenvectors_ = eigsh(
                K, n_components, which="LA", tol=self.tol, maxiter=self.max_iter, v0=v0
            )
        elif eigen_solver == "randomized":
            self.eigenvalues_, self.eigenvectors_ = _randomized_eigsh(
                K,
                n_components=n_components,
                n_iter=self.iterated_power,
                random_state=self.random_state,
                selection="module",
            )

        # make sure that the eigenvalues are ok and fix numerical issues
        self.eigenvalues_ = _check_psd_eigenvalues(
            self.eigenvalues_, enable_warnings=False
        )

        # flip eigenvectors' sign to enforce deterministic output
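        # (the zero matrix below is a dummy stand-in for V^T: with the default
        # u_based_decision=True, svd_flip takes the sign of each eigenvector
        # column from its largest-magnitude entry)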
        self.eigenvectors_, _ = svd_flip(
            self.eigenvectors_, np.zeros_like(self.eigenvectors_).T
        )

        # sort eigenvectors in descending order
        indices = self.eigenvalues_.argsort()[::-1]
        self.eigenvalues_ = self.eigenvalues_[indices]
        self.eigenvectors_ = self.eigenvectors_[:, indices]

        # remove eigenvectors with a zero eigenvalue (null space) if required
        if self.remove_zero_eig or self.n_components is None:
            self.eigenvectors_ = self.eigenvectors_[:, self.eigenvalues_ > 0]
            self.eigenvalues_ = self.eigenvalues_[self.eigenvalues_ > 0]

        # Maintenance note on Eigenvectors normalization
        # ----------------------------------------------
        # there is a link between
        # the eigenvectors of K=Phi(X)'Phi(X) and the ones of Phi(X)Phi(X)'
        # if v is an eigenvector of K
        #     then Phi(X)v is an eigenvector of Phi(X)Phi(X)'
        # if u is an eigenvector of Phi(X)Phi(X)'
        #     then Phi(X)'u is an eigenvector of Phi(X)'Phi(X)
        #
        # At this stage our self.eigenvectors_ (the v) have norm 1, we need to
        # scale them so that the eigenvectors in kernel feature space (the u)
        # have norm=1 instead
        #
        # We COULD scale them here:
        #       self.eigenvectors_ = self.eigenvectors_ / np.sqrt(self.eigenvalues_)
        #
        # But we choose to perform that LATER when needed, in `fit()` and in
        # `transform()`.

        return K

    def _fit_inverse_transform(self, X_transformed, X):
        if hasattr(X, "tocsr"):
            raise NotImplementedError(
                "Inverse transform not implemented for sparse matrices!"
            )

        n_samples = X_transformed.shape[0]
        K = self._get_kernel(X_transformed)
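        # Kernel ridge regression of X on X_transformed: adding alpha to the
        # diagonal of K in place (the flat stride n_samples + 1 walks the
        # diagonal) forms K + alpha * I, then the solve below yields
        # dual_coef_ such that (K + alpha * I) @ dual_coef_ = X.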
        K.flat[:: n_samples + 1] += self.alpha
        self.dual_coef_ = linalg.solve(K, X, assume_a="pos", overwrite_a=True)
        self.X_transformed_fit_ = X_transformed

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """Fit the model from data in X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training vector, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        y : Ignored
            Not used, present for API consistency by convention.

        Returns
        -------
        self : object
            Returns the instance itself.
        """
        if self.fit_inverse_transform and self.kernel == "precomputed":
            raise ValueError("Cannot fit_inverse_transform with a precomputed kernel.")
        X = self._validate_data(X, accept_sparse="csr", copy=self.copy_X)
        self.gamma_ = 1 / X.shape[1] if self.gamma is None else self.gamma
        self._centerer = KernelCenterer().set_output(transform="default")
        K = self._get_kernel(X)
        self._fit_transform(K)

        if self.fit_inverse_transform:
            # no need to use the kernel to transform X, use shortcut expression
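            # (on the training data, K @ v = lambda * v for each eigenvector v,
            # so projecting onto v / sqrt(lambda) as done in `transform` yields
            # sqrt(lambda) * v directly)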
            X_transformed = self.eigenvectors_ * np.sqrt(self.eigenvalues_)
            self._fit_inverse_transform(X_transformed, X)

        self.X_fit_ = X
        return self

    def fit_transform(self, X, y=None, **params):
        """Fit the model from data in X and transform X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Training vector, where `n_samples` is the number of samples
            and `n_features` is the number of features.

        y : Ignored
            Not used, present for API consistency by convention.

        **params : kwargs
            Parameters (keyword arguments) passed on to the ``fit`` method.

        Returns
        -------
        X_new : ndarray of shape (n_samples, n_components)
            Transformed values.
        """
        self.fit(X, **params)

        # no need to use the kernel to transform X, use shortcut expression
        X_transformed = self.eigenvectors_ * np.sqrt(self.eigenvalues_)

        if self.fit_inverse_transform:
            self._fit_inverse_transform(X_transformed, X)

        return X_transformed

    def transform(self, X):
        """Transform X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Vector to be transformed, where `n_samples` is the number of
            samples and `n_features` is the number of features.

        Returns
        -------
        X_new : ndarray of shape (n_samples, n_components)
            Transformed values.
        """
        check_is_fitted(self)
        X = self._validate_data(X, accept_sparse="csr", reset=False)

        # Compute centered gram matrix between X and training data X_fit_
        K = self._centerer.transform(self._get_kernel(X, self.X_fit_))

        # scale eigenvectors (properly account for null-space for dot product)
        non_zeros = np.flatnonzero(self.eigenvalues_)
        scaled_alphas = np.zeros_like(self.eigenvectors_)
        scaled_alphas[:, non_zeros] = self.eigenvectors_[:, non_zeros] / np.sqrt(
            self.eigenvalues_[non_zeros]
        )

        # Project with a scalar product between K and the scaled eigenvectors
        return np.dot(K, scaled_alphas)

    def inverse_transform(self, X):
        """Transform X back to original space.

        ``inverse_transform`` approximates the inverse transformation using
        a learned pre-image. The pre-image is learned by kernel ridge
        regression of the original data on their low-dimensional representation
        vectors.

        .. note::
            :meth:`~sklearn.decomposition.KernelPCA.fit` internally uses a
            centered kernel. As the centered kernel no longer contains the
            information of the mean of kernel features, such information is
            not taken into account in reconstruction.

        .. note::
            When users want to compute inverse transformation for 'linear'
            kernel, it is recommended that they use
            :class:`~sklearn.decomposition.PCA` instead. Unlike
            :class:`~sklearn.decomposition.PCA`,
            :class:`~sklearn.decomposition.KernelPCA`'s ``inverse_transform``
            does not reconstruct the mean of data when 'linear' kernel is used
            due to the use of centered kernel.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_components)
            Vector to be inverse-transformed, where `n_samples` is the number
            of samples and `n_components` is the number of components.

        Returns
        -------
        X_new : ndarray of shape (n_samples, n_features)
            Reconstruction of X in the original space (approximate pre-images).

        References
        ----------
        `Bakır, Gökhan H., Jason Weston, and Bernhard Schölkopf.
        "Learning to find pre-images."
        Advances in neural information processing systems 16 (2004): 449-456.
        <https://papers.nips.cc/paper/2003/file/ac1ad983e08ad3304a97e147f522747e-Paper.pdf>`_
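
        Examples
        --------
        A minimal round-trip sketch on a small random dataset (the
        reconstruction is approximate; only its shape is checked here):

        >>> import numpy as np
        >>> from sklearn.decomposition import KernelPCA
        >>> X = np.random.RandomState(0).uniform(size=(10, 4))
        >>> kpca = KernelPCA(n_components=2, kernel='rbf',
        ...                  fit_inverse_transform=True, alpha=0.1)
        >>> kpca.inverse_transform(kpca.fit_transform(X)).shape
        (10, 4)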
  458. """
        if not self.fit_inverse_transform:
            raise NotFittedError(
                "The fit_inverse_transform parameter was not"
                " set to True when instantiating and hence "
                "the inverse transform is not available."
            )

        K = self._get_kernel(X, self.X_transformed_fit_)
        return np.dot(K, self.dual_coef_)

    def _more_tags(self):
        return {
            "preserves_dtype": [np.float64, np.float32],
            "pairwise": self.kernel == "precomputed",
        }

    @property
    def _n_features_out(self):
        """Number of transformed output features."""
        return self.eigenvalues_.shape[0]