extmath.py 40 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199
  1. """
  2. Extended math utilities.
  3. """
  4. # Authors: Gael Varoquaux
  5. # Alexandre Gramfort
  6. # Alexandre T. Passos
  7. # Olivier Grisel
  8. # Lars Buitinck
  9. # Stefan van der Walt
  10. # Kyle Kastner
  11. # Giorgio Patrini
  12. # License: BSD 3 clause
  13. import warnings
  14. import numpy as np
  15. from scipy import linalg, sparse
  16. from . import check_random_state
  17. from ._array_api import _is_numpy_namespace, get_namespace
  18. from ._logistic_sigmoid import _log_logistic_sigmoid
  19. from .sparsefuncs_fast import csr_row_norms
  20. from .validation import check_array
  21. def squared_norm(x):
  22. """Squared Euclidean or Frobenius norm of x.
  23. Faster than norm(x) ** 2.
  24. Parameters
  25. ----------
  26. x : array-like
  27. The input array which could be either be a vector or a 2 dimensional array.
  28. Returns
  29. -------
  30. float
  31. The Euclidean norm when x is a vector, the Frobenius norm when x
  32. is a matrix (2-d array).
  33. """
  34. x = np.ravel(x, order="K")
  35. if np.issubdtype(x.dtype, np.integer):
  36. warnings.warn(
  37. (
  38. "Array type is integer, np.dot may overflow. "
  39. "Data should be float type to avoid this issue"
  40. ),
  41. UserWarning,
  42. )
  43. return np.dot(x, x)
  44. def row_norms(X, squared=False):
  45. """Row-wise (squared) Euclidean norm of X.
  46. Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports sparse
  47. matrices and does not create an X.shape-sized temporary.
  48. Performs no input validation.
  49. Parameters
  50. ----------
  51. X : array-like
  52. The input array.
  53. squared : bool, default=False
  54. If True, return squared norms.
  55. Returns
  56. -------
  57. array-like
  58. The row-wise (squared) Euclidean norm of X.
  59. """
  60. if sparse.issparse(X):
  61. X = X.tocsr()
  62. norms = csr_row_norms(X)
  63. else:
  64. norms = np.einsum("ij,ij->i", X, X)
  65. if not squared:
  66. np.sqrt(norms, norms)
  67. return norms
  68. def fast_logdet(A):
  69. """Compute logarithm of determinant of a square matrix.
  70. The (natural) logarithm of the determinant of a square matrix
  71. is returned if det(A) is non-negative and well defined.
  72. If the determinant is zero or negative returns -Inf.
  73. Equivalent to : np.log(np.det(A)) but more robust.
  74. Parameters
  75. ----------
  76. A : array_like of shape (n, n)
  77. The square matrix.
  78. Returns
  79. -------
  80. logdet : float
  81. When det(A) is strictly positive, log(det(A)) is returned.
  82. When det(A) is non-positive or not defined, then -inf is returned.
  83. See Also
  84. --------
  85. numpy.linalg.slogdet : Compute the sign and (natural) logarithm of the determinant
  86. of an array.
  87. Examples
  88. --------
  89. >>> import numpy as np
  90. >>> from sklearn.utils.extmath import fast_logdet
  91. >>> a = np.array([[5, 1], [2, 8]])
  92. >>> fast_logdet(a)
  93. 3.6375861597263857
  94. """
  95. sign, ld = np.linalg.slogdet(A)
  96. if not sign > 0:
  97. return -np.inf
  98. return ld
  99. def density(w, **kwargs):
  100. """Compute density of a sparse vector.
  101. Parameters
  102. ----------
  103. w : array-like
  104. The sparse vector.
  105. **kwargs : keyword arguments
  106. Ignored.
  107. .. deprecated:: 1.2
  108. ``**kwargs`` were deprecated in version 1.2 and will be removed in
  109. 1.4.
  110. Returns
  111. -------
  112. float
  113. The density of w, between 0 and 1.
  114. """
  115. if kwargs:
  116. warnings.warn(
  117. (
  118. "Additional keyword arguments are deprecated in version 1.2 and will be"
  119. " removed in version 1.4."
  120. ),
  121. FutureWarning,
  122. )
  123. if hasattr(w, "toarray"):
  124. d = float(w.nnz) / (w.shape[0] * w.shape[1])
  125. else:
  126. d = 0 if w is None else float((w != 0).sum()) / w.size
  127. return d
  128. def safe_sparse_dot(a, b, *, dense_output=False):
  129. """Dot product that handle the sparse matrix case correctly.
  130. Parameters
  131. ----------
  132. a : {ndarray, sparse matrix}
  133. b : {ndarray, sparse matrix}
  134. dense_output : bool, default=False
  135. When False, ``a`` and ``b`` both being sparse will yield sparse output.
  136. When True, output will always be a dense array.
  137. Returns
  138. -------
  139. dot_product : {ndarray, sparse matrix}
  140. Sparse if ``a`` and ``b`` are sparse and ``dense_output=False``.
  141. """
  142. if a.ndim > 2 or b.ndim > 2:
  143. if sparse.issparse(a):
  144. # sparse is always 2D. Implies b is 3D+
  145. # [i, j] @ [k, ..., l, m, n] -> [i, k, ..., l, n]
  146. b_ = np.rollaxis(b, -2)
  147. b_2d = b_.reshape((b.shape[-2], -1))
  148. ret = a @ b_2d
  149. ret = ret.reshape(a.shape[0], *b_.shape[1:])
  150. elif sparse.issparse(b):
  151. # sparse is always 2D. Implies a is 3D+
  152. # [k, ..., l, m] @ [i, j] -> [k, ..., l, j]
  153. a_2d = a.reshape(-1, a.shape[-1])
  154. ret = a_2d @ b
  155. ret = ret.reshape(*a.shape[:-1], b.shape[1])
  156. else:
  157. ret = np.dot(a, b)
  158. else:
  159. ret = a @ b
  160. if (
  161. sparse.issparse(a)
  162. and sparse.issparse(b)
  163. and dense_output
  164. and hasattr(ret, "toarray")
  165. ):
  166. return ret.toarray()
  167. return ret
  168. def randomized_range_finder(
  169. A, *, size, n_iter, power_iteration_normalizer="auto", random_state=None
  170. ):
  171. """Compute an orthonormal matrix whose range approximates the range of A.
  172. Parameters
  173. ----------
  174. A : 2D array
  175. The input data matrix.
  176. size : int
  177. Size of the return array.
  178. n_iter : int
  179. Number of power iterations used to stabilize the result.
  180. power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
  181. Whether the power iterations are normalized with step-by-step
  182. QR factorization (the slowest but most accurate), 'none'
  183. (the fastest but numerically unstable when `n_iter` is large, e.g.
  184. typically 5 or larger), or 'LU' factorization (numerically stable
  185. but can lose slightly in accuracy). The 'auto' mode applies no
  186. normalization if `n_iter` <= 2 and switches to LU otherwise.
  187. .. versionadded:: 0.18
  188. random_state : int, RandomState instance or None, default=None
  189. The seed of the pseudo random number generator to use when shuffling
  190. the data, i.e. getting the random vectors to initialize the algorithm.
  191. Pass an int for reproducible results across multiple function calls.
  192. See :term:`Glossary <random_state>`.
  193. Returns
  194. -------
  195. Q : ndarray
  196. A (size x size) projection matrix, the range of which
  197. approximates well the range of the input matrix A.
  198. Notes
  199. -----
  200. Follows Algorithm 4.3 of
  201. :arxiv:`"Finding structure with randomness:
  202. Stochastic algorithms for constructing approximate matrix decompositions"
  203. <0909.4061>`
  204. Halko, et al. (2009)
  205. An implementation of a randomized algorithm for principal component
  206. analysis
  207. A. Szlam et al. 2014
  208. """
  209. random_state = check_random_state(random_state)
  210. # Generating normal random vectors with shape: (A.shape[1], size)
  211. Q = random_state.normal(size=(A.shape[1], size))
  212. if hasattr(A, "dtype") and A.dtype.kind == "f":
  213. # Ensure f32 is preserved as f32
  214. Q = Q.astype(A.dtype, copy=False)
  215. # Deal with "auto" mode
  216. if power_iteration_normalizer == "auto":
  217. if n_iter <= 2:
  218. power_iteration_normalizer = "none"
  219. else:
  220. power_iteration_normalizer = "LU"
  221. # Perform power iterations with Q to further 'imprint' the top
  222. # singular vectors of A in Q
  223. for i in range(n_iter):
  224. if power_iteration_normalizer == "none":
  225. Q = safe_sparse_dot(A, Q)
  226. Q = safe_sparse_dot(A.T, Q)
  227. elif power_iteration_normalizer == "LU":
  228. Q, _ = linalg.lu(safe_sparse_dot(A, Q), permute_l=True)
  229. Q, _ = linalg.lu(safe_sparse_dot(A.T, Q), permute_l=True)
  230. elif power_iteration_normalizer == "QR":
  231. Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode="economic")
  232. Q, _ = linalg.qr(safe_sparse_dot(A.T, Q), mode="economic")
  233. # Sample the range of A using by linear projection of Q
  234. # Extract an orthonormal basis
  235. Q, _ = linalg.qr(safe_sparse_dot(A, Q), mode="economic")
  236. return Q
  237. def randomized_svd(
  238. M,
  239. n_components,
  240. *,
  241. n_oversamples=10,
  242. n_iter="auto",
  243. power_iteration_normalizer="auto",
  244. transpose="auto",
  245. flip_sign=True,
  246. random_state=None,
  247. svd_lapack_driver="gesdd",
  248. ):
  249. """Compute a truncated randomized SVD.
  250. This method solves the fixed-rank approximation problem described in [1]_
  251. (problem (1.5), p5).
  252. Parameters
  253. ----------
  254. M : {ndarray, sparse matrix}
  255. Matrix to decompose.
  256. n_components : int
  257. Number of singular values and vectors to extract.
  258. n_oversamples : int, default=10
  259. Additional number of random vectors to sample the range of M so as
  260. to ensure proper conditioning. The total number of random vectors
  261. used to find the range of M is n_components + n_oversamples. Smaller
  262. number can improve speed but can negatively impact the quality of
  263. approximation of singular vectors and singular values. Users might wish
  264. to increase this parameter up to `2*k - n_components` where k is the
  265. effective rank, for large matrices, noisy problems, matrices with
  266. slowly decaying spectrums, or to increase precision accuracy. See [1]_
  267. (pages 5, 23 and 26).
  268. n_iter : int or 'auto', default='auto'
  269. Number of power iterations. It can be used to deal with very noisy
  270. problems. When 'auto', it is set to 4, unless `n_components` is small
  271. (< .1 * min(X.shape)) in which case `n_iter` is set to 7.
  272. This improves precision with few components. Note that in general
  273. users should rather increase `n_oversamples` before increasing `n_iter`
  274. as the principle of the randomized method is to avoid usage of these
  275. more costly power iterations steps. When `n_components` is equal
  276. or greater to the effective matrix rank and the spectrum does not
  277. present a slow decay, `n_iter=0` or `1` should even work fine in theory
  278. (see [1]_ page 9).
  279. .. versionchanged:: 0.18
  280. power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
  281. Whether the power iterations are normalized with step-by-step
  282. QR factorization (the slowest but most accurate), 'none'
  283. (the fastest but numerically unstable when `n_iter` is large, e.g.
  284. typically 5 or larger), or 'LU' factorization (numerically stable
  285. but can lose slightly in accuracy). The 'auto' mode applies no
  286. normalization if `n_iter` <= 2 and switches to LU otherwise.
  287. .. versionadded:: 0.18
  288. transpose : bool or 'auto', default='auto'
  289. Whether the algorithm should be applied to M.T instead of M. The
  290. result should approximately be the same. The 'auto' mode will
  291. trigger the transposition if M.shape[1] > M.shape[0] since this
  292. implementation of randomized SVD tend to be a little faster in that
  293. case.
  294. .. versionchanged:: 0.18
  295. flip_sign : bool, default=True
  296. The output of a singular value decomposition is only unique up to a
  297. permutation of the signs of the singular vectors. If `flip_sign` is
  298. set to `True`, the sign ambiguity is resolved by making the largest
  299. loadings for each component in the left singular vectors positive.
  300. random_state : int, RandomState instance or None, default='warn'
  301. The seed of the pseudo random number generator to use when
  302. shuffling the data, i.e. getting the random vectors to initialize
  303. the algorithm. Pass an int for reproducible results across multiple
  304. function calls. See :term:`Glossary <random_state>`.
  305. .. versionchanged:: 1.2
  306. The default value changed from 0 to None.
  307. svd_lapack_driver : {"gesdd", "gesvd"}, default="gesdd"
  308. Whether to use the more efficient divide-and-conquer approach
  309. (`"gesdd"`) or more general rectangular approach (`"gesvd"`) to compute
  310. the SVD of the matrix B, which is the projection of M into a low
  311. dimensional subspace, as described in [1]_.
  312. .. versionadded:: 1.2
  313. Returns
  314. -------
  315. u : ndarray of shape (n_samples, n_components)
  316. Unitary matrix having left singular vectors with signs flipped as columns.
  317. s : ndarray of shape (n_components,)
  318. The singular values, sorted in non-increasing order.
  319. vh : ndarray of shape (n_components, n_features)
  320. Unitary matrix having right singular vectors with signs flipped as rows.
  321. Notes
  322. -----
  323. This algorithm finds a (usually very good) approximate truncated
  324. singular value decomposition using randomization to speed up the
  325. computations. It is particularly fast on large matrices on which
  326. you wish to extract only a small number of components. In order to
  327. obtain further speed up, `n_iter` can be set <=2 (at the cost of
  328. loss of precision). To increase the precision it is recommended to
  329. increase `n_oversamples`, up to `2*k-n_components` where k is the
  330. effective rank. Usually, `n_components` is chosen to be greater than k
  331. so increasing `n_oversamples` up to `n_components` should be enough.
  332. References
  333. ----------
  334. .. [1] :arxiv:`"Finding structure with randomness:
  335. Stochastic algorithms for constructing approximate matrix decompositions"
  336. <0909.4061>`
  337. Halko, et al. (2009)
  338. .. [2] A randomized algorithm for the decomposition of matrices
  339. Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
  340. .. [3] An implementation of a randomized algorithm for principal component
  341. analysis A. Szlam et al. 2014
  342. Examples
  343. --------
  344. >>> import numpy as np
  345. >>> from sklearn.utils.extmath import randomized_svd
  346. >>> a = np.array([[1, 2, 3, 5],
  347. ... [3, 4, 5, 6],
  348. ... [7, 8, 9, 10]])
  349. >>> U, s, Vh = randomized_svd(a, n_components=2, random_state=0)
  350. >>> U.shape, s.shape, Vh.shape
  351. ((3, 2), (2,), (2, 4))
  352. """
  353. if sparse.issparse(M) and M.format in ("lil", "dok"):
  354. warnings.warn(
  355. "Calculating SVD of a {} is expensive. "
  356. "csr_matrix is more efficient.".format(type(M).__name__),
  357. sparse.SparseEfficiencyWarning,
  358. )
  359. random_state = check_random_state(random_state)
  360. n_random = n_components + n_oversamples
  361. n_samples, n_features = M.shape
  362. if n_iter == "auto":
  363. # Checks if the number of iterations is explicitly specified
  364. # Adjust n_iter. 7 was found a good compromise for PCA. See #5299
  365. n_iter = 7 if n_components < 0.1 * min(M.shape) else 4
  366. if transpose == "auto":
  367. transpose = n_samples < n_features
  368. if transpose:
  369. # this implementation is a bit faster with smaller shape[1]
  370. M = M.T
  371. Q = randomized_range_finder(
  372. M,
  373. size=n_random,
  374. n_iter=n_iter,
  375. power_iteration_normalizer=power_iteration_normalizer,
  376. random_state=random_state,
  377. )
  378. # project M to the (k + p) dimensional space using the basis vectors
  379. B = safe_sparse_dot(Q.T, M)
  380. # compute the SVD on the thin matrix: (k + p) wide
  381. Uhat, s, Vt = linalg.svd(B, full_matrices=False, lapack_driver=svd_lapack_driver)
  382. del B
  383. U = np.dot(Q, Uhat)
  384. if flip_sign:
  385. if not transpose:
  386. U, Vt = svd_flip(U, Vt)
  387. else:
  388. # In case of transpose u_based_decision=false
  389. # to actually flip based on u and not v.
  390. U, Vt = svd_flip(U, Vt, u_based_decision=False)
  391. if transpose:
  392. # transpose back the results according to the input convention
  393. return Vt[:n_components, :].T, s[:n_components], U[:, :n_components].T
  394. else:
  395. return U[:, :n_components], s[:n_components], Vt[:n_components, :]
  396. def _randomized_eigsh(
  397. M,
  398. n_components,
  399. *,
  400. n_oversamples=10,
  401. n_iter="auto",
  402. power_iteration_normalizer="auto",
  403. selection="module",
  404. random_state=None,
  405. ):
  406. """Computes a truncated eigendecomposition using randomized methods
  407. This method solves the fixed-rank approximation problem described in the
  408. Halko et al paper.
  409. The choice of which components to select can be tuned with the `selection`
  410. parameter.
  411. .. versionadded:: 0.24
  412. Parameters
  413. ----------
  414. M : ndarray or sparse matrix
  415. Matrix to decompose, it should be real symmetric square or complex
  416. hermitian
  417. n_components : int
  418. Number of eigenvalues and vectors to extract.
  419. n_oversamples : int, default=10
  420. Additional number of random vectors to sample the range of M so as
  421. to ensure proper conditioning. The total number of random vectors
  422. used to find the range of M is n_components + n_oversamples. Smaller
  423. number can improve speed but can negatively impact the quality of
  424. approximation of eigenvectors and eigenvalues. Users might wish
  425. to increase this parameter up to `2*k - n_components` where k is the
  426. effective rank, for large matrices, noisy problems, matrices with
  427. slowly decaying spectrums, or to increase precision accuracy. See Halko
  428. et al (pages 5, 23 and 26).
  429. n_iter : int or 'auto', default='auto'
  430. Number of power iterations. It can be used to deal with very noisy
  431. problems. When 'auto', it is set to 4, unless `n_components` is small
  432. (< .1 * min(X.shape)) in which case `n_iter` is set to 7.
  433. This improves precision with few components. Note that in general
  434. users should rather increase `n_oversamples` before increasing `n_iter`
  435. as the principle of the randomized method is to avoid usage of these
  436. more costly power iterations steps. When `n_components` is equal
  437. or greater to the effective matrix rank and the spectrum does not
  438. present a slow decay, `n_iter=0` or `1` should even work fine in theory
  439. (see Halko et al paper, page 9).
  440. power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
  441. Whether the power iterations are normalized with step-by-step
  442. QR factorization (the slowest but most accurate), 'none'
  443. (the fastest but numerically unstable when `n_iter` is large, e.g.
  444. typically 5 or larger), or 'LU' factorization (numerically stable
  445. but can lose slightly in accuracy). The 'auto' mode applies no
  446. normalization if `n_iter` <= 2 and switches to LU otherwise.
  447. selection : {'value', 'module'}, default='module'
  448. Strategy used to select the n components. When `selection` is `'value'`
  449. (not yet implemented, will become the default when implemented), the
  450. components corresponding to the n largest eigenvalues are returned.
  451. When `selection` is `'module'`, the components corresponding to the n
  452. eigenvalues with largest modules are returned.
  453. random_state : int, RandomState instance, default=None
  454. The seed of the pseudo random number generator to use when shuffling
  455. the data, i.e. getting the random vectors to initialize the algorithm.
  456. Pass an int for reproducible results across multiple function calls.
  457. See :term:`Glossary <random_state>`.
  458. Notes
  459. -----
  460. This algorithm finds a (usually very good) approximate truncated
  461. eigendecomposition using randomized methods to speed up the computations.
  462. This method is particularly fast on large matrices on which
  463. you wish to extract only a small number of components. In order to
  464. obtain further speed up, `n_iter` can be set <=2 (at the cost of
  465. loss of precision). To increase the precision it is recommended to
  466. increase `n_oversamples`, up to `2*k-n_components` where k is the
  467. effective rank. Usually, `n_components` is chosen to be greater than k
  468. so increasing `n_oversamples` up to `n_components` should be enough.
  469. Strategy 'value': not implemented yet.
  470. Algorithms 5.3, 5.4 and 5.5 in the Halko et al paper should provide good
  471. candidates for a future implementation.
  472. Strategy 'module':
  473. The principle is that for diagonalizable matrices, the singular values and
  474. eigenvalues are related: if t is an eigenvalue of A, then :math:`|t|` is a
  475. singular value of A. This method relies on a randomized SVD to find the n
  476. singular components corresponding to the n singular values with largest
  477. modules, and then uses the signs of the singular vectors to find the true
  478. sign of t: if the sign of left and right singular vectors are different
  479. then the corresponding eigenvalue is negative.
  480. Returns
  481. -------
  482. eigvals : 1D array of shape (n_components,) containing the `n_components`
  483. eigenvalues selected (see ``selection`` parameter).
  484. eigvecs : 2D array of shape (M.shape[0], n_components) containing the
  485. `n_components` eigenvectors corresponding to the `eigvals`, in the
  486. corresponding order. Note that this follows the `scipy.linalg.eigh`
  487. convention.
  488. See Also
  489. --------
  490. :func:`randomized_svd`
  491. References
  492. ----------
  493. * :arxiv:`"Finding structure with randomness:
  494. Stochastic algorithms for constructing approximate matrix decompositions"
  495. (Algorithm 4.3 for strategy 'module') <0909.4061>`
  496. Halko, et al. (2009)
  497. """
  498. if selection == "value": # pragma: no cover
  499. # to do : an algorithm can be found in the Halko et al reference
  500. raise NotImplementedError()
  501. elif selection == "module":
  502. # Note: no need for deterministic U and Vt (flip_sign=True),
  503. # as we only use the dot product UVt afterwards
  504. U, S, Vt = randomized_svd(
  505. M,
  506. n_components=n_components,
  507. n_oversamples=n_oversamples,
  508. n_iter=n_iter,
  509. power_iteration_normalizer=power_iteration_normalizer,
  510. flip_sign=False,
  511. random_state=random_state,
  512. )
  513. eigvecs = U[:, :n_components]
  514. eigvals = S[:n_components]
  515. # Conversion of Singular values into Eigenvalues:
  516. # For any eigenvalue t, the corresponding singular value is |t|.
  517. # So if there is a negative eigenvalue t, the corresponding singular
  518. # value will be -t, and the left (U) and right (V) singular vectors
  519. # will have opposite signs.
  520. # Fastest way: see <https://stackoverflow.com/a/61974002/7262247>
  521. diag_VtU = np.einsum("ji,ij->j", Vt[:n_components, :], U[:, :n_components])
  522. signs = np.sign(diag_VtU)
  523. eigvals = eigvals * signs
  524. else: # pragma: no cover
  525. raise ValueError("Invalid `selection`: %r" % selection)
  526. return eigvals, eigvecs
  527. def weighted_mode(a, w, *, axis=0):
  528. """Return an array of the weighted modal (most common) value in the passed array.
  529. If there is more than one such value, only the first is returned.
  530. The bin-count for the modal bins is also returned.
  531. This is an extension of the algorithm in scipy.stats.mode.
  532. Parameters
  533. ----------
  534. a : array-like of shape (n_samples,)
  535. Array of which values to find mode(s).
  536. w : array-like of shape (n_samples,)
  537. Array of weights for each value.
  538. axis : int, default=0
  539. Axis along which to operate. Default is 0, i.e. the first axis.
  540. Returns
  541. -------
  542. vals : ndarray
  543. Array of modal values.
  544. score : ndarray
  545. Array of weighted counts for each mode.
  546. See Also
  547. --------
  548. scipy.stats.mode: Calculates the Modal (most common) value of array elements
  549. along specified axis.
  550. Examples
  551. --------
  552. >>> from sklearn.utils.extmath import weighted_mode
  553. >>> x = [4, 1, 4, 2, 4, 2]
  554. >>> weights = [1, 1, 1, 1, 1, 1]
  555. >>> weighted_mode(x, weights)
  556. (array([4.]), array([3.]))
  557. The value 4 appears three times: with uniform weights, the result is
  558. simply the mode of the distribution.
  559. >>> weights = [1, 3, 0.5, 1.5, 1, 2] # deweight the 4's
  560. >>> weighted_mode(x, weights)
  561. (array([2.]), array([3.5]))
  562. The value 2 has the highest score: it appears twice with weights of
  563. 1.5 and 2: the sum of these is 3.5.
  564. """
  565. if axis is None:
  566. a = np.ravel(a)
  567. w = np.ravel(w)
  568. axis = 0
  569. else:
  570. a = np.asarray(a)
  571. w = np.asarray(w)
  572. if a.shape != w.shape:
  573. w = np.full(a.shape, w, dtype=w.dtype)
  574. scores = np.unique(np.ravel(a)) # get ALL unique values
  575. testshape = list(a.shape)
  576. testshape[axis] = 1
  577. oldmostfreq = np.zeros(testshape)
  578. oldcounts = np.zeros(testshape)
  579. for score in scores:
  580. template = np.zeros(a.shape)
  581. ind = a == score
  582. template[ind] = w[ind]
  583. counts = np.expand_dims(np.sum(template, axis), axis)
  584. mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
  585. oldcounts = np.maximum(counts, oldcounts)
  586. oldmostfreq = mostfrequent
  587. return mostfrequent, oldcounts
  588. def cartesian(arrays, out=None):
  589. """Generate a cartesian product of input arrays.
  590. Parameters
  591. ----------
  592. arrays : list of array-like
  593. 1-D arrays to form the cartesian product of.
  594. out : ndarray of shape (M, len(arrays)), default=None
  595. Array to place the cartesian product in.
  596. Returns
  597. -------
  598. out : ndarray of shape (M, len(arrays))
  599. Array containing the cartesian products formed of input arrays.
  600. If not provided, the `dtype` of the output array is set to the most
  601. permissive `dtype` of the input arrays, according to NumPy type
  602. promotion.
  603. .. versionadded:: 1.2
  604. Add support for arrays of different types.
  605. Notes
  606. -----
  607. This function may not be used on more than 32 arrays
  608. because the underlying numpy functions do not support it.
  609. Examples
  610. --------
  611. >>> from sklearn.utils.extmath import cartesian
  612. >>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
  613. array([[1, 4, 6],
  614. [1, 4, 7],
  615. [1, 5, 6],
  616. [1, 5, 7],
  617. [2, 4, 6],
  618. [2, 4, 7],
  619. [2, 5, 6],
  620. [2, 5, 7],
  621. [3, 4, 6],
  622. [3, 4, 7],
  623. [3, 5, 6],
  624. [3, 5, 7]])
  625. """
  626. arrays = [np.asarray(x) for x in arrays]
  627. shape = (len(x) for x in arrays)
  628. ix = np.indices(shape)
  629. ix = ix.reshape(len(arrays), -1).T
  630. if out is None:
  631. dtype = np.result_type(*arrays) # find the most permissive dtype
  632. out = np.empty_like(ix, dtype=dtype)
  633. for n, arr in enumerate(arrays):
  634. out[:, n] = arrays[n][ix[:, n]]
  635. return out
  636. def svd_flip(u, v, u_based_decision=True):
  637. """Sign correction to ensure deterministic output from SVD.
  638. Adjusts the columns of u and the rows of v such that the loadings in the
  639. columns in u that are largest in absolute value are always positive.
  640. Parameters
  641. ----------
  642. u : ndarray
  643. Parameters u and v are the output of `linalg.svd` or
  644. :func:`~sklearn.utils.extmath.randomized_svd`, with matching inner
  645. dimensions so one can compute `np.dot(u * s, v)`.
  646. v : ndarray
  647. Parameters u and v are the output of `linalg.svd` or
  648. :func:`~sklearn.utils.extmath.randomized_svd`, with matching inner
  649. dimensions so one can compute `np.dot(u * s, v)`.
  650. The input v should really be called vt to be consistent with scipy's
  651. output.
  652. u_based_decision : bool, default=True
  653. If True, use the columns of u as the basis for sign flipping.
  654. Otherwise, use the rows of v. The choice of which variable to base the
  655. decision on is generally algorithm dependent.
  656. Returns
  657. -------
  658. u_adjusted : ndarray
  659. Array u with adjusted columns and the same dimensions as u.
  660. v_adjusted : ndarray
  661. Array v with adjusted rows and the same dimensions as v.
  662. """
  663. if u_based_decision:
  664. # columns of u, rows of v
  665. max_abs_cols = np.argmax(np.abs(u), axis=0)
  666. signs = np.sign(u[max_abs_cols, range(u.shape[1])])
  667. u *= signs
  668. v *= signs[:, np.newaxis]
  669. else:
  670. # rows of v, columns of u
  671. max_abs_rows = np.argmax(np.abs(v), axis=1)
  672. signs = np.sign(v[range(v.shape[0]), max_abs_rows])
  673. u *= signs
  674. v *= signs[:, np.newaxis]
  675. return u, v
  676. def log_logistic(X, out=None):
  677. """Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.
  678. This implementation is numerically stable because it splits positive and
  679. negative values::
  680. -log(1 + exp(-x_i)) if x_i > 0
  681. x_i - log(1 + exp(x_i)) if x_i <= 0
  682. For the ordinary logistic function, use ``scipy.special.expit``.
  683. Parameters
  684. ----------
  685. X : array-like of shape (M, N) or (M,)
  686. Argument to the logistic function.
  687. out : array-like of shape (M, N) or (M,), default=None
  688. Preallocated output array.
  689. Returns
  690. -------
  691. out : ndarray of shape (M, N) or (M,)
  692. Log of the logistic function evaluated at every point in x.
  693. Notes
  694. -----
  695. See the blog post describing this implementation:
  696. http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
  697. """
  698. is_1d = X.ndim == 1
  699. X = np.atleast_2d(X)
  700. X = check_array(X, dtype=np.float64)
  701. n_samples, n_features = X.shape
  702. if out is None:
  703. out = np.empty_like(X)
  704. _log_logistic_sigmoid(n_samples, n_features, X, out)
  705. if is_1d:
  706. return np.squeeze(out)
  707. return out
  708. def softmax(X, copy=True):
  709. """
  710. Calculate the softmax function.
  711. The softmax function is calculated by
  712. np.exp(X) / np.sum(np.exp(X), axis=1)
  713. This will cause overflow when large values are exponentiated.
  714. Hence the largest value in each row is subtracted from each data
  715. point to prevent this.
  716. Parameters
  717. ----------
  718. X : array-like of float of shape (M, N)
  719. Argument to the logistic function.
  720. copy : bool, default=True
  721. Copy X or not.
  722. Returns
  723. -------
  724. out : ndarray of shape (M, N)
  725. Softmax function evaluated at every point in x.
  726. """
  727. xp, is_array_api_compliant = get_namespace(X)
  728. if copy:
  729. X = xp.asarray(X, copy=True)
  730. max_prob = xp.reshape(xp.max(X, axis=1), (-1, 1))
  731. X -= max_prob
  732. if _is_numpy_namespace(xp):
  733. # optimization for NumPy arrays
  734. np.exp(X, out=np.asarray(X))
  735. else:
  736. # array_api does not have `out=`
  737. X = xp.exp(X)
  738. sum_prob = xp.reshape(xp.sum(X, axis=1), (-1, 1))
  739. X /= sum_prob
  740. return X
  741. def make_nonnegative(X, min_value=0):
  742. """Ensure `X.min()` >= `min_value`.
  743. Parameters
  744. ----------
  745. X : array-like
  746. The matrix to make non-negative.
  747. min_value : float, default=0
  748. The threshold value.
  749. Returns
  750. -------
  751. array-like
  752. The thresholded array.
  753. Raises
  754. ------
  755. ValueError
  756. When X is sparse.
  757. """
  758. min_ = X.min()
  759. if min_ < min_value:
  760. if sparse.issparse(X):
  761. raise ValueError(
  762. "Cannot make the data matrix"
  763. " nonnegative because it is sparse."
  764. " Adding a value to every entry would"
  765. " make it no longer sparse."
  766. )
  767. X = X + (min_value - min_)
  768. return X
# Use at least float64 for the accumulating functions to avoid precision issues;
# see https://github.com/numpy/numpy/issues/9393. float64 is also retained in
# case a smaller float dtype would overflow.
  772. def _safe_accumulator_op(op, x, *args, **kwargs):
  773. """
  774. This function provides numpy accumulator functions with a float64 dtype
  775. when used on a floating point input. This prevents accumulator overflow on
  776. smaller floating point dtypes.
  777. Parameters
  778. ----------
  779. op : function
  780. A numpy accumulator function such as np.mean or np.sum.
  781. x : ndarray
  782. A numpy array to apply the accumulator function.
  783. *args : positional arguments
  784. Positional arguments passed to the accumulator function after the
  785. input x.
  786. **kwargs : keyword arguments
  787. Keyword arguments passed to the accumulator function.
  788. Returns
  789. -------
  790. result
  791. The output of the accumulator function passed to this function.
  792. """
  793. if np.issubdtype(x.dtype, np.floating) and x.dtype.itemsize < 8:
  794. result = op(x, *args, **kwargs, dtype=np.float64)
  795. else:
  796. result = op(x, *args, **kwargs)
  797. return result
def _incremental_mean_and_var(
    X, last_mean, last_variance, last_sample_count, sample_weight=None
):
    """Calculate mean update and a Youngs and Cramer variance update.

    If sample_weight is given, the weighted mean and variance is computed.

    Update a given mean and (possibly) variance according to new data given
    in X. last_mean is always required to compute the new mean.
    If last_variance is None, no variance is computed and None return for
    updated_variance.

    From the paper "Algorithms for computing the sample variance: analysis and
    recommendations", by Chan, Golub, and LeVeque.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data to use for variance update.

    last_mean : array-like of shape (n_features,)

    last_variance : array-like of shape (n_features,)

    last_sample_count : array-like of shape (n_features,)
        The number of samples encountered until now if sample_weight is None.
        If sample_weight is not None, this is the sum of sample_weight
        encountered.

    sample_weight : array-like of shape (n_samples,) or None
        Sample weights. If None, compute the unweighted mean/variance.

    Returns
    -------
    updated_mean : ndarray of shape (n_features,)

    updated_variance : ndarray of shape (n_features,)
        None if last_variance was None.

    updated_sample_count : ndarray of shape (n_features,)

    Notes
    -----
    NaNs are ignored during the algorithm.

    References
    ----------
    T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
    variance: recommendations, The American Statistician, Vol. 37, No. 3,
    pp. 242-247

    Also, see the sparse implementation of this in
    `utils.sparsefuncs.incr_mean_variance_axis` and
    `utils.sparsefuncs_fast.incr_mean_variance_axis0`
    """
    # old = stats until now
    # new = the current increment
    # updated = the aggregated stats
    last_sum = last_mean * last_sample_count
    X_nan_mask = np.isnan(X)
    if np.any(X_nan_mask):
        # NaN entries must be excluded from every sum below.
        sum_op = np.nansum
    else:
        sum_op = np.sum
    if sample_weight is not None:
        # equivalent to np.nansum(X * sample_weight, axis=0)
        # safer because np.float64(X*W) != np.float64(X)*np.float64(W)
        new_sum = _safe_accumulator_op(
            np.matmul, sample_weight, np.where(X_nan_mask, 0, X)
        )
        # Per-feature sum of weights, counting only non-NaN entries.
        new_sample_count = _safe_accumulator_op(
            np.sum, sample_weight[:, None] * (~X_nan_mask), axis=0
        )
    else:
        new_sum = _safe_accumulator_op(sum_op, X, axis=0)
        n_samples = X.shape[0]
        # Per-feature count of non-NaN entries in this batch.
        new_sample_count = n_samples - np.sum(X_nan_mask, axis=0)

    updated_sample_count = last_sample_count + new_sample_count

    updated_mean = (last_sum + new_sum) / updated_sample_count

    if last_variance is None:
        updated_variance = None
    else:
        # T is the batch mean; center the batch around it before squaring.
        T = new_sum / new_sample_count
        temp = X - T
        if sample_weight is not None:
            # equivalent to np.nansum((X-T)**2 * sample_weight, axis=0)
            # safer because np.float64(X*W) != np.float64(X)*np.float64(W)
            correction = _safe_accumulator_op(
                np.matmul, sample_weight, np.where(X_nan_mask, 0, temp)
            )
            # NOTE: in-place square reuses `temp`'s buffer; order matters —
            # `correction` must be computed from the un-squared values first.
            temp **= 2
            new_unnormalized_variance = _safe_accumulator_op(
                np.matmul, sample_weight, np.where(X_nan_mask, 0, temp)
            )
        else:
            correction = _safe_accumulator_op(sum_op, temp, axis=0)
            temp **= 2
            new_unnormalized_variance = _safe_accumulator_op(sum_op, temp, axis=0)

        # correction term of the corrected 2 pass algorithm.
        # See "Algorithms for computing the sample variance: analysis
        # and recommendations", by Chan, Golub, and LeVeque.
        new_unnormalized_variance -= correction**2 / new_sample_count

        last_unnormalized_variance = last_variance * last_sample_count

        # Division by zero occurs for features with last_sample_count == 0;
        # the resulting inf/nan entries are overwritten via `zeros` below.
        with np.errstate(divide="ignore", invalid="ignore"):
            last_over_new_count = last_sample_count / new_sample_count
            updated_unnormalized_variance = (
                last_unnormalized_variance
                + new_unnormalized_variance
                + last_over_new_count
                / updated_sample_count
                * (last_sum / last_over_new_count - new_sum) ** 2
            )

        # Features seen for the first time take the batch variance as-is.
        zeros = last_sample_count == 0
        updated_unnormalized_variance[zeros] = new_unnormalized_variance[zeros]
        updated_variance = updated_unnormalized_variance / updated_sample_count

    return updated_mean, updated_variance, updated_sample_count
  900. def _deterministic_vector_sign_flip(u):
  901. """Modify the sign of vectors for reproducibility.
  902. Flips the sign of elements of all the vectors (rows of u) such that
  903. the absolute maximum element of each vector is positive.
  904. Parameters
  905. ----------
  906. u : ndarray
  907. Array with vectors as its rows.
  908. Returns
  909. -------
  910. u_flipped : ndarray with same shape as u
  911. Array with the sign flipped vectors as its rows.
  912. """
  913. max_abs_rows = np.argmax(np.abs(u), axis=1)
  914. signs = np.sign(u[range(u.shape[0]), max_abs_rows])
  915. u *= signs[:, np.newaxis]
  916. return u
  917. def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):
  918. """Use high precision for cumsum and check that final value matches sum.
  919. Warns if the final cumulative sum does not match the sum (up to the chosen
  920. tolerance).
  921. Parameters
  922. ----------
  923. arr : array-like
  924. To be cumulatively summed as flat.
  925. axis : int, default=None
  926. Axis along which the cumulative sum is computed.
  927. The default (None) is to compute the cumsum over the flattened array.
  928. rtol : float, default=1e-05
  929. Relative tolerance, see ``np.allclose``.
  930. atol : float, default=1e-08
  931. Absolute tolerance, see ``np.allclose``.
  932. Returns
  933. -------
  934. out : ndarray
  935. Array with the cumulative sums along the chosen axis.
  936. """
  937. out = np.cumsum(arr, axis=axis, dtype=np.float64)
  938. expected = np.sum(arr, axis=axis, dtype=np.float64)
  939. if not np.all(
  940. np.isclose(
  941. out.take(-1, axis=axis), expected, rtol=rtol, atol=atol, equal_nan=True
  942. )
  943. ):
  944. warnings.warn(
  945. (
  946. "cumsum was found to be unstable: "
  947. "its last element does not correspond to sum"
  948. ),
  949. RuntimeWarning,
  950. )
  951. return out
  952. def _nanaverage(a, weights=None):
  953. """Compute the weighted average, ignoring NaNs.
  954. Parameters
  955. ----------
  956. a : ndarray
  957. Array containing data to be averaged.
  958. weights : array-like, default=None
  959. An array of weights associated with the values in a. Each value in a
  960. contributes to the average according to its associated weight. The
  961. weights array can either be 1-D of the same shape as a. If `weights=None`,
  962. then all data in a are assumed to have a weight equal to one.
  963. Returns
  964. -------
  965. weighted_average : float
  966. The weighted average.
  967. Notes
  968. -----
  969. This wrapper to combine :func:`numpy.average` and :func:`numpy.nanmean`, so
  970. that :func:`np.nan` values are ignored from the average and weights can
  971. be passed. Note that when possible, we delegate to the prime methods.
  972. """
  973. if len(a) == 0:
  974. return np.nan
  975. mask = np.isnan(a)
  976. if mask.all():
  977. return np.nan
  978. if weights is None:
  979. return np.nanmean(a)
  980. weights = np.array(weights, copy=False)
  981. a, weights = a[~mask], weights[~mask]
  982. try:
  983. return np.average(a, weights=weights)
  984. except ZeroDivisionError:
  985. # this is when all weights are zero, then ignore them
  986. return np.average(a)