"""
Generalized Linear Models.
"""

# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#         Fabian Pedregosa <fabian.pedregosa@inria.fr>
#         Olivier Grisel <olivier.grisel@ensta.org>
#         Vincent Michel <vincent.michel@inria.fr>
#         Peter Prettenhofer <peter.prettenhofer@gmail.com>
#         Mathieu Blondel <mathieu@mblondel.org>
#         Lars Buitinck
#         Maryan Morel <maryan.morel@polytechnique.edu>
#         Giorgio Patrini <giorgio.patrini@anu.edu.au>
#         Maria Telenczuk <https://github.com/maikia>
# License: BSD 3 clause
import numbers
import warnings
from abc import ABCMeta, abstractmethod
from numbers import Integral

import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from scipy.sparse.linalg import lsqr
from scipy.special import expit

from ..base import (
    BaseEstimator,
    ClassifierMixin,
    MultiOutputMixin,
    RegressorMixin,
    _fit_context,
)
from ..preprocessing._data import _is_constant_feature
from ..utils import check_array, check_random_state
from ..utils._array_api import get_namespace
from ..utils._seq_dataset import (
    ArrayDataset32,
    ArrayDataset64,
    CSRDataset32,
    CSRDataset64,
)
from ..utils.extmath import _incremental_mean_and_var, safe_sparse_dot
from ..utils.parallel import Parallel, delayed
from ..utils.sparsefuncs import inplace_column_scale, mean_variance_axis
from ..utils.validation import FLOAT_DTYPES, _check_sample_weight, check_is_fitted
  44. # TODO: bayesian_ridge_regression and bayesian_regression_ard
  45. # should be squashed into its respective objects.
  46. SPARSE_INTERCEPT_DECAY = 0.01
  47. # For sparse data intercept updates are scaled by this decay factor to avoid
  48. # intercept oscillation.
  49. # TODO(1.4): remove
  50. # parameter 'normalize' should be removed from linear models
  51. def _deprecate_normalize(normalize, estimator_name):
  52. """Normalize is to be deprecated from linear models and a use of
  53. a pipeline with a StandardScaler is to be recommended instead.
  54. Here the appropriate message is selected to be displayed to the user
  55. depending on the default normalize value (as it varies between the linear
  56. models and normalize value selected by the user).
  57. Parameters
  58. ----------
  59. normalize : bool,
  60. normalize value passed by the user
  61. estimator_name : str
  62. name of the linear estimator which calls this function.
  63. The name will be used for writing the deprecation warnings
  64. Returns
  65. -------
  66. normalize : bool,
  67. normalize value which should further be used by the estimator at this
  68. stage of the depreciation process
  69. Notes
  70. -----
  71. This function should be completely removed in 1.4.
  72. """
  73. if normalize not in [True, False, "deprecated"]:
  74. raise ValueError(
  75. "Leave 'normalize' to its default value or set it to True or False"
  76. )
  77. if normalize == "deprecated":
  78. _normalize = False
  79. else:
  80. _normalize = normalize
  81. pipeline_msg = (
  82. "If you wish to scale the data, use Pipeline with a StandardScaler "
  83. "in a preprocessing stage. To reproduce the previous behavior:\n\n"
  84. "from sklearn.pipeline import make_pipeline\n\n"
  85. "model = make_pipeline(StandardScaler(with_mean=False), "
  86. f"{estimator_name}())\n\n"
  87. "If you wish to pass a sample_weight parameter, you need to pass it "
  88. "as a fit parameter to each step of the pipeline as follows:\n\n"
  89. "kwargs = {s[0] + '__sample_weight': sample_weight for s "
  90. "in model.steps}\n"
  91. "model.fit(X, y, **kwargs)\n\n"
  92. )
  93. alpha_msg = ""
  94. if "LassoLars" in estimator_name:
  95. alpha_msg = "Set parameter alpha to: original_alpha * np.sqrt(n_samples). "
  96. if normalize != "deprecated" and normalize:
  97. warnings.warn(
  98. "'normalize' was deprecated in version 1.2 and will be removed in 1.4.\n"
  99. + pipeline_msg
  100. + alpha_msg,
  101. FutureWarning,
  102. )
  103. elif not normalize:
  104. warnings.warn(
  105. (
  106. "'normalize' was deprecated in version 1.2 and will be "
  107. "removed in 1.4. "
  108. "Please leave the normalize parameter to its default value to "
  109. "silence this warning. The default behavior of this estimator "
  110. "is to not do any normalization. If normalization is needed "
  111. "please use sklearn.preprocessing.StandardScaler instead."
  112. ),
  113. FutureWarning,
  114. )
  115. return _normalize
  116. def make_dataset(X, y, sample_weight, random_state=None):
  117. """Create ``Dataset`` abstraction for sparse and dense inputs.
  118. This also returns the ``intercept_decay`` which is different
  119. for sparse datasets.
  120. Parameters
  121. ----------
  122. X : array-like, shape (n_samples, n_features)
  123. Training data
  124. y : array-like, shape (n_samples, )
  125. Target values.
  126. sample_weight : numpy array of shape (n_samples,)
  127. The weight of each sample
  128. random_state : int, RandomState instance or None (default)
  129. Determines random number generation for dataset random sampling. It is not
  130. used for dataset shuffling.
  131. Pass an int for reproducible output across multiple function calls.
  132. See :term:`Glossary <random_state>`.
  133. Returns
  134. -------
  135. dataset
  136. The ``Dataset`` abstraction
  137. intercept_decay
  138. The intercept decay
  139. """
  140. rng = check_random_state(random_state)
  141. # seed should never be 0 in SequentialDataset64
  142. seed = rng.randint(1, np.iinfo(np.int32).max)
  143. if X.dtype == np.float32:
  144. CSRData = CSRDataset32
  145. ArrayData = ArrayDataset32
  146. else:
  147. CSRData = CSRDataset64
  148. ArrayData = ArrayDataset64
  149. if sp.issparse(X):
  150. dataset = CSRData(X.data, X.indptr, X.indices, y, sample_weight, seed=seed)
  151. intercept_decay = SPARSE_INTERCEPT_DECAY
  152. else:
  153. X = np.ascontiguousarray(X)
  154. dataset = ArrayData(X, y, sample_weight, seed=seed)
  155. intercept_decay = 1.0
  156. return dataset, intercept_decay
  157. def _preprocess_data(
  158. X,
  159. y,
  160. fit_intercept,
  161. normalize=False,
  162. copy=True,
  163. copy_y=True,
  164. sample_weight=None,
  165. check_input=True,
  166. ):
  167. """Center and scale data.
  168. Centers data to have mean zero along axis 0. If fit_intercept=False or if
  169. the X is a sparse matrix, no centering is done, but normalization can still
  170. be applied. The function returns the statistics necessary to reconstruct
  171. the input data, which are X_offset, y_offset, X_scale, such that the output
  172. X = (X - X_offset) / X_scale
  173. X_scale is the L2 norm of X - X_offset. If sample_weight is not None,
  174. then the weighted mean of X and y is zero, and not the mean itself. If
  175. fit_intercept=True, the mean, eventually weighted, is returned, independently
  176. of whether X was centered (option used for optimization with sparse data in
  177. coordinate_descend).
  178. This is here because nearly all linear models will want their data to be
  179. centered. This function also systematically makes y consistent with X.dtype
  180. Returns
  181. -------
  182. X_out : {ndarray, sparse matrix} of shape (n_samples, n_features)
  183. If copy=True a copy of the input X is triggered, otherwise operations are
  184. inplace.
  185. If input X is dense, then X_out is centered.
  186. If normalize is True, then X_out is rescaled (dense and sparse case)
  187. y_out : {ndarray, sparse matrix} of shape (n_samples,) or (n_samples, n_targets)
  188. Centered version of y. Likely performed inplace on input y.
  189. X_offset : ndarray of shape (n_features,)
  190. The mean per column of input X.
  191. y_offset : float or ndarray of shape (n_features,)
  192. X_scale : ndarray of shape (n_features,)
  193. The standard deviation per column of input X.
  194. """
  195. if isinstance(sample_weight, numbers.Number):
  196. sample_weight = None
  197. if sample_weight is not None:
  198. sample_weight = np.asarray(sample_weight)
  199. if check_input:
  200. X = check_array(X, copy=copy, accept_sparse=["csr", "csc"], dtype=FLOAT_DTYPES)
  201. y = check_array(y, dtype=X.dtype, copy=copy_y, ensure_2d=False)
  202. else:
  203. y = y.astype(X.dtype, copy=copy_y)
  204. if copy:
  205. if sp.issparse(X):
  206. X = X.copy()
  207. else:
  208. X = X.copy(order="K")
  209. if fit_intercept:
  210. if sp.issparse(X):
  211. X_offset, X_var = mean_variance_axis(X, axis=0, weights=sample_weight)
  212. else:
  213. if normalize:
  214. X_offset, X_var, _ = _incremental_mean_and_var(
  215. X,
  216. last_mean=0.0,
  217. last_variance=0.0,
  218. last_sample_count=0.0,
  219. sample_weight=sample_weight,
  220. )
  221. else:
  222. X_offset = np.average(X, axis=0, weights=sample_weight)
  223. X_offset = X_offset.astype(X.dtype, copy=False)
  224. X -= X_offset
  225. if normalize:
  226. X_var = X_var.astype(X.dtype, copy=False)
  227. # Detect constant features on the computed variance, before taking
  228. # the np.sqrt. Otherwise constant features cannot be detected with
  229. # sample weights.
  230. constant_mask = _is_constant_feature(X_var, X_offset, X.shape[0])
  231. if sample_weight is None:
  232. X_var *= X.shape[0]
  233. else:
  234. X_var *= sample_weight.sum()
  235. X_scale = np.sqrt(X_var, out=X_var)
  236. X_scale[constant_mask] = 1.0
  237. if sp.issparse(X):
  238. inplace_column_scale(X, 1.0 / X_scale)
  239. else:
  240. X /= X_scale
  241. else:
  242. X_scale = np.ones(X.shape[1], dtype=X.dtype)
  243. y_offset = np.average(y, axis=0, weights=sample_weight)
  244. y -= y_offset
  245. else:
  246. X_offset = np.zeros(X.shape[1], dtype=X.dtype)
  247. X_scale = np.ones(X.shape[1], dtype=X.dtype)
  248. if y.ndim == 1:
  249. y_offset = X.dtype.type(0)
  250. else:
  251. y_offset = np.zeros(y.shape[1], dtype=X.dtype)
  252. return X, y, X_offset, y_offset, X_scale
  253. # TODO: _rescale_data should be factored into _preprocess_data.
  254. # Currently, the fact that sag implements its own way to deal with
  255. # sample_weight makes the refactoring tricky.
  256. def _rescale_data(X, y, sample_weight, inplace=False):
  257. """Rescale data sample-wise by square root of sample_weight.
  258. For many linear models, this enables easy support for sample_weight because
  259. (y - X w)' S (y - X w)
  260. with S = diag(sample_weight) becomes
  261. ||y_rescaled - X_rescaled w||_2^2
  262. when setting
  263. y_rescaled = sqrt(S) y
  264. X_rescaled = sqrt(S) X
  265. Returns
  266. -------
  267. X_rescaled : {array-like, sparse matrix}
  268. y_rescaled : {array-like, sparse matrix}
  269. """
  270. # Assume that _validate_data and _check_sample_weight have been called by
  271. # the caller.
  272. n_samples = X.shape[0]
  273. sample_weight_sqrt = np.sqrt(sample_weight)
  274. if sp.issparse(X) or sp.issparse(y):
  275. sw_matrix = sparse.dia_matrix(
  276. (sample_weight_sqrt, 0), shape=(n_samples, n_samples)
  277. )
  278. if sp.issparse(X):
  279. X = safe_sparse_dot(sw_matrix, X)
  280. else:
  281. if inplace:
  282. X *= sample_weight_sqrt[:, np.newaxis]
  283. else:
  284. X = X * sample_weight_sqrt[:, np.newaxis]
  285. if sp.issparse(y):
  286. y = safe_sparse_dot(sw_matrix, y)
  287. else:
  288. if inplace:
  289. if y.ndim == 1:
  290. y *= sample_weight_sqrt
  291. else:
  292. y *= sample_weight_sqrt[:, np.newaxis]
  293. else:
  294. if y.ndim == 1:
  295. y = y * sample_weight_sqrt
  296. else:
  297. y = y * sample_weight_sqrt[:, np.newaxis]
  298. return X, y, sample_weight_sqrt
  299. class LinearModel(BaseEstimator, metaclass=ABCMeta):
  300. """Base class for Linear Models"""
  301. @abstractmethod
  302. def fit(self, X, y):
  303. """Fit model."""
  304. def _decision_function(self, X):
  305. check_is_fitted(self)
  306. X = self._validate_data(X, accept_sparse=["csr", "csc", "coo"], reset=False)
  307. return safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_
  308. def predict(self, X):
  309. """
  310. Predict using the linear model.
  311. Parameters
  312. ----------
  313. X : array-like or sparse matrix, shape (n_samples, n_features)
  314. Samples.
  315. Returns
  316. -------
  317. C : array, shape (n_samples,)
  318. Returns predicted values.
  319. """
  320. return self._decision_function(X)
  321. def _set_intercept(self, X_offset, y_offset, X_scale):
  322. """Set the intercept_"""
  323. if self.fit_intercept:
  324. # We always want coef_.dtype=X.dtype. For instance, X.dtype can differ from
  325. # coef_.dtype if warm_start=True.
  326. self.coef_ = np.divide(self.coef_, X_scale, dtype=X_scale.dtype)
  327. self.intercept_ = y_offset - np.dot(X_offset, self.coef_.T)
  328. else:
  329. self.intercept_ = 0.0
  330. def _more_tags(self):
  331. return {"requires_y": True}
  332. # XXX Should this derive from LinearModel? It should be a mixin, not an ABC.
  333. # Maybe the n_features checking can be moved to LinearModel.
  334. class LinearClassifierMixin(ClassifierMixin):
  335. """Mixin for linear classifiers.
  336. Handles prediction for sparse and dense X.
  337. """
  338. def decision_function(self, X):
  339. """
  340. Predict confidence scores for samples.
  341. The confidence score for a sample is proportional to the signed
  342. distance of that sample to the hyperplane.
  343. Parameters
  344. ----------
  345. X : {array-like, sparse matrix} of shape (n_samples, n_features)
  346. The data matrix for which we want to get the confidence scores.
  347. Returns
  348. -------
  349. scores : ndarray of shape (n_samples,) or (n_samples, n_classes)
  350. Confidence scores per `(n_samples, n_classes)` combination. In the
  351. binary case, confidence score for `self.classes_[1]` where >0 means
  352. this class would be predicted.
  353. """
  354. check_is_fitted(self)
  355. xp, _ = get_namespace(X)
  356. X = self._validate_data(X, accept_sparse="csr", reset=False)
  357. scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_
  358. return xp.reshape(scores, (-1,)) if scores.shape[1] == 1 else scores
  359. def predict(self, X):
  360. """
  361. Predict class labels for samples in X.
  362. Parameters
  363. ----------
  364. X : {array-like, sparse matrix} of shape (n_samples, n_features)
  365. The data matrix for which we want to get the predictions.
  366. Returns
  367. -------
  368. y_pred : ndarray of shape (n_samples,)
  369. Vector containing the class labels for each sample.
  370. """
  371. xp, _ = get_namespace(X)
  372. scores = self.decision_function(X)
  373. if len(scores.shape) == 1:
  374. indices = xp.astype(scores > 0, int)
  375. else:
  376. indices = xp.argmax(scores, axis=1)
  377. return xp.take(self.classes_, indices)
  378. def _predict_proba_lr(self, X):
  379. """Probability estimation for OvR logistic regression.
  380. Positive class probabilities are computed as
  381. 1. / (1. + np.exp(-self.decision_function(X)));
  382. multiclass is handled by normalizing that over all classes.
  383. """
  384. prob = self.decision_function(X)
  385. expit(prob, out=prob)
  386. if prob.ndim == 1:
  387. return np.vstack([1 - prob, prob]).T
  388. else:
  389. # OvR normalization, like LibLinear's predict_probability
  390. prob /= prob.sum(axis=1).reshape((prob.shape[0], -1))
  391. return prob
  392. class SparseCoefMixin:
  393. """Mixin for converting coef_ to and from CSR format.
  394. L1-regularizing estimators should inherit this.
  395. """
  396. def densify(self):
  397. """
  398. Convert coefficient matrix to dense array format.
  399. Converts the ``coef_`` member (back) to a numpy.ndarray. This is the
  400. default format of ``coef_`` and is required for fitting, so calling
  401. this method is only required on models that have previously been
  402. sparsified; otherwise, it is a no-op.
  403. Returns
  404. -------
  405. self
  406. Fitted estimator.
  407. """
  408. msg = "Estimator, %(name)s, must be fitted before densifying."
  409. check_is_fitted(self, msg=msg)
  410. if sp.issparse(self.coef_):
  411. self.coef_ = self.coef_.toarray()
  412. return self
  413. def sparsify(self):
  414. """
  415. Convert coefficient matrix to sparse format.
  416. Converts the ``coef_`` member to a scipy.sparse matrix, which for
  417. L1-regularized models can be much more memory- and storage-efficient
  418. than the usual numpy.ndarray representation.
  419. The ``intercept_`` member is not converted.
  420. Returns
  421. -------
  422. self
  423. Fitted estimator.
  424. Notes
  425. -----
  426. For non-sparse models, i.e. when there are not many zeros in ``coef_``,
  427. this may actually *increase* memory usage, so use this method with
  428. care. A rule of thumb is that the number of zero elements, which can
  429. be computed with ``(coef_ == 0).sum()``, must be more than 50% for this
  430. to provide significant benefits.
  431. After calling this method, further fitting with the partial_fit
  432. method (if any) will not work until you call densify.
  433. """
  434. msg = "Estimator, %(name)s, must be fitted before sparsifying."
  435. check_is_fitted(self, msg=msg)
  436. self.coef_ = sp.csr_matrix(self.coef_)
  437. return self
  438. class LinearRegression(MultiOutputMixin, RegressorMixin, LinearModel):
  439. """
  440. Ordinary least squares Linear Regression.
  441. LinearRegression fits a linear model with coefficients w = (w1, ..., wp)
  442. to minimize the residual sum of squares between the observed targets in
  443. the dataset, and the targets predicted by the linear approximation.
  444. Parameters
  445. ----------
  446. fit_intercept : bool, default=True
  447. Whether to calculate the intercept for this model. If set
  448. to False, no intercept will be used in calculations
  449. (i.e. data is expected to be centered).
  450. copy_X : bool, default=True
  451. If True, X will be copied; else, it may be overwritten.
  452. n_jobs : int, default=None
  453. The number of jobs to use for the computation. This will only provide
  454. speedup in case of sufficiently large problems, that is if firstly
  455. `n_targets > 1` and secondly `X` is sparse or if `positive` is set
  456. to `True`. ``None`` means 1 unless in a
  457. :obj:`joblib.parallel_backend` context. ``-1`` means using all
  458. processors. See :term:`Glossary <n_jobs>` for more details.
  459. positive : bool, default=False
  460. When set to ``True``, forces the coefficients to be positive. This
  461. option is only supported for dense arrays.
  462. .. versionadded:: 0.24
  463. Attributes
  464. ----------
  465. coef_ : array of shape (n_features, ) or (n_targets, n_features)
  466. Estimated coefficients for the linear regression problem.
  467. If multiple targets are passed during the fit (y 2D), this
  468. is a 2D array of shape (n_targets, n_features), while if only
  469. one target is passed, this is a 1D array of length n_features.
  470. rank_ : int
  471. Rank of matrix `X`. Only available when `X` is dense.
  472. singular_ : array of shape (min(X, y),)
  473. Singular values of `X`. Only available when `X` is dense.
  474. intercept_ : float or array of shape (n_targets,)
  475. Independent term in the linear model. Set to 0.0 if
  476. `fit_intercept = False`.
  477. n_features_in_ : int
  478. Number of features seen during :term:`fit`.
  479. .. versionadded:: 0.24
  480. feature_names_in_ : ndarray of shape (`n_features_in_`,)
  481. Names of features seen during :term:`fit`. Defined only when `X`
  482. has feature names that are all strings.
  483. .. versionadded:: 1.0
  484. See Also
  485. --------
  486. Ridge : Ridge regression addresses some of the
  487. problems of Ordinary Least Squares by imposing a penalty on the
  488. size of the coefficients with l2 regularization.
  489. Lasso : The Lasso is a linear model that estimates
  490. sparse coefficients with l1 regularization.
  491. ElasticNet : Elastic-Net is a linear regression
  492. model trained with both l1 and l2 -norm regularization of the
  493. coefficients.
  494. Notes
  495. -----
  496. From the implementation point of view, this is just plain Ordinary
  497. Least Squares (scipy.linalg.lstsq) or Non Negative Least Squares
  498. (scipy.optimize.nnls) wrapped as a predictor object.
  499. Examples
  500. --------
  501. >>> import numpy as np
  502. >>> from sklearn.linear_model import LinearRegression
  503. >>> X = np.array([[1, 1], [1, 2], [2, 2], [2, 3]])
  504. >>> # y = 1 * x_0 + 2 * x_1 + 3
  505. >>> y = np.dot(X, np.array([1, 2])) + 3
  506. >>> reg = LinearRegression().fit(X, y)
  507. >>> reg.score(X, y)
  508. 1.0
  509. >>> reg.coef_
  510. array([1., 2.])
  511. >>> reg.intercept_
  512. 3.0...
  513. >>> reg.predict(np.array([[3, 5]]))
  514. array([16.])
  515. """
  516. _parameter_constraints: dict = {
  517. "fit_intercept": ["boolean"],
  518. "copy_X": ["boolean"],
  519. "n_jobs": [None, Integral],
  520. "positive": ["boolean"],
  521. }
  522. def __init__(
  523. self,
  524. *,
  525. fit_intercept=True,
  526. copy_X=True,
  527. n_jobs=None,
  528. positive=False,
  529. ):
  530. self.fit_intercept = fit_intercept
  531. self.copy_X = copy_X
  532. self.n_jobs = n_jobs
  533. self.positive = positive
  534. @_fit_context(prefer_skip_nested_validation=True)
  535. def fit(self, X, y, sample_weight=None):
  536. """
  537. Fit linear model.
  538. Parameters
  539. ----------
  540. X : {array-like, sparse matrix} of shape (n_samples, n_features)
  541. Training data.
  542. y : array-like of shape (n_samples,) or (n_samples, n_targets)
  543. Target values. Will be cast to X's dtype if necessary.
  544. sample_weight : array-like of shape (n_samples,), default=None
  545. Individual weights for each sample.
  546. .. versionadded:: 0.17
  547. parameter *sample_weight* support to LinearRegression.
  548. Returns
  549. -------
  550. self : object
  551. Fitted Estimator.
  552. """
  553. n_jobs_ = self.n_jobs
  554. accept_sparse = False if self.positive else ["csr", "csc", "coo"]
  555. X, y = self._validate_data(
  556. X, y, accept_sparse=accept_sparse, y_numeric=True, multi_output=True
  557. )
  558. has_sw = sample_weight is not None
  559. if has_sw:
  560. sample_weight = _check_sample_weight(
  561. sample_weight, X, dtype=X.dtype, only_non_negative=True
  562. )
  563. # Note that neither _rescale_data nor the rest of the fit method of
  564. # LinearRegression can benefit from in-place operations when X is a
  565. # sparse matrix. Therefore, let's not copy X when it is sparse.
  566. copy_X_in_preprocess_data = self.copy_X and not sp.issparse(X)
  567. X, y, X_offset, y_offset, X_scale = _preprocess_data(
  568. X,
  569. y,
  570. fit_intercept=self.fit_intercept,
  571. copy=copy_X_in_preprocess_data,
  572. sample_weight=sample_weight,
  573. )
  574. if has_sw:
  575. # Sample weight can be implemented via a simple rescaling. Note
  576. # that we safely do inplace rescaling when _preprocess_data has
  577. # already made a copy if requested.
  578. X, y, sample_weight_sqrt = _rescale_data(
  579. X, y, sample_weight, inplace=copy_X_in_preprocess_data
  580. )
  581. if self.positive:
  582. if y.ndim < 2:
  583. self.coef_ = optimize.nnls(X, y)[0]
  584. else:
  585. # scipy.optimize.nnls cannot handle y with shape (M, K)
  586. outs = Parallel(n_jobs=n_jobs_)(
  587. delayed(optimize.nnls)(X, y[:, j]) for j in range(y.shape[1])
  588. )
  589. self.coef_ = np.vstack([out[0] for out in outs])
  590. elif sp.issparse(X):
  591. X_offset_scale = X_offset / X_scale
  592. if has_sw:
  593. def matvec(b):
  594. return X.dot(b) - sample_weight_sqrt * b.dot(X_offset_scale)
  595. def rmatvec(b):
  596. return X.T.dot(b) - X_offset_scale * b.dot(sample_weight_sqrt)
  597. else:
  598. def matvec(b):
  599. return X.dot(b) - b.dot(X_offset_scale)
  600. def rmatvec(b):
  601. return X.T.dot(b) - X_offset_scale * b.sum()
  602. X_centered = sparse.linalg.LinearOperator(
  603. shape=X.shape, matvec=matvec, rmatvec=rmatvec
  604. )
  605. if y.ndim < 2:
  606. self.coef_ = lsqr(X_centered, y)[0]
  607. else:
  608. # sparse_lstsq cannot handle y with shape (M, K)
  609. outs = Parallel(n_jobs=n_jobs_)(
  610. delayed(lsqr)(X_centered, y[:, j].ravel())
  611. for j in range(y.shape[1])
  612. )
  613. self.coef_ = np.vstack([out[0] for out in outs])
  614. else:
  615. self.coef_, _, self.rank_, self.singular_ = linalg.lstsq(X, y)
  616. self.coef_ = self.coef_.T
  617. if y.ndim == 1:
  618. self.coef_ = np.ravel(self.coef_)
  619. self._set_intercept(X_offset, y_offset, X_scale)
  620. return self
  621. def _check_precomputed_gram_matrix(
  622. X, precompute, X_offset, X_scale, rtol=None, atol=1e-5
  623. ):
  624. """Computes a single element of the gram matrix and compares it to
  625. the corresponding element of the user supplied gram matrix.
  626. If the values do not match a ValueError will be thrown.
  627. Parameters
  628. ----------
  629. X : ndarray of shape (n_samples, n_features)
  630. Data array.
  631. precompute : array-like of shape (n_features, n_features)
  632. User-supplied gram matrix.
  633. X_offset : ndarray of shape (n_features,)
  634. Array of feature means used to center design matrix.
  635. X_scale : ndarray of shape (n_features,)
  636. Array of feature scale factors used to normalize design matrix.
  637. rtol : float, default=None
  638. Relative tolerance; see numpy.allclose
  639. If None, it is set to 1e-4 for arrays of dtype numpy.float32 and 1e-7
  640. otherwise.
  641. atol : float, default=1e-5
  642. absolute tolerance; see :func`numpy.allclose`. Note that the default
  643. here is more tolerant than the default for
  644. :func:`numpy.testing.assert_allclose`, where `atol=0`.
  645. Raises
  646. ------
  647. ValueError
  648. Raised when the provided Gram matrix is not consistent.
  649. """
  650. n_features = X.shape[1]
  651. f1 = n_features // 2
  652. f2 = min(f1 + 1, n_features - 1)
  653. v1 = (X[:, f1] - X_offset[f1]) * X_scale[f1]
  654. v2 = (X[:, f2] - X_offset[f2]) * X_scale[f2]
  655. expected = np.dot(v1, v2)
  656. actual = precompute[f1, f2]
  657. dtypes = [precompute.dtype, expected.dtype]
  658. if rtol is None:
  659. rtols = [1e-4 if dtype == np.float32 else 1e-7 for dtype in dtypes]
  660. rtol = max(rtols)
  661. if not np.isclose(expected, actual, rtol=rtol, atol=atol):
  662. raise ValueError(
  663. "Gram matrix passed in via 'precompute' parameter "
  664. "did not pass validation when a single element was "
  665. "checked - please check that it was computed "
  666. f"properly. For element ({f1},{f2}) we computed "
  667. f"{expected} but the user-supplied value was "
  668. f"{actual}."
  669. )
  670. def _pre_fit(
  671. X,
  672. y,
  673. Xy,
  674. precompute,
  675. normalize,
  676. fit_intercept,
  677. copy,
  678. check_input=True,
  679. sample_weight=None,
  680. ):
  681. """Function used at beginning of fit in linear models with L1 or L0 penalty.
  682. This function applies _preprocess_data and additionally computes the gram matrix
  683. `precompute` as needed as well as `Xy`.
  684. """
  685. n_samples, n_features = X.shape
  686. if sparse.issparse(X):
  687. # copy is not needed here as X is not modified inplace when X is sparse
  688. precompute = False
  689. X, y, X_offset, y_offset, X_scale = _preprocess_data(
  690. X,
  691. y,
  692. fit_intercept=fit_intercept,
  693. normalize=normalize,
  694. copy=False,
  695. check_input=check_input,
  696. sample_weight=sample_weight,
  697. )
  698. else:
  699. # copy was done in fit if necessary
  700. X, y, X_offset, y_offset, X_scale = _preprocess_data(
  701. X,
  702. y,
  703. fit_intercept=fit_intercept,
  704. normalize=normalize,
  705. copy=copy,
  706. check_input=check_input,
  707. sample_weight=sample_weight,
  708. )
  709. # Rescale only in dense case. Sparse cd solver directly deals with
  710. # sample_weight.
  711. if sample_weight is not None:
  712. # This triggers copies anyway.
  713. X, y, _ = _rescale_data(X, y, sample_weight=sample_weight)
  714. # FIXME: 'normalize' to be removed in 1.4
  715. if hasattr(precompute, "__array__"):
  716. if (
  717. fit_intercept
  718. and not np.allclose(X_offset, np.zeros(n_features))
  719. or normalize
  720. and not np.allclose(X_scale, np.ones(n_features))
  721. ):
  722. warnings.warn(
  723. (
  724. "Gram matrix was provided but X was centered to fit "
  725. "intercept, or X was normalized : recomputing Gram matrix."
  726. ),
  727. UserWarning,
  728. )
  729. # recompute Gram
  730. precompute = "auto"
  731. Xy = None
  732. elif check_input:
  733. # If we're going to use the user's precomputed gram matrix, we
  734. # do a quick check to make sure its not totally bogus.
  735. _check_precomputed_gram_matrix(X, precompute, X_offset, X_scale)
  736. # precompute if n_samples > n_features
  737. if isinstance(precompute, str) and precompute == "auto":
  738. precompute = n_samples > n_features
  739. if precompute is True:
  740. # make sure that the 'precompute' array is contiguous.
  741. precompute = np.empty(shape=(n_features, n_features), dtype=X.dtype, order="C")
  742. np.dot(X.T, X, out=precompute)
  743. if not hasattr(precompute, "__array__"):
  744. Xy = None # cannot use Xy if precompute is not Gram
  745. if hasattr(precompute, "__array__") and Xy is None:
  746. common_dtype = np.result_type(X.dtype, y.dtype)
  747. if y.ndim == 1:
  748. # Xy is 1d, make sure it is contiguous.
  749. Xy = np.empty(shape=n_features, dtype=common_dtype, order="C")
  750. np.dot(X.T, y, out=Xy)
  751. else:
  752. # Make sure that Xy is always F contiguous even if X or y are not
  753. # contiguous: the goal is to make it fast to extract the data for a
  754. # specific target.
  755. n_targets = y.shape[1]
  756. Xy = np.empty(shape=(n_features, n_targets), dtype=common_dtype, order="F")
  757. np.dot(y.T, X, out=Xy.T)
  758. return X, y, X_offset, y_offset, X_scale, precompute, Xy