  1. """
  2. This file contains preprocessing tools based on polynomials.
  3. """
  4. import collections
  5. from itertools import chain, combinations
  6. from itertools import combinations_with_replacement as combinations_w_r
  7. from numbers import Integral
  8. import numpy as np
  9. from scipy import sparse
  10. from scipy.interpolate import BSpline
  11. from scipy.special import comb
  12. from ..base import BaseEstimator, TransformerMixin, _fit_context
  13. from ..utils import check_array
  14. from ..utils._param_validation import Interval, StrOptions
  15. from ..utils.fixes import parse_version, sp_version
  16. from ..utils.stats import _weighted_percentile
  17. from ..utils.validation import (
  18. FLOAT_DTYPES,
  19. _check_feature_names_in,
  20. _check_sample_weight,
  21. check_is_fitted,
  22. )
  23. from ._csr_polynomial_expansion import (
  24. _calc_expanded_nnz,
  25. _calc_total_nnz,
  26. _csr_polynomial_expansion,
  27. )
  28. __all__ = [
  29. "PolynomialFeatures",
  30. "SplineTransformer",
  31. ]
def _create_expansion(X, interaction_only, deg, n_features, cumulative_size=0):
    """Helper function for creating and appending sparse expansion matrices.

    Computes the single-degree `deg` polynomial expansion of a CSR matrix `X`
    via the Cython routine `_csr_polynomial_expansion` and returns it as a new
    CSR matrix, or `None` when the expansion has no output columns.

    Parameters: `interaction_only` mirrors the estimator flag; `n_features`
    is `X.shape[1]`; `cumulative_size` is the number of output columns already
    produced by lower degrees and is only used for the scipy<1.8.0 overflow
    guard below.
    """
    total_nnz = _calc_total_nnz(X.indptr, interaction_only, deg)
    expanded_col = _calc_expanded_nnz(n_features, interaction_only, deg)

    if expanded_col == 0:
        return None
    # This only checks whether each block needs 64bit integers upon
    # expansion. We prefer to keep int32 indexing where we can,
    # since currently SciPy's CSR construction downcasts when possible,
    # so we prefer to avoid an unnecessary cast. The dtype may still
    # change in the concatenation process if needed.
    # See: https://github.com/scipy/scipy/issues/16569
    max_indices = expanded_col - 1
    max_indptr = total_nnz
    max_int32 = np.iinfo(np.int32).max
    needs_int64 = max(max_indices, max_indptr) > max_int32
    index_dtype = np.int64 if needs_int64 else np.int32

    # This is a pretty specific bug that is hard to work around by a user,
    # hence we do not detail the entire bug and all possible avoidance
    # mechanisms. Instead we recommend upgrading scipy or shrinking their data.
    cumulative_size += expanded_col
    if (
        sp_version < parse_version("1.8.0")
        and cumulative_size - 1 > max_int32
        and not needs_int64
    ):
        raise ValueError(
            "In scipy versions `<1.8.0`, the function `scipy.sparse.hstack`"
            " sometimes produces negative columns when the output shape contains"
            " `n_cols` too large to be represented by a 32bit signed"
            " integer. To avoid this error, either use a version"
            " of scipy `>=1.8.0` or alter the `PolynomialFeatures`"
            " transformer to produce fewer than 2^31 output features."
        )

    # Result of the expansion, modified in place by the
    # `_csr_polynomial_expansion` routine.
    expanded_data = np.empty(shape=total_nnz, dtype=X.data.dtype)
    expanded_indices = np.empty(shape=total_nnz, dtype=index_dtype)
    expanded_indptr = np.empty(shape=X.indptr.shape[0], dtype=index_dtype)
    _csr_polynomial_expansion(
        X.data,
        X.indices,
        X.indptr,
        X.shape[1],
        expanded_data,
        expanded_indices,
        expanded_indptr,
        interaction_only,
        deg,
    )
    return sparse.csr_matrix(
        (expanded_data, expanded_indices, expanded_indptr),
        shape=(X.indptr.shape[0] - 1, expanded_col),
        dtype=X.dtype,
    )
class PolynomialFeatures(TransformerMixin, BaseEstimator):
    """Generate polynomial and interaction features.

    Generate a new feature matrix consisting of all polynomial combinations
    of the features with degree less than or equal to the specified degree.
    For example, if an input sample is two dimensional and of the form
    [a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].

    Read more in the :ref:`User Guide <polynomial_features>`.

    Parameters
    ----------
    degree : int or tuple (min_degree, max_degree), default=2
        If a single int is given, it specifies the maximal degree of the
        polynomial features. If a tuple `(min_degree, max_degree)` is passed,
        then `min_degree` is the minimum and `max_degree` is the maximum
        polynomial degree of the generated features. Note that `min_degree=0`
        and `min_degree=1` are equivalent as outputting the degree zero term is
        determined by `include_bias`.

    interaction_only : bool, default=False
        If `True`, only interaction features are produced: features that are
        products of at most `degree` *distinct* input features, i.e. terms with
        power of 2 or higher of the same input feature are excluded:

            - included: `x[0]`, `x[1]`, `x[0] * x[1]`, etc.
            - excluded: `x[0] ** 2`, `x[0] ** 2 * x[1]`, etc.

    include_bias : bool, default=True
        If `True` (default), then include a bias column, the feature in which
        all polynomial powers are zero (i.e. a column of ones - acts as an
        intercept term in a linear model).

    order : {'C', 'F'}, default='C'
        Order of output array in the dense case. `'F'` order is faster to
        compute, but may slow down subsequent estimators.

        .. versionadded:: 0.21

    Attributes
    ----------
    powers_ : ndarray of shape (`n_output_features_`, `n_features_in_`)
        `powers_[i, j]` is the exponent of the jth input in the ith output.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_output_features_ : int
        The total number of polynomial output features. The number of output
        features is computed by iterating over all suitably sized combinations
        of input features.

    See Also
    --------
    SplineTransformer : Transformer that generates univariate B-spline bases
        for features.

    Notes
    -----
    Be aware that the number of features in the output array scales
    polynomially in the number of features of the input array, and
    exponentially in the degree. High degrees can cause overfitting.

    See :ref:`examples/linear_model/plot_polynomial_interpolation.py
    <sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.preprocessing import PolynomialFeatures
    >>> X = np.arange(6).reshape(3, 2)
    >>> X
    array([[0, 1],
           [2, 3],
           [4, 5]])
    >>> poly = PolynomialFeatures(2)
    >>> poly.fit_transform(X)
    array([[ 1.,  0.,  1.,  0.,  0.,  1.],
           [ 1.,  2.,  3.,  4.,  6.,  9.],
           [ 1.,  4.,  5., 16., 20., 25.]])
    >>> poly = PolynomialFeatures(interaction_only=True)
    >>> poly.fit_transform(X)
    array([[ 1.,  0.,  1.,  0.],
           [ 1.,  2.,  3.,  6.],
           [ 1.,  4.,  5., 20.]])
    """

    _parameter_constraints: dict = {
        "degree": [Interval(Integral, 0, None, closed="left"), "array-like"],
        "interaction_only": ["boolean"],
        "include_bias": ["boolean"],
        "order": [StrOptions({"C", "F"})],
    }

    def __init__(
        self, degree=2, *, interaction_only=False, include_bias=True, order="C"
    ):
        self.degree = degree
        self.interaction_only = interaction_only
        self.include_bias = include_bias
        self.order = order

    @staticmethod
    def _combinations(
        n_features, min_degree, max_degree, interaction_only, include_bias
    ):
        """Yield index tuples of input features, one tuple per output feature.

        Each tuple lists the (possibly repeated) feature indices whose product
        forms one output column; the empty tuple (degree 0) represents the
        bias column and is prepended when `include_bias` is True.
        """
        comb = combinations if interaction_only else combinations_w_r
        start = max(1, min_degree)
        iter = chain.from_iterable(
            comb(range(n_features), i) for i in range(start, max_degree + 1)
        )
        if include_bias:
            iter = chain(comb(range(n_features), 0), iter)
        return iter

    @staticmethod
    def _num_combinations(
        n_features, min_degree, max_degree, interaction_only, include_bias
    ):
        """Calculate number of terms in polynomial expansion.

        This should be equivalent to counting the number of terms returned by
        _combinations(...) but much faster.
        """
        if interaction_only:
            combinations = sum(
                [
                    comb(n_features, i, exact=True)
                    for i in range(max(1, min_degree), min(max_degree, n_features) + 1)
                ]
            )
        else:
            # Stars-and-bars count of monomials of degree <= max_degree,
            # minus those of degree < min_degree; the bias (degree 0) term
            # is handled separately below.
            combinations = comb(n_features + max_degree, max_degree, exact=True) - 1
            if min_degree > 0:
                d = min_degree - 1
                combinations -= comb(n_features + d, d, exact=True) - 1

        if include_bias:
            combinations += 1

        return combinations

    @property
    def powers_(self):
        """Exponent for each of the inputs in the output."""
        check_is_fitted(self)

        combinations = self._combinations(
            n_features=self.n_features_in_,
            min_degree=self._min_degree,
            max_degree=self._max_degree,
            interaction_only=self.interaction_only,
            include_bias=self.include_bias,
        )
        # bincount turns each index tuple into a per-feature exponent vector.
        return np.vstack(
            [np.bincount(c, minlength=self.n_features_in_) for c in combinations]
        )

    def get_feature_names_out(self, input_features=None):
        """Get output feature names for transformation.

        Parameters
        ----------
        input_features : array-like of str or None, default=None
            Input features.

            - If `input_features is None`, then `feature_names_in_` is
              used as feature names in. If `feature_names_in_` is not defined,
              then the following input feature names are generated:
              `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
            - If `input_features` is an array-like, then `input_features` must
              match `feature_names_in_` if `feature_names_in_` is defined.

        Returns
        -------
        feature_names_out : ndarray of str objects
            Transformed feature names.
        """
        powers = self.powers_
        input_features = _check_feature_names_in(self, input_features)
        feature_names = []
        for row in powers:
            inds = np.where(row)[0]
            if len(inds):
                # e.g. "x0 x1^2"; exponent 1 is printed without the caret.
                name = " ".join(
                    (
                        "%s^%d" % (input_features[ind], exp)
                        if exp != 1
                        else input_features[ind]
                    )
                    for ind, exp in zip(inds, row[inds])
                )
            else:
                name = "1"
            feature_names.append(name)
        return np.asarray(feature_names, dtype=object)

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y=None):
        """
        Compute number of output features.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The data.

        y : Ignored
            Not used, present here for API consistency by convention.

        Returns
        -------
        self : object
            Fitted transformer.
        """
        _, n_features = self._validate_data(X, accept_sparse=True).shape

        if isinstance(self.degree, Integral):
            if self.degree == 0 and not self.include_bias:
                raise ValueError(
                    "Setting degree to zero and include_bias to False would result in"
                    " an empty output array."
                )

            self._min_degree = 0
            self._max_degree = self.degree
        elif (
            isinstance(self.degree, collections.abc.Iterable) and len(self.degree) == 2
        ):
            self._min_degree, self._max_degree = self.degree
            if not (
                isinstance(self._min_degree, Integral)
                and isinstance(self._max_degree, Integral)
                and self._min_degree >= 0
                and self._min_degree <= self._max_degree
            ):
                raise ValueError(
                    "degree=(min_degree, max_degree) must "
                    "be non-negative integers that fulfil "
                    "min_degree <= max_degree, got "
                    f"{self.degree}."
                )
            elif self._max_degree == 0 and not self.include_bias:
                raise ValueError(
                    "Setting both min_degree and max_degree to zero and include_bias to"
                    " False would result in an empty output array."
                )
        else:
            raise ValueError(
                "degree must be a non-negative int or tuple "
                "(min_degree, max_degree), got "
                f"{self.degree}."
            )

        self.n_output_features_ = self._num_combinations(
            n_features=n_features,
            min_degree=self._min_degree,
            max_degree=self._max_degree,
            interaction_only=self.interaction_only,
            include_bias=self.include_bias,
        )
        if self.n_output_features_ > np.iinfo(np.intp).max:
            msg = (
                "The output that would result from the current configuration would"
                f" have {self.n_output_features_} features which is too large to be"
                f" indexed by {np.intp().dtype.name}. Please change some or all of the"
                " following:\n- The number of features in the input, currently"
                f" {n_features=}\n- The range of degrees to calculate, currently"
                f" [{self._min_degree}, {self._max_degree}]\n- Whether to include only"
                f" interaction terms, currently {self.interaction_only}\n- Whether to"
                f" include a bias term, currently {self.include_bias}."
            )
            if (
                np.intp == np.int32
                and self.n_output_features_ <= np.iinfo(np.int64).max
            ):  # pragma: nocover
                msg += (
                    "\nNote that the current Python runtime has a limited 32 bit "
                    "address space and that this configuration would have been "
                    "admissible if run on a 64 bit Python runtime."
                )
            raise ValueError(msg)
        # We also record the number of output features for
        # _min_degree = 0 (i.e. the full expansion, used as scratch size
        # by the dense code path in `transform`).
        self._n_out_full = self._num_combinations(
            n_features=n_features,
            min_degree=0,
            max_degree=self._max_degree,
            interaction_only=self.interaction_only,
            include_bias=self.include_bias,
        )

        return self

    def transform(self, X):
        """Transform data to polynomial features.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The data to transform, row by row.

            Prefer CSR over CSC for sparse input (for speed), but CSC is
            required if the degree is 4 or higher. If the degree is less than
            4 and the input format is CSC, it will be converted to CSR, have
            its polynomial features generated, then converted back to CSC.

            If the degree is 2 or 3, the method described in "Leveraging
            Sparsity to Speed Up Polynomial Feature Expansions of CSR Matrices
            Using K-Simplex Numbers" by Andrew Nystrom and John Hughes is
            used, which is much faster than the method used on CSC input. For
            this reason, a CSC input will be converted to CSR, and the output
            will be converted back to CSC prior to being returned, hence the
            preference of CSR.

        Returns
        -------
        XP : {ndarray, sparse matrix} of shape (n_samples, NP)
            The matrix of features, where `NP` is the number of polynomial
            features generated from the combination of inputs. If a sparse
            matrix is provided, it will be converted into a sparse
            `csr_matrix`.
        """
        check_is_fitted(self)

        X = self._validate_data(
            X, order="F", dtype=FLOAT_DTYPES, reset=False, accept_sparse=("csr", "csc")
        )

        n_samples, n_features = X.shape
        max_int32 = np.iinfo(np.int32).max
        if sparse.issparse(X) and X.format == "csr":
            if self._max_degree > 3:
                # The fast CSR path only supports degree <= 3; route high
                # degrees through the CSC column-product path.
                return self.transform(X.tocsc()).tocsr()
            to_stack = []
            if self.include_bias:
                to_stack.append(
                    sparse.csr_matrix(np.ones(shape=(n_samples, 1), dtype=X.dtype))
                )
            if self._min_degree <= 1 and self._max_degree > 0:
                to_stack.append(X)

            cumulative_size = sum(mat.shape[1] for mat in to_stack)
            for deg in range(max(2, self._min_degree), self._max_degree + 1):
                expanded = _create_expansion(
                    X=X,
                    interaction_only=self.interaction_only,
                    deg=deg,
                    n_features=n_features,
                    cumulative_size=cumulative_size,
                )
                if expanded is not None:
                    to_stack.append(expanded)
                    cumulative_size += expanded.shape[1]
            if len(to_stack) == 0:
                # edge case: deal with empty matrix
                XP = sparse.csr_matrix((n_samples, 0), dtype=X.dtype)
            else:
                # `scipy.sparse.hstack` breaks in scipy<1.9.2
                # when `n_output_features_ > max_int32`
                all_int32 = all(mat.indices.dtype == np.int32 for mat in to_stack)
                if (
                    sp_version < parse_version("1.9.2")
                    and self.n_output_features_ > max_int32
                    and all_int32
                ):
                    raise ValueError(  # pragma: no cover
                        "In scipy versions `<1.9.2`, the function `scipy.sparse.hstack`"
                        " produces negative columns when:\n1. The output shape contains"
                        " `n_cols` too large to be represented by a 32bit signed"
                        " integer.\n2. All sub-matrices to be stacked have indices of"
                        " dtype `np.int32`.\nTo avoid this error, either use a version"
                        " of scipy `>=1.9.2` or alter the `PolynomialFeatures`"
                        " transformer to produce fewer than 2^31 output features"
                    )
                XP = sparse.hstack(to_stack, dtype=X.dtype, format="csr")
        elif sparse.issparse(X) and X.format == "csc" and self._max_degree < 4:
            return self.transform(X.tocsr()).tocsc()
        elif sparse.issparse(X):
            # Generic sparse path (e.g. CSC with degree >= 4): build each
            # output column as an explicit product of input columns.
            combinations = self._combinations(
                n_features=n_features,
                min_degree=self._min_degree,
                max_degree=self._max_degree,
                interaction_only=self.interaction_only,
                include_bias=self.include_bias,
            )
            columns = []
            for combi in combinations:
                if combi:
                    out_col = 1
                    for col_idx in combi:
                        out_col = X[:, col_idx].multiply(out_col)
                    columns.append(out_col)
                else:
                    bias = sparse.csc_matrix(np.ones((X.shape[0], 1)))
                    columns.append(bias)
            XP = sparse.hstack(columns, dtype=X.dtype).tocsc()
        else:
            # Do as if _min_degree = 0 and cut down array after the
            # computation, i.e. use _n_out_full instead of n_output_features_.
            XP = np.empty(
                shape=(n_samples, self._n_out_full), dtype=X.dtype, order=self.order
            )

            # What follows is a faster implementation of:
            # for i, comb in enumerate(combinations):
            #     XP[:, i] = X[:, comb].prod(1)
            # This implementation uses two optimisations.
            # First one is broadcasting,
            # multiply ([X1, ..., Xn], X1) -> [X1 X1, ..., Xn X1]
            # multiply ([X2, ..., Xn], X2) -> [X2 X2, ..., Xn X2]
            # ...
            # multiply ([X[:, start:end], X[:, start]) -> ...
            # Second optimisation happens for degrees >= 3.
            # Xi^3 is computed reusing previous computation:
            # Xi^3 = Xi^2 * Xi.

            # degree 0 term
            if self.include_bias:
                XP[:, 0] = 1
                current_col = 1
            else:
                current_col = 0

            if self._max_degree == 0:
                return XP

            # degree 1 term
            XP[:, current_col : current_col + n_features] = X
            index = list(range(current_col, current_col + n_features))
            current_col += n_features
            index.append(current_col)

            # loop over degree >= 2 terms
            for _ in range(2, self._max_degree + 1):
                new_index = []
                end = index[-1]
                for feature_idx in range(n_features):
                    start = index[feature_idx]
                    new_index.append(current_col)
                    if self.interaction_only:
                        start += index[feature_idx + 1] - index[feature_idx]
                    next_col = current_col + end - start
                    if next_col <= current_col:
                        break

                    # XP[:, start:end] are terms of degree d - 1
                    # that exclude feature #feature_idx.
                    np.multiply(
                        XP[:, start:end],
                        X[:, feature_idx : feature_idx + 1],
                        out=XP[:, current_col:next_col],
                        casting="no",
                    )
                    current_col = next_col

                new_index.append(current_col)
                index = new_index

            if self._min_degree > 1:
                # Cut away the low-degree columns that were computed only
                # as intermediates for the higher degrees.
                n_XP, n_Xout = self._n_out_full, self.n_output_features_
                if self.include_bias:
                    Xout = np.empty(
                        shape=(n_samples, n_Xout), dtype=XP.dtype, order=self.order
                    )
                    Xout[:, 0] = 1
                    Xout[:, 1:] = XP[:, n_XP - n_Xout + 1 :]
                else:
                    Xout = XP[:, n_XP - n_Xout :].copy()
                XP = Xout
        return XP
  511. class SplineTransformer(TransformerMixin, BaseEstimator):
  512. """Generate univariate B-spline bases for features.
  513. Generate a new feature matrix consisting of
  514. `n_splines=n_knots + degree - 1` (`n_knots - 1` for
  515. `extrapolation="periodic"`) spline basis functions
  516. (B-splines) of polynomial order=`degree` for each feature.
  517. Read more in the :ref:`User Guide <spline_transformer>`.
  518. .. versionadded:: 1.0
  519. Parameters
  520. ----------
  521. n_knots : int, default=5
  522. Number of knots of the splines if `knots` equals one of
  523. {'uniform', 'quantile'}. Must be larger or equal 2. Ignored if `knots`
  524. is array-like.
  525. degree : int, default=3
  526. The polynomial degree of the spline basis. Must be a non-negative
  527. integer.
  528. knots : {'uniform', 'quantile'} or array-like of shape \
  529. (n_knots, n_features), default='uniform'
  530. Set knot positions such that first knot <= features <= last knot.
  531. - If 'uniform', `n_knots` number of knots are distributed uniformly
  532. from min to max values of the features.
  533. - If 'quantile', they are distributed uniformly along the quantiles of
  534. the features.
  535. - If an array-like is given, it directly specifies the sorted knot
  536. positions including the boundary knots. Note that, internally,
  537. `degree` number of knots are added before the first knot, the same
  538. after the last knot.
  539. extrapolation : {'error', 'constant', 'linear', 'continue', 'periodic'}, \
  540. default='constant'
  541. If 'error', values outside the min and max values of the training
  542. features raises a `ValueError`. If 'constant', the value of the
  543. splines at minimum and maximum value of the features is used as
  544. constant extrapolation. If 'linear', a linear extrapolation is used.
  545. If 'continue', the splines are extrapolated as is, i.e. option
  546. `extrapolate=True` in :class:`scipy.interpolate.BSpline`. If
  547. 'periodic', periodic splines with a periodicity equal to the distance
  548. between the first and last knot are used. Periodic splines enforce
  549. equal function values and derivatives at the first and last knot.
  550. For example, this makes it possible to avoid introducing an arbitrary
  551. jump between Dec 31st and Jan 1st in spline features derived from a
  552. naturally periodic "day-of-year" input feature. In this case it is
  553. recommended to manually set the knot values to control the period.
  554. include_bias : bool, default=True
  555. If False, then the last spline element inside the data range
  556. of a feature is dropped. As B-splines sum to one over the spline basis
  557. functions for each data point, they implicitly include a bias term,
  558. i.e. a column of ones. It acts as an intercept term in a linear models.
  559. order : {'C', 'F'}, default='C'
  560. Order of output array in the dense case. `'F'` order is faster to compute, but
  561. may slow down subsequent estimators.
  562. sparse_output : bool, default=False
  563. Will return sparse CSR matrix if set True else will return an array. This
  564. option is only available with `scipy>=1.8`.
  565. .. versionadded:: 1.2
  566. Attributes
  567. ----------
  568. bsplines_ : list of shape (n_features,)
  569. List of BSplines objects, one for each feature.
  570. n_features_in_ : int
  571. The total number of input features.
  572. feature_names_in_ : ndarray of shape (`n_features_in_`,)
  573. Names of features seen during :term:`fit`. Defined only when `X`
  574. has feature names that are all strings.
  575. .. versionadded:: 1.0
  576. n_features_out_ : int
  577. The total number of output features, which is computed as
  578. `n_features * n_splines`, where `n_splines` is
  579. the number of bases elements of the B-splines,
  580. `n_knots + degree - 1` for non-periodic splines and
  581. `n_knots - 1` for periodic ones.
  582. If `include_bias=False`, then it is only
  583. `n_features * (n_splines - 1)`.
  584. See Also
  585. --------
  586. KBinsDiscretizer : Transformer that bins continuous data into intervals.
  587. PolynomialFeatures : Transformer that generates polynomial and interaction
  588. features.
  589. Notes
  590. -----
  591. High degrees and a high number of knots can cause overfitting.
  592. See :ref:`examples/linear_model/plot_polynomial_interpolation.py
  593. <sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`.
  594. Examples
  595. --------
  596. >>> import numpy as np
  597. >>> from sklearn.preprocessing import SplineTransformer
  598. >>> X = np.arange(6).reshape(6, 1)
  599. >>> spline = SplineTransformer(degree=2, n_knots=3)
  600. >>> spline.fit_transform(X)
  601. array([[0.5 , 0.5 , 0. , 0. ],
  602. [0.18, 0.74, 0.08, 0. ],
  603. [0.02, 0.66, 0.32, 0. ],
  604. [0. , 0.32, 0.66, 0.02],
  605. [0. , 0.08, 0.74, 0.18],
  606. [0. , 0. , 0.5 , 0.5 ]])
  607. """
  608. _parameter_constraints: dict = {
  609. "n_knots": [Interval(Integral, 2, None, closed="left")],
  610. "degree": [Interval(Integral, 0, None, closed="left")],
  611. "knots": [StrOptions({"uniform", "quantile"}), "array-like"],
  612. "extrapolation": [
  613. StrOptions({"error", "constant", "linear", "continue", "periodic"})
  614. ],
  615. "include_bias": ["boolean"],
  616. "order": [StrOptions({"C", "F"})],
  617. "sparse_output": ["boolean"],
  618. }
    def __init__(
        self,
        n_knots=5,
        degree=3,
        *,
        knots="uniform",
        extrapolation="constant",
        include_bias=True,
        order="C",
        sparse_output=False,
    ):
        # Store hyperparameters as-is; they are validated at fit time via
        # `_parameter_constraints` (see the class docstring for semantics).
        self.n_knots = n_knots
        self.degree = degree
        self.knots = knots
        self.extrapolation = extrapolation
        self.include_bias = include_bias
        self.order = order
        self.sparse_output = sparse_output
  637. @staticmethod
  638. def _get_base_knot_positions(X, n_knots=10, knots="uniform", sample_weight=None):
  639. """Calculate base knot positions.
  640. Base knots such that first knot <= feature <= last knot. For the
  641. B-spline construction with scipy.interpolate.BSpline, 2*degree knots
  642. beyond the base interval are added.
  643. Returns
  644. -------
  645. knots : ndarray of shape (n_knots, n_features), dtype=np.float64
  646. Knot positions (points) of base interval.
  647. """
  648. if knots == "quantile":
  649. percentiles = 100 * np.linspace(
  650. start=0, stop=1, num=n_knots, dtype=np.float64
  651. )
  652. if sample_weight is None:
  653. knots = np.percentile(X, percentiles, axis=0)
  654. else:
  655. knots = np.array(
  656. [
  657. _weighted_percentile(X, sample_weight, percentile)
  658. for percentile in percentiles
  659. ]
  660. )
  661. else:
  662. # knots == 'uniform':
  663. # Note that the variable `knots` has already been validated and
  664. # `else` is therefore safe.
  665. # Disregard observations with zero weight.
  666. mask = slice(None, None, 1) if sample_weight is None else sample_weight > 0
  667. x_min = np.amin(X[mask], axis=0)
  668. x_max = np.amax(X[mask], axis=0)
  669. knots = np.linspace(
  670. start=x_min,
  671. stop=x_max,
  672. num=n_knots,
  673. endpoint=True,
  674. dtype=np.float64,
  675. )
  676. return knots
  677. def get_feature_names_out(self, input_features=None):
  678. """Get output feature names for transformation.
  679. Parameters
  680. ----------
  681. input_features : array-like of str or None, default=None
  682. Input features.
  683. - If `input_features` is `None`, then `feature_names_in_` is
  684. used as feature names in. If `feature_names_in_` is not defined,
  685. then the following input feature names are generated:
  686. `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
  687. - If `input_features` is an array-like, then `input_features` must
  688. match `feature_names_in_` if `feature_names_in_` is defined.
  689. Returns
  690. -------
  691. feature_names_out : ndarray of str objects
  692. Transformed feature names.
  693. """
  694. check_is_fitted(self, "n_features_in_")
  695. n_splines = self.bsplines_[0].c.shape[1]
  696. input_features = _check_feature_names_in(self, input_features)
  697. feature_names = []
  698. for i in range(self.n_features_in_):
  699. for j in range(n_splines - 1 + self.include_bias):
  700. feature_names.append(f"{input_features[i]}_sp_{j}")
  701. return np.asarray(feature_names, dtype=object)
@_fit_context(prefer_skip_nested_validation=True)
def fit(self, X, y=None, sample_weight=None):
    """Compute knot positions of splines.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The data.

    y : None
        Ignored.

    sample_weight : array-like of shape (n_samples,), default = None
        Individual weights for each sample. Used to calculate quantiles if
        `knots="quantile"`. For `knots="uniform"`, zero weighted
        observations are ignored for finding the min and max of `X`.

    Returns
    -------
    self : object
        Fitted transformer.
    """
    X = self._validate_data(
        X,
        reset=True,
        accept_sparse=False,
        ensure_min_samples=2,
        ensure_2d=True,
    )
    if sample_weight is not None:
        sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype)

    _, n_features = X.shape

    if isinstance(self.knots, str):
        # Knot positions are derived from the data ("uniform" / "quantile").
        base_knots = self._get_base_knot_positions(
            X, n_knots=self.n_knots, knots=self.knots, sample_weight=sample_weight
        )
    else:
        # User supplied explicit knot positions; validate count, width and
        # strict ordering (strictly increasing along axis 0, per feature).
        base_knots = check_array(self.knots, dtype=np.float64)
        if base_knots.shape[0] < 2:
            raise ValueError("Number of knots, knots.shape[0], must be >= 2.")
        elif base_knots.shape[1] != n_features:
            raise ValueError("knots.shape[1] == n_features is violated.")
        elif not np.all(np.diff(base_knots, axis=0) > 0):
            raise ValueError("knots must be sorted without duplicates.")

    if self.sparse_output and sp_version < parse_version("1.8.0"):
        raise ValueError(
            "Option sparse_output=True is only available with scipy>=1.8.0, "
            f"but here scipy=={sp_version} is used."
        )

    # number of knots for base interval
    n_knots = base_knots.shape[0]

    if self.extrapolation == "periodic" and n_knots <= self.degree:
        raise ValueError(
            "Periodic splines require degree < n_knots. Got n_knots="
            f"{n_knots} and degree={self.degree}."
        )

    # number of splines basis functions
    if self.extrapolation != "periodic":
        n_splines = n_knots + self.degree - 1
    else:
        # periodic splines have self.degree less degrees of freedom
        n_splines = n_knots - 1

    degree = self.degree
    n_out = n_features * n_splines
    # We have to add degree number of knots below, and degree number knots
    # above the base knots in order to make the spline basis complete.
    if self.extrapolation == "periodic":
        # For periodic splines the spacing of the first / last degree knots
        # needs to be a continuation of the spacing of the last / first
        # base knots.
        period = base_knots[-1] - base_knots[0]
        knots = np.r_[
            base_knots[-(degree + 1) : -1] - period,
            base_knots,
            base_knots[1 : (degree + 1)] + period,
        ]

    else:
        # Eilers & Marx in "Flexible smoothing with B-splines and
        # penalties" https://doi.org/10.1214/ss/1038425655 advise
        # against repeating first and last knot several times, which
        # would have inferior behaviour at boundaries if combined with
        # a penalty (hence P-Spline). We follow this advice even if our
        # splines are unpenalized. Meaning we do not:
        # knots = np.r_[
        #     np.tile(base_knots.min(axis=0), reps=[degree, 1]),
        #     base_knots,
        #     np.tile(base_knots.max(axis=0), reps=[degree, 1])
        # ]
        # Instead, we reuse the distance of the 2 first/last knots.
        dist_min = base_knots[1] - base_knots[0]
        dist_max = base_knots[-1] - base_knots[-2]

        knots = np.r_[
            np.linspace(
                base_knots[0] - degree * dist_min,
                base_knots[0] - dist_min,
                num=degree,
            ),
            base_knots,
            np.linspace(
                base_knots[-1] + dist_max,
                base_knots[-1] + degree * dist_max,
                num=degree,
            ),
        ]

    # With a diagonal coefficient matrix, we get back the spline basis
    # elements, i.e. the design matrix of the spline.
    # Note, BSpline appreciates C-contiguous float64 arrays as c=coef.
    coef = np.eye(n_splines, dtype=np.float64)
    if self.extrapolation == "periodic":
        # Duplicate the first `degree` coefficient rows so the basis wraps
        # around the period; transform folds these columns back together.
        coef = np.concatenate((coef, coef[:degree, :]))

    extrapolate = self.extrapolation in ["periodic", "continue"]

    # One BSpline object per feature; construct_fast skips scipy's input
    # validation since knots/coef were built above with the right layout.
    bsplines = [
        BSpline.construct_fast(
            knots[:, i], coef, self.degree, extrapolate=extrapolate
        )
        for i in range(n_features)
    ]
    self.bsplines_ = bsplines

    # If include_bias is False, one basis element per feature is dropped
    # in transform, reducing the output width accordingly.
    self.n_features_out_ = n_out - n_features * (1 - self.include_bias)
    return self
def transform(self, X):
    """Transform each feature data to B-splines.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        The data to transform.

    Returns
    -------
    XBS : {ndarray, sparse matrix} of shape (n_samples, n_features * n_splines)
        The matrix of features, where n_splines is the number of bases
        elements of the B-splines, n_knots + degree - 1.
    """
    check_is_fitted(self)

    X = self._validate_data(X, reset=False, accept_sparse=False, ensure_2d=True)

    n_samples, n_features = X.shape
    n_splines = self.bsplines_[0].c.shape[1]
    degree = self.degree

    # TODO: Remove this condition, once scipy 1.10 is the minimum version.
    # Only scipy => 1.10 supports design_matrix(.., extrapolate=..).
    # The default (implicit in scipy < 1.10) is extrapolate=False.
    scipy_1_10 = sp_version >= parse_version("1.10.0")
    # Note: self.bsplines_[0].extrapolate is True for extrapolation in
    # ["periodic", "continue"]
    if scipy_1_10:
        use_sparse = self.sparse_output
        kwargs_extrapolate = {"extrapolate": self.bsplines_[0].extrapolate}
    else:
        # Older scipy cannot build a sparse design matrix with
        # extrapolation, so fall back to the dense path in that case.
        use_sparse = self.sparse_output and not self.bsplines_[0].extrapolate
        kwargs_extrapolate = dict()

    # Note that scipy BSpline returns float64 arrays and converts input
    # x=X[:, i] to c-contiguous float64.
    n_out = self.n_features_out_ + n_features * (1 - self.include_bias)
    if X.dtype in FLOAT_DTYPES:
        dtype = X.dtype
    else:
        dtype = np.float64
    if use_sparse:
        # Per-feature sparse design matrices, hstack-ed at the end.
        output_list = []
    else:
        XBS = np.zeros((n_samples, n_out), dtype=dtype, order=self.order)
    # Build the design matrix one input feature at a time; feature i fills
    # columns [i * n_splines, (i + 1) * n_splines).
    for i in range(n_features):
        spl = self.bsplines_[i]
        if self.extrapolation in ("continue", "error", "periodic"):
            if self.extrapolation == "periodic":
                # With periodic extrapolation we map x to the segment
                # [spl.t[k], spl.t[n]].
                # This is equivalent to BSpline(.., extrapolate="periodic")
                # for scipy>=1.0.0.
                n = spl.t.size - spl.k - 1
                # Assign to new array to avoid inplace operation
                x = spl.t[spl.k] + (X[:, i] - spl.t[spl.k]) % (
                    spl.t[n] - spl.t[spl.k]
                )
            else:
                x = X[:, i]

            if use_sparse:
                XBS_sparse = BSpline.design_matrix(
                    x, spl.t, spl.k, **kwargs_extrapolate
                )
                if self.extrapolation == "periodic":
                    # See the construction of coef in fit. We need to add the last
                    # degree spline basis function to the first degree ones and
                    # then drop the last ones.
                    # Note: See comment about SparseEfficiencyWarning below.
                    XBS_sparse = XBS_sparse.tolil()
                    XBS_sparse[:, :degree] += XBS_sparse[:, -degree:]
                    XBS_sparse = XBS_sparse[:, :-degree]
            else:
                XBS[:, (i * n_splines) : ((i + 1) * n_splines)] = spl(x)
        else:  # extrapolation in ("constant", "linear")
            # Base interval boundaries of the spline for this feature.
            xmin, xmax = spl.t[degree], spl.t[-degree - 1]
            # spline values at boundaries
            f_min, f_max = spl(xmin), spl(xmax)
            # In-range samples get ordinary spline values; out-of-range
            # samples are handled by the extrapolation branches below.
            mask = (xmin <= X[:, i]) & (X[:, i] <= xmax)
            if use_sparse:
                mask_inv = ~mask
                x = X[:, i].copy()
                # Set some arbitrary values outside boundary that will be reassigned
                # later.
                x[mask_inv] = spl.t[self.degree]
                XBS_sparse = BSpline.design_matrix(x, spl.t, spl.k)
                # Note: Without converting to lil_matrix we would get:
                # scipy.sparse._base.SparseEfficiencyWarning: Changing the sparsity
                # structure of a csr_matrix is expensive. lil_matrix is more
                # efficient.
                if np.any(mask_inv):
                    XBS_sparse = XBS_sparse.tolil()
                    XBS_sparse[mask_inv, :] = 0
            else:
                XBS[mask, (i * n_splines) : ((i + 1) * n_splines)] = spl(X[mask, i])

        # Note for extrapolation:
        # 'continue' is already returned as is by scipy BSplines
        if self.extrapolation == "error":
            # BSpline with extrapolate=False does not raise an error, but
            # outputs np.nan.
            if (use_sparse and np.any(np.isnan(XBS_sparse.data))) or (
                not use_sparse
                and np.any(
                    np.isnan(XBS[:, (i * n_splines) : ((i + 1) * n_splines)])
                )
            ):
                raise ValueError(
                    "X contains values beyond the limits of the knots."
                )
        elif self.extrapolation == "constant":
            # Set all values beyond xmin and xmax to the value of the
            # spline basis functions at those two positions.
            # Only the first degree and last degree number of splines
            # have non-zero values at the boundaries.
            mask = X[:, i] < xmin
            if np.any(mask):
                if use_sparse:
                    # Note: See comment about SparseEfficiencyWarning above.
                    XBS_sparse = XBS_sparse.tolil()
                    XBS_sparse[mask, :degree] = f_min[:degree]

                else:
                    XBS[mask, (i * n_splines) : (i * n_splines + degree)] = f_min[
                        :degree
                    ]

            mask = X[:, i] > xmax
            if np.any(mask):
                if use_sparse:
                    # Note: See comment about SparseEfficiencyWarning above.
                    XBS_sparse = XBS_sparse.tolil()
                    XBS_sparse[mask, -degree:] = f_max[-degree:]
                else:
                    XBS[
                        mask,
                        ((i + 1) * n_splines - degree) : ((i + 1) * n_splines),
                    ] = f_max[-degree:]

        elif self.extrapolation == "linear":
            # Continue the degree first and degree last spline bases
            # linearly beyond the boundaries, with slope = derivative at
            # the boundary.
            # Note that all others have derivative = value = 0 at the
            # boundaries.
            # spline derivatives = slopes at boundaries
            fp_min, fp_max = spl(xmin, nu=1), spl(xmax, nu=1)
            # Compute the linear continuation.
            if degree <= 1:
                # For degree=1, the derivative of 2nd spline is not zero at
                # boundary. For degree=0 it is the same as 'constant'.
                # NOTE: this reassignment of the local `degree` persists
                # for the remaining features in the loop (same behavior
                # each iteration, since it only ever bumps 1 -> 2).
                degree += 1
            for j in range(degree):
                mask = X[:, i] < xmin
                if np.any(mask):
                    linear_extr = f_min[j] + (X[mask, i] - xmin) * fp_min[j]
                    if use_sparse:
                        # Note: See comment about SparseEfficiencyWarning above.
                        XBS_sparse = XBS_sparse.tolil()
                        XBS_sparse[mask, j] = linear_extr
                    else:
                        XBS[mask, i * n_splines + j] = linear_extr

                mask = X[:, i] > xmax
                if np.any(mask):
                    # Mirror index: j-th basis from the right end.
                    k = n_splines - 1 - j
                    linear_extr = f_max[k] + (X[mask, i] - xmax) * fp_max[k]
                    if use_sparse:
                        # Note: See comment about SparseEfficiencyWarning above.
                        XBS_sparse = XBS_sparse.tolil()
                        XBS_sparse[mask, k : k + 1] = linear_extr[:, None]
                    else:
                        XBS[mask, i * n_splines + k] = linear_extr

        if use_sparse:
            XBS_sparse = XBS_sparse.tocsr()
            output_list.append(XBS_sparse)

    if use_sparse:
        # TODO: Remove this conditional error when the minimum supported version of
        # SciPy is 1.9.2
        # `scipy.sparse.hstack` breaks in scipy<1.9.2
        # when `n_features_out_ > max_int32`
        max_int32 = np.iinfo(np.int32).max
        all_int32 = True
        for mat in output_list:
            all_int32 &= mat.indices.dtype == np.int32
        if (
            sp_version < parse_version("1.9.2")
            and self.n_features_out_ > max_int32
            and all_int32
        ):
            raise ValueError(
                "In scipy versions `<1.9.2`, the function `scipy.sparse.hstack`"
                " produces negative columns when:\n1. The output shape contains"
                " `n_cols` too large to be represented by a 32bit signed"
                " integer.\n. All sub-matrices to be stacked have indices of"
                " dtype `np.int32`.\nTo avoid this error, either use a version"
                " of scipy `>=1.9.2` or alter the `SplineTransformer`"
                " transformer to produce fewer than 2^31 output features"
            )
        XBS = sparse.hstack(output_list, format="csr")
    elif self.sparse_output:
        # TODO: Remove once scipy 1.10 is the minimum version. See comments above.
        # Dense path was used despite sparse_output=True (old scipy +
        # extrapolation); convert the final result to CSR.
        XBS = sparse.csr_matrix(XBS)

    if self.include_bias:
        return XBS
    else:
        # We throw away one spline basis per feature.
        # We chose the last one.
        indices = [j for j in range(XBS.shape[1]) if (j + 1) % n_splines != 0]
        return XBS[:, indices]
  1018. def _more_tags(self):
  1019. return {
  1020. "_xfail_checks": {
  1021. "check_estimators_pickle": (
  1022. "Current Scipy implementation of _bsplines does not"
  1023. "support const memory views."
  1024. ),
  1025. }
  1026. }