_column_transformer.py 45 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168
  1. """
  2. The :mod:`sklearn.compose._column_transformer` module implements utilities
  3. to work with heterogeneous data and to apply different transformers to
  4. different columns.
  5. """
  6. # Author: Andreas Mueller
  7. # Joris Van den Bossche
  8. # License: BSD
  9. from collections import Counter
  10. from itertools import chain
  11. from numbers import Integral, Real
  12. import numpy as np
  13. from scipy import sparse
  14. from ..base import TransformerMixin, _fit_context, clone
  15. from ..pipeline import _fit_transform_one, _name_estimators, _transform_one
  16. from ..preprocessing import FunctionTransformer
  17. from ..utils import Bunch, _get_column_indices, _safe_indexing, check_pandas_support
  18. from ..utils._estimator_html_repr import _VisualBlock
  19. from ..utils._param_validation import HasMethods, Hidden, Interval, StrOptions
  20. from ..utils._set_output import _get_output_config, _safe_set_output
  21. from ..utils.metaestimators import _BaseComposition
  22. from ..utils.parallel import Parallel, delayed
  23. from ..utils.validation import (
  24. _check_feature_names_in,
  25. _num_samples,
  26. check_array,
  27. check_is_fitted,
  28. )
# Public API of this module.
__all__ = ["ColumnTransformer", "make_column_transformer", "make_column_selector"]


# Error message raised when a column selected with a scalar specification
# (which yields 1D data) is fed to a transformer that expects 2D input.
_ERR_MSG_1DCOLUMN = (
    "1D data passed to a transformer that expects 2D data. "
    "Try to specify the column selection as a list of one "
    "item instead of a scalar."
)
  35. class ColumnTransformer(TransformerMixin, _BaseComposition):
  36. """Applies transformers to columns of an array or pandas DataFrame.
  37. This estimator allows different columns or column subsets of the input
  38. to be transformed separately and the features generated by each transformer
  39. will be concatenated to form a single feature space.
  40. This is useful for heterogeneous or columnar data, to combine several
  41. feature extraction mechanisms or transformations into a single transformer.
  42. Read more in the :ref:`User Guide <column_transformer>`.
  43. .. versionadded:: 0.20
  44. Parameters
  45. ----------
  46. transformers : list of tuples
  47. List of (name, transformer, columns) tuples specifying the
  48. transformer objects to be applied to subsets of the data.
  49. name : str
  50. Like in Pipeline and FeatureUnion, this allows the transformer and
  51. its parameters to be set using ``set_params`` and searched in grid
  52. search.
  53. transformer : {'drop', 'passthrough'} or estimator
  54. Estimator must support :term:`fit` and :term:`transform`.
  55. Special-cased strings 'drop' and 'passthrough' are accepted as
  56. well, to indicate to drop the columns or to pass them through
  57. untransformed, respectively.
  58. columns : str, array-like of str, int, array-like of int, \
  59. array-like of bool, slice or callable
  60. Indexes the data on its second axis. Integers are interpreted as
  61. positional columns, while strings can reference DataFrame columns
  62. by name. A scalar string or int should be used where
  63. ``transformer`` expects X to be a 1d array-like (vector),
  64. otherwise a 2d array will be passed to the transformer.
  65. A callable is passed the input data `X` and can return any of the
  66. above. To select multiple columns by name or dtype, you can use
  67. :obj:`make_column_selector`.
  68. remainder : {'drop', 'passthrough'} or estimator, default='drop'
  69. By default, only the specified columns in `transformers` are
  70. transformed and combined in the output, and the non-specified
  71. columns are dropped. (default of ``'drop'``).
  72. By specifying ``remainder='passthrough'``, all remaining columns that
  73. were not specified in `transformers`, but present in the data passed
  74. to `fit` will be automatically passed through. This subset of columns
  75. is concatenated with the output of the transformers. For dataframes,
  76. extra columns not seen during `fit` will be excluded from the output
  77. of `transform`.
  78. By setting ``remainder`` to be an estimator, the remaining
  79. non-specified columns will use the ``remainder`` estimator. The
  80. estimator must support :term:`fit` and :term:`transform`.
  81. Note that using this feature requires that the DataFrame columns
  82. input at :term:`fit` and :term:`transform` have identical order.
  83. sparse_threshold : float, default=0.3
  84. If the output of the different transformers contains sparse matrices,
  85. these will be stacked as a sparse matrix if the overall density is
  86. lower than this value. Use ``sparse_threshold=0`` to always return
  87. dense. When the transformed output consists of all dense data, the
  88. stacked result will be dense, and this keyword will be ignored.
  89. n_jobs : int, default=None
  90. Number of jobs to run in parallel.
  91. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
  92. ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
  93. for more details.
  94. transformer_weights : dict, default=None
  95. Multiplicative weights for features per transformer. The output of the
  96. transformer is multiplied by these weights. Keys are transformer names,
  97. values the weights.
  98. verbose : bool, default=False
  99. If True, the time elapsed while fitting each transformer will be
  100. printed as it is completed.
  101. verbose_feature_names_out : bool, default=True
  102. If True, :meth:`ColumnTransformer.get_feature_names_out` will prefix
  103. all feature names with the name of the transformer that generated that
  104. feature.
  105. If False, :meth:`ColumnTransformer.get_feature_names_out` will not
  106. prefix any feature names and will error if feature names are not
  107. unique.
  108. .. versionadded:: 1.0
  109. Attributes
  110. ----------
  111. transformers_ : list
  112. The collection of fitted transformers as tuples of
  113. (name, fitted_transformer, column). `fitted_transformer` can be an
  114. estimator, 'drop', or 'passthrough'. In case there were no columns
  115. selected, this will be the unfitted transformer.
  116. If there are remaining columns, the final element is a tuple of the
  117. form:
  118. ('remainder', transformer, remaining_columns) corresponding to the
  119. ``remainder`` parameter. If there are remaining columns, then
  120. ``len(transformers_)==len(transformers)+1``, otherwise
  121. ``len(transformers_)==len(transformers)``.
  122. named_transformers_ : :class:`~sklearn.utils.Bunch`
  123. Read-only attribute to access any transformer by given name.
  124. Keys are transformer names and values are the fitted transformer
  125. objects.
  126. sparse_output_ : bool
  127. Boolean flag indicating whether the output of ``transform`` is a
  128. sparse matrix or a dense numpy array, which depends on the output
  129. of the individual transformers and the `sparse_threshold` keyword.
  130. output_indices_ : dict
  131. A dictionary from each transformer name to a slice, where the slice
  132. corresponds to indices in the transformed output. This is useful to
  133. inspect which transformer is responsible for which transformed
  134. feature(s).
  135. .. versionadded:: 1.0
  136. n_features_in_ : int
  137. Number of features seen during :term:`fit`. Only defined if the
  138. underlying transformers expose such an attribute when fit.
  139. .. versionadded:: 0.24
  140. feature_names_in_ : ndarray of shape (`n_features_in_`,)
  141. Names of features seen during :term:`fit`. Defined only when `X`
  142. has feature names that are all strings.
  143. .. versionadded:: 1.0
  144. See Also
  145. --------
  146. make_column_transformer : Convenience function for
  147. combining the outputs of multiple transformer objects applied to
  148. column subsets of the original feature space.
  149. make_column_selector : Convenience function for selecting
  150. columns based on datatype or the columns name with a regex pattern.
  151. Notes
  152. -----
  153. The order of the columns in the transformed feature matrix follows the
  154. order of how the columns are specified in the `transformers` list.
  155. Columns of the original feature matrix that are not specified are
  156. dropped from the resulting transformed feature matrix, unless specified
  157. in the `passthrough` keyword. Those columns specified with `passthrough`
  158. are added at the right to the output of the transformers.
  159. Examples
  160. --------
  161. >>> import numpy as np
  162. >>> from sklearn.compose import ColumnTransformer
  163. >>> from sklearn.preprocessing import Normalizer
  164. >>> ct = ColumnTransformer(
  165. ... [("norm1", Normalizer(norm='l1'), [0, 1]),
  166. ... ("norm2", Normalizer(norm='l1'), slice(2, 4))])
  167. >>> X = np.array([[0., 1., 2., 2.],
  168. ... [1., 1., 0., 1.]])
  169. >>> # Normalizer scales each row of X to unit norm. A separate scaling
  170. >>> # is applied for the two first and two last elements of each
  171. >>> # row independently.
  172. >>> ct.fit_transform(X)
  173. array([[0. , 1. , 0.5, 0.5],
  174. [0.5, 0.5, 0. , 1. ]])
  175. :class:`ColumnTransformer` can be configured with a transformer that requires
  176. a 1d array by setting the column to a string:
  177. >>> from sklearn.feature_extraction import FeatureHasher
  178. >>> from sklearn.preprocessing import MinMaxScaler
  179. >>> import pandas as pd # doctest: +SKIP
  180. >>> X = pd.DataFrame({
  181. ... "documents": ["First item", "second one here", "Is this the last?"],
  182. ... "width": [3, 4, 5],
  183. ... }) # doctest: +SKIP
  184. >>> # "documents" is a string which configures ColumnTransformer to
  185. >>> # pass the documents column as a 1d array to the FeatureHasher
  186. >>> ct = ColumnTransformer(
  187. ... [("text_preprocess", FeatureHasher(input_type="string"), "documents"),
  188. ... ("num_preprocess", MinMaxScaler(), ["width"])])
  189. >>> X_trans = ct.fit_transform(X) # doctest: +SKIP
  190. For a more detailed example of usage, see
  191. :ref:`sphx_glr_auto_examples_compose_plot_column_transformer_mixed_types.py`.
  192. """
    # Constructor parameter without a default; used by the common estimator
    # checks to instantiate the class.
    _required_parameters = ["transformers"]

    # Declarative constraints consumed by scikit-learn's parameter
    # validation machinery (via `_fit_context`); each key lists the accepted
    # specifications for the corresponding constructor argument.
    _parameter_constraints: dict = {
        "transformers": [list, Hidden(tuple)],
        "remainder": [
            StrOptions({"drop", "passthrough"}),
            # ... or an estimator implementing fit + transform,
            HasMethods(["fit", "transform"]),
            # ... or fit_transform + transform.
            HasMethods(["fit_transform", "transform"]),
        ],
        "sparse_threshold": [Interval(Real, 0, 1, closed="both")],
        "n_jobs": [Integral, None],
        "transformer_weights": [dict, None],
        "verbose": ["verbose"],
        "verbose_feature_names_out": ["boolean"],
    }
    def __init__(
        self,
        transformers,
        *,
        remainder="drop",
        sparse_threshold=0.3,
        n_jobs=None,
        transformer_weights=None,
        verbose=False,
        verbose_feature_names_out=True,
    ):
        # Per scikit-learn convention, __init__ only stores the parameters
        # unchanged (so get_params/set_params round-trip); all validation
        # happens at fit time.
        self.transformers = transformers
        self.remainder = remainder
        self.sparse_threshold = sparse_threshold
        self.n_jobs = n_jobs
        self.transformer_weights = transformer_weights
        self.verbose = verbose
        self.verbose_feature_names_out = verbose_feature_names_out
  225. @property
  226. def _transformers(self):
  227. """
  228. Internal list of transformer only containing the name and
  229. transformers, dropping the columns. This is for the implementation
  230. of get_params via BaseComposition._get_params which expects lists
  231. of tuples of len 2.
  232. """
  233. try:
  234. return [(name, trans) for name, trans, _ in self.transformers]
  235. except (TypeError, ValueError):
  236. return self.transformers
  237. @_transformers.setter
  238. def _transformers(self, value):
  239. try:
  240. self.transformers = [
  241. (name, trans, col)
  242. for ((name, trans), (_, _, col)) in zip(value, self.transformers)
  243. ]
  244. except (TypeError, ValueError):
  245. self.transformers = value
    def set_output(self, *, transform=None):
        """Set the output container when `"transform"` and `"fit_transform"` are called.

        Calling `set_output` will set the output of all estimators in `transformers`
        and `transformers_`.

        Parameters
        ----------
        transform : {"default", "pandas"}, default=None
            Configure output of `transform` and `fit_transform`.

            - `"default"`: Default output format of a transformer
            - `"pandas"`: DataFrame output
            - `None`: Transform configuration is unchanged

        Returns
        -------
        self : estimator instance
            Estimator instance.
        """
        super().set_output(transform=transform)
        # Propagate the configuration to every sub-estimator: the unfitted
        # ones in `transformers` and, when already fitted, the fitted ones in
        # `transformers_`.  The special strings 'passthrough'/'drop' carry no
        # output configuration and are skipped.
        transformers = (
            trans
            for _, trans, _ in chain(
                self.transformers, getattr(self, "transformers_", [])
            )
            if trans not in {"passthrough", "drop"}
        )
        for trans in transformers:
            _safe_set_output(trans, transform=transform)

        # The remainder may itself be an estimator; configure it as well.
        if self.remainder not in {"passthrough", "drop"}:
            _safe_set_output(self.remainder, transform=transform)

        return self
    def get_params(self, deep=True):
        """Get parameters for this estimator.

        Returns the parameters given in the constructor as well as the
        estimators contained within the `transformers` of the
        `ColumnTransformer`.

        Parameters
        ----------
        deep : bool, default=True
            If True, will return the parameters for this estimator and
            contained subobjects that are estimators.

        Returns
        -------
        params : dict
            Parameter names mapped to their values.
        """
        # Delegate to _BaseComposition._get_params, which walks the
        # (name, transformer) pairs exposed by the `_transformers` property.
        return self._get_params("_transformers", deep=deep)
    def set_params(self, **kwargs):
        """Set the parameters of this estimator.

        Valid parameter keys can be listed with ``get_params()``. Note that you
        can directly set the parameters of the estimators contained in
        `transformers` of `ColumnTransformer`.

        Parameters
        ----------
        **kwargs : dict
            Estimator parameters.

        Returns
        -------
        self : ColumnTransformer
            This estimator.
        """
        # Delegate to _BaseComposition._set_params, which supports both
        # top-level parameters and nested `<name>__<param>` keys.
        self._set_params("_transformers", **kwargs)
        return self
    def _iter(self, fitted=False, replace_strings=False, column_as_strings=False):
        """
        Generate (name, trans, column, weight) tuples.

        If fitted=True, use the fitted transformers, else use the
        user specified transformers updated with converted column names
        and potentially appended with transformer for remainder.

        Parameters
        ----------
        fitted : bool, default=False
            Iterate over ``self.transformers_`` instead of the user-provided
            ``self.transformers``.
        replace_strings : bool, default=False
            Substitute 'passthrough' with a FunctionTransformer and skip
            'drop' entries and empty column selections, so that only
            transformers actually producing output are yielded.
        column_as_strings : bool, default=False
            Yield column selections as string labels taken from
            ``self.feature_names_in_`` instead of the stored specification.
        """
        if fitted:
            if replace_strings:
                # Replace "passthrough" with the fitted version in
                # _name_to_fitted_passthrough
                def replace_passthrough(name, trans, columns):
                    if name not in self._name_to_fitted_passthrough:
                        return name, trans, columns
                    return name, self._name_to_fitted_passthrough[name], columns

                transformers = [
                    replace_passthrough(*trans) for trans in self.transformers_
                ]
            else:
                transformers = self.transformers_
        else:
            # interleave the validated column specifiers
            transformers = [
                (name, trans, column)
                for (name, trans, _), column in zip(self.transformers, self._columns)
            ]
            # add transformer tuple for remainder
            if self._remainder[2]:
                transformers = chain(transformers, [self._remainder])
        get_weight = (self.transformer_weights or {}).get

        output_config = _get_output_config("transform", self)
        for name, trans, columns in transformers:
            if replace_strings:
                # replace 'passthrough' with identity transformer and
                # skip in case of 'drop'
                if trans == "passthrough":
                    trans = FunctionTransformer(
                        accept_sparse=True,
                        check_inverse=False,
                        feature_names_out="one-to-one",
                    ).set_output(transform=output_config["dense"])
                elif trans == "drop":
                    continue
                elif _is_empty_column_selection(columns):
                    continue

            if column_as_strings:
                # Convert all columns to using their string labels
                columns_is_scalar = np.isscalar(columns)

                indices = self._transformer_to_input_indices[name]
                columns = self.feature_names_in_[indices]

                if columns_is_scalar:
                    # selection is done with one dimension
                    columns = columns[0]

            yield (name, trans, columns, get_weight(name))
  361. def _validate_transformers(self):
  362. if not self.transformers:
  363. return
  364. names, transformers, _ = zip(*self.transformers)
  365. # validate names
  366. self._validate_names(names)
  367. # validate estimators
  368. for t in transformers:
  369. if t in ("drop", "passthrough"):
  370. continue
  371. if not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not hasattr(
  372. t, "transform"
  373. ):
  374. # Used to validate the transformers in the `transformers` list
  375. raise TypeError(
  376. "All estimators should implement fit and "
  377. "transform, or can be 'drop' or 'passthrough' "
  378. "specifiers. '%s' (type %s) doesn't." % (t, type(t))
  379. )
  380. def _validate_column_callables(self, X):
  381. """
  382. Converts callable column specifications.
  383. """
  384. all_columns = []
  385. transformer_to_input_indices = {}
  386. for name, _, columns in self.transformers:
  387. if callable(columns):
  388. columns = columns(X)
  389. all_columns.append(columns)
  390. transformer_to_input_indices[name] = _get_column_indices(X, columns)
  391. self._columns = all_columns
  392. self._transformer_to_input_indices = transformer_to_input_indices
  393. def _validate_remainder(self, X):
  394. """
  395. Validates ``remainder`` and defines ``_remainder`` targeting
  396. the remaining columns.
  397. """
  398. self._n_features = X.shape[1]
  399. cols = set(chain(*self._transformer_to_input_indices.values()))
  400. remaining = sorted(set(range(self._n_features)) - cols)
  401. self._remainder = ("remainder", self.remainder, remaining)
  402. self._transformer_to_input_indices["remainder"] = remaining
    @property
    def named_transformers_(self):
        """Access the fitted transformer by name.

        Read-only attribute to access any transformer by given name.
        Keys are transformer names and values are the fitted transformer
        objects.
        """
        # Use Bunch object to improve autocomplete; built fresh on each
        # access from the fitted `transformers_` triples.
        return Bunch(**{name: trans for name, trans, _ in self.transformers_})
  412. def _get_feature_name_out_for_transformer(
  413. self, name, trans, column, feature_names_in
  414. ):
  415. """Gets feature names of transformer.
  416. Used in conjunction with self._iter(fitted=True) in get_feature_names_out.
  417. """
  418. column_indices = self._transformer_to_input_indices[name]
  419. names = feature_names_in[column_indices]
  420. if trans == "drop" or _is_empty_column_selection(column):
  421. return
  422. elif trans == "passthrough":
  423. return names
  424. # An actual transformer
  425. if not hasattr(trans, "get_feature_names_out"):
  426. raise AttributeError(
  427. f"Transformer {name} (type {type(trans).__name__}) does "
  428. "not provide get_feature_names_out."
  429. )
  430. return trans.get_feature_names_out(names)
  431. def get_feature_names_out(self, input_features=None):
  432. """Get output feature names for transformation.
  433. Parameters
  434. ----------
  435. input_features : array-like of str or None, default=None
  436. Input features.
  437. - If `input_features` is `None`, then `feature_names_in_` is
  438. used as feature names in. If `feature_names_in_` is not defined,
  439. then the following input feature names are generated:
  440. `["x0", "x1", ..., "x(n_features_in_ - 1)"]`.
  441. - If `input_features` is an array-like, then `input_features` must
  442. match `feature_names_in_` if `feature_names_in_` is defined.
  443. Returns
  444. -------
  445. feature_names_out : ndarray of str objects
  446. Transformed feature names.
  447. """
  448. check_is_fitted(self)
  449. input_features = _check_feature_names_in(self, input_features)
  450. # List of tuples (name, feature_names_out)
  451. transformer_with_feature_names_out = []
  452. for name, trans, column, _ in self._iter(fitted=True):
  453. feature_names_out = self._get_feature_name_out_for_transformer(
  454. name, trans, column, input_features
  455. )
  456. if feature_names_out is None:
  457. continue
  458. transformer_with_feature_names_out.append((name, feature_names_out))
  459. if not transformer_with_feature_names_out:
  460. # No feature names
  461. return np.array([], dtype=object)
  462. return self._add_prefix_for_feature_names_out(
  463. transformer_with_feature_names_out
  464. )
  465. def _add_prefix_for_feature_names_out(self, transformer_with_feature_names_out):
  466. """Add prefix for feature names out that includes the transformer names.
  467. Parameters
  468. ----------
  469. transformer_with_feature_names_out : list of tuples of (str, array-like of str)
  470. The tuple consistent of the transformer's name and its feature names out.
  471. Returns
  472. -------
  473. feature_names_out : ndarray of shape (n_features,), dtype=str
  474. Transformed feature names.
  475. """
  476. if self.verbose_feature_names_out:
  477. # Prefix the feature names out with the transformers name
  478. names = list(
  479. chain.from_iterable(
  480. (f"{name}__{i}" for i in feature_names_out)
  481. for name, feature_names_out in transformer_with_feature_names_out
  482. )
  483. )
  484. return np.asarray(names, dtype=object)
  485. # verbose_feature_names_out is False
  486. # Check that names are all unique without a prefix
  487. feature_names_count = Counter(
  488. chain.from_iterable(s for _, s in transformer_with_feature_names_out)
  489. )
  490. top_6_overlap = [
  491. name for name, count in feature_names_count.most_common(6) if count > 1
  492. ]
  493. top_6_overlap.sort()
  494. if top_6_overlap:
  495. if len(top_6_overlap) == 6:
  496. # There are more than 5 overlapping names, we only show the 5
  497. # of the feature names
  498. names_repr = str(top_6_overlap[:5])[:-1] + ", ...]"
  499. else:
  500. names_repr = str(top_6_overlap)
  501. raise ValueError(
  502. f"Output feature names: {names_repr} are not unique. Please set "
  503. "verbose_feature_names_out=True to add prefixes to feature names"
  504. )
  505. return np.concatenate(
  506. [name for _, name in transformer_with_feature_names_out],
  507. )
    def _update_fitted_transformers(self, transformers):
        """Rebuild ``self.transformers_`` from the fitted transformers.

        ``transformers`` holds the fitted estimators in the same order as
        produced by ``self._iter(replace_strings=True)``, so it is consumed
        as an iterator in lockstep with a fresh ``self._iter()`` pass.
        """
        # transformers are fitted; excludes 'drop' cases
        fitted_transformers = iter(transformers)
        transformers_ = []
        self._name_to_fitted_passthrough = {}

        for name, old, column, _ in self._iter():
            if old == "drop":
                trans = "drop"
            elif old == "passthrough":
                # FunctionTransformer is present in list of transformers,
                # so get next transformer, but save original string
                func_transformer = next(fitted_transformers)
                trans = "passthrough"

                # The fitted FunctionTransformer is saved in another attribute,
                # so it can be used during transform for set_output.
                self._name_to_fitted_passthrough[name] = func_transformer
            elif _is_empty_column_selection(column):
                # No columns selected: this transformer was never fitted, so
                # keep the unfitted original.
                trans = old
            else:
                trans = next(fitted_transformers)
            transformers_.append((name, trans, column))

        # sanity check that transformers is exhausted
        assert not list(fitted_transformers)
        self.transformers_ = transformers_
  532. def _validate_output(self, result):
  533. """
  534. Ensure that the output of each transformer is 2D. Otherwise
  535. hstack can raise an error or produce incorrect results.
  536. """
  537. names = [
  538. name for name, _, _, _ in self._iter(fitted=True, replace_strings=True)
  539. ]
  540. for Xs, name in zip(result, names):
  541. if not getattr(Xs, "ndim", 0) == 2:
  542. raise ValueError(
  543. "The output of the '{0}' transformer should be 2D (scipy "
  544. "matrix, array, or pandas DataFrame).".format(name)
  545. )
    def _record_output_indices(self, Xs):
        """
        Record which transformer produced which column.

        Populates ``self.output_indices_`` with a ``name -> slice`` mapping
        into the horizontally stacked output; ``Xs`` must be in the same
        order as ``self._iter(fitted=True, replace_strings=True)``.
        """
        idx = 0
        self.output_indices_ = {}

        for transformer_idx, (name, _, _, _) in enumerate(
            self._iter(fitted=True, replace_strings=True)
        ):
            n_columns = Xs[transformer_idx].shape[1]
            self.output_indices_[name] = slice(idx, idx + n_columns)
            idx += n_columns

        # `_iter` only generates transformers that have a non empty
        # selection. Here we set empty slices for transformers that
        # generate no output, which are safe for indexing
        all_names = [t[0] for t in self.transformers] + ["remainder"]
        for name in all_names:
            if name not in self.output_indices_:
                self.output_indices_[name] = slice(0, 0)
  565. def _log_message(self, name, idx, total):
  566. if not self.verbose:
  567. return None
  568. return "(%d of %d) Processing %s" % (idx, total, name)
    def _fit_transform(self, X, y, func, fitted=False, column_as_strings=False):
        """
        Private function to fit and/or transform on demand.

        Return value (transformers and/or transformed X data) depends
        on the passed function.
        ``fitted=True`` ensures the fitted transformers are used.
        """
        transformers = list(
            self._iter(
                fitted=fitted, replace_strings=True, column_as_strings=column_as_strings
            )
        )
        try:
            return Parallel(n_jobs=self.n_jobs)(
                delayed(func)(
                    # Only clone unfitted transformers; when `fitted` the
                    # already-fitted estimators must be reused as-is.
                    transformer=clone(trans) if not fitted else trans,
                    X=_safe_indexing(X, column, axis=1),
                    y=y,
                    weight=weight,
                    message_clsname="ColumnTransformer",
                    message=self._log_message(name, idx, len(transformers)),
                )
                for idx, (name, trans, column, weight) in enumerate(transformers, 1)
            )
        except ValueError as e:
            if "Expected 2D array, got 1D array instead" in str(e):
                # Re-raise with a hint about scalar column selections, which
                # produce 1D input for transformers expecting 2D.
                raise ValueError(_ERR_MSG_1DCOLUMN) from e
            else:
                raise
    def fit(self, X, y=None):
        """Fit all transformers using X.

        Parameters
        ----------
        X : {array-like, dataframe} of shape (n_samples, n_features)
            Input data, of which specified subsets are used to fit the
            transformers.

        y : array-like of shape (n_samples,...), default=None
            Targets for supervised learning.

        Returns
        -------
        self : ColumnTransformer
            This estimator.
        """
        # we use fit_transform to make sure to set sparse_output_ (for which we
        # need the transformed data) to have consistent output type in predict
        self.fit_transform(X, y=y)
        return self
    @_fit_context(
        # estimators in ColumnTransformer.transformers are not validated yet
        prefer_skip_nested_validation=False
    )
    def fit_transform(self, X, y=None):
        """Fit all transformers, transform the data and concatenate results.

        Parameters
        ----------
        X : {array-like, dataframe} of shape (n_samples, n_features)
            Input data, of which specified subsets are used to fit the
            transformers.

        y : array-like of shape (n_samples,), default=None
            Targets for supervised learning.

        Returns
        -------
        X_t : {array-like, sparse matrix} of \
                shape (n_samples, sum_n_components)
            Horizontally stacked results of transformers. sum_n_components is the
            sum of n_components (output dimension) over transformers. If
            any result is a sparse matrix, everything will be converted to
            sparse matrices.
        """
        self._check_feature_names(X, reset=True)

        X = _check_X(X)
        # set n_features_in_ attribute
        self._check_n_features(X, reset=True)
        self._validate_transformers()
        self._validate_column_callables(X)
        self._validate_remainder(X)

        result = self._fit_transform(X, y, _fit_transform_one)

        if not result:
            self._update_fitted_transformers([])
            # All transformers are None
            return np.zeros((X.shape[0], 0))

        Xs, transformers = zip(*result)

        # determine if concatenated output will be sparse or not
        if any(sparse.issparse(X) for X in Xs):
            nnz = sum(X.nnz if sparse.issparse(X) else X.size for X in Xs)
            total = sum(
                X.shape[0] * X.shape[1] if sparse.issparse(X) else X.size for X in Xs
            )
            density = nnz / total
            # stack sparse only when the overall density is below threshold
            self.sparse_output_ = density < self.sparse_threshold
        else:
            self.sparse_output_ = False

        self._update_fitted_transformers(transformers)
        self._validate_output(Xs)
        self._record_output_indices(Xs)

        return self._hstack(list(Xs))
  665. def transform(self, X):
  666. """Transform X separately by each transformer, concatenate results.
  667. Parameters
  668. ----------
  669. X : {array-like, dataframe} of shape (n_samples, n_features)
  670. The data to be transformed by subset.
  671. Returns
  672. -------
  673. X_t : {array-like, sparse matrix} of \
  674. shape (n_samples, sum_n_components)
  675. Horizontally stacked results of transformers. sum_n_components is the
  676. sum of n_components (output dimension) over transformers. If
  677. any result is a sparse matrix, everything will be converted to
  678. sparse matrices.
  679. """
  680. check_is_fitted(self)
  681. X = _check_X(X)
  682. fit_dataframe_and_transform_dataframe = hasattr(
  683. self, "feature_names_in_"
  684. ) and hasattr(X, "columns")
  685. if fit_dataframe_and_transform_dataframe:
  686. named_transformers = self.named_transformers_
  687. # check that all names seen in fit are in transform, unless
  688. # they were dropped
  689. non_dropped_indices = [
  690. ind
  691. for name, ind in self._transformer_to_input_indices.items()
  692. if name in named_transformers
  693. and isinstance(named_transformers[name], str)
  694. and named_transformers[name] != "drop"
  695. ]
  696. all_indices = set(chain(*non_dropped_indices))
  697. all_names = set(self.feature_names_in_[ind] for ind in all_indices)
  698. diff = all_names - set(X.columns)
  699. if diff:
  700. raise ValueError(f"columns are missing: {diff}")
  701. else:
  702. # ndarray was used for fitting or transforming, thus we only
  703. # check that n_features_in_ is consistent
  704. self._check_n_features(X, reset=False)
  705. Xs = self._fit_transform(
  706. X,
  707. None,
  708. _transform_one,
  709. fitted=True,
  710. column_as_strings=fit_dataframe_and_transform_dataframe,
  711. )
  712. self._validate_output(Xs)
  713. if not Xs:
  714. # All transformers are None
  715. return np.zeros((X.shape[0], 0))
  716. return self._hstack(list(Xs))
def _hstack(self, Xs):
    """Stacks Xs horizontally.

    This allows subclasses to control the stacking behavior, while reusing
    everything else from ColumnTransformer.

    Parameters
    ----------
    Xs : list of {array-like, sparse matrix, dataframe}
        Per-transformer outputs, in transformer order.
    """
    if self.sparse_output_:
        try:
            # since all columns should be numeric before stacking them
            # in a sparse matrix, `check_array` is used for the
            # dtype conversion if necessary.
            converted_Xs = [
                check_array(X, accept_sparse=True, force_all_finite=False)
                for X in Xs
            ]
        except ValueError as e:
            raise ValueError(
                "For a sparse output, all columns should "
                "be a numeric or convertible to a numeric."
            ) from e

        return sparse.hstack(converted_Xs).tocsr()
    else:
        # Densify any sparse block before dense stacking.
        Xs = [f.toarray() if sparse.issparse(f) else f for f in Xs]
        config = _get_output_config("transform", self)
        # Pandas output is only possible when every block is a dataframe.
        if config["dense"] == "pandas" and all(hasattr(X, "iloc") for X in Xs):
            pd = check_pandas_support("transform")
            output = pd.concat(Xs, axis=1)

            # pd.concat aligns on the index; mismatched indexes can yield
            # more rows than any single block, hence this sanity check.
            output_samples = output.shape[0]
            if any(_num_samples(X) != output_samples for X in Xs):
                raise ValueError(
                    "Concatenating DataFrames from the transformer's output lead to"
                    " an inconsistent number of samples. The output may have Pandas"
                    " Indexes that do not match."
                )

            # If all transformers define `get_feature_names_out`, then transform
            # will adjust the column names to be consistent with
            # verbose_feature_names_out. Here we prefix the feature names if
            # verbose_feature_names_out=True.

            if not self.verbose_feature_names_out:
                return output

            transformer_names = [
                t[0] for t in self._iter(fitted=True, replace_strings=True)
            ]
            # Selection of columns might be empty.
            # Hence feature names are filtered for non-emptiness.
            feature_names_outs = [X.columns for X in Xs if X.shape[1] != 0]
            names_out = self._add_prefix_for_feature_names_out(
                list(zip(transformer_names, feature_names_outs))
            )
            output.columns = names_out
            return output

        return np.hstack(Xs)
  771. def _sk_visual_block_(self):
  772. if isinstance(self.remainder, str) and self.remainder == "drop":
  773. transformers = self.transformers
  774. elif hasattr(self, "_remainder"):
  775. remainder_columns = self._remainder[2]
  776. if (
  777. hasattr(self, "feature_names_in_")
  778. and remainder_columns
  779. and not all(isinstance(col, str) for col in remainder_columns)
  780. ):
  781. remainder_columns = self.feature_names_in_[remainder_columns].tolist()
  782. transformers = chain(
  783. self.transformers, [("remainder", self.remainder, remainder_columns)]
  784. )
  785. else:
  786. transformers = chain(self.transformers, [("remainder", self.remainder, "")])
  787. names, transformers, name_details = zip(*transformers)
  788. return _VisualBlock(
  789. "parallel", transformers, names=names, name_details=name_details
  790. )
  791. def _check_X(X):
  792. """Use check_array only on lists and other non-array-likes / sparse"""
  793. if hasattr(X, "__array__") or sparse.issparse(X):
  794. return X
  795. return check_array(X, force_all_finite="allow-nan", dtype=object)
  796. def _is_empty_column_selection(column):
  797. """
  798. Return True if the column selection is empty (empty list or all-False
  799. boolean array).
  800. """
  801. if hasattr(column, "dtype") and np.issubdtype(column.dtype, np.bool_):
  802. return not column.any()
  803. elif hasattr(column, "__len__"):
  804. return (
  805. len(column) == 0
  806. or all(isinstance(col, bool) for col in column)
  807. and not any(column)
  808. )
  809. else:
  810. return False
  811. def _get_transformer_list(estimators):
  812. """
  813. Construct (name, trans, column) tuples from list
  814. """
  815. transformers, columns = zip(*estimators)
  816. names, _ = zip(*_name_estimators(transformers))
  817. transformer_list = list(zip(names, transformers, columns))
  818. return transformer_list
  819. # This function is not validated using validate_params because
  820. # it's just a factory for ColumnTransformer.
  821. def make_column_transformer(
  822. *transformers,
  823. remainder="drop",
  824. sparse_threshold=0.3,
  825. n_jobs=None,
  826. verbose=False,
  827. verbose_feature_names_out=True,
  828. ):
  829. """Construct a ColumnTransformer from the given transformers.
  830. This is a shorthand for the ColumnTransformer constructor; it does not
  831. require, and does not permit, naming the transformers. Instead, they will
  832. be given names automatically based on their types. It also does not allow
  833. weighting with ``transformer_weights``.
  834. Read more in the :ref:`User Guide <make_column_transformer>`.
  835. Parameters
  836. ----------
  837. *transformers : tuples
  838. Tuples of the form (transformer, columns) specifying the
  839. transformer objects to be applied to subsets of the data.
  840. transformer : {'drop', 'passthrough'} or estimator
  841. Estimator must support :term:`fit` and :term:`transform`.
  842. Special-cased strings 'drop' and 'passthrough' are accepted as
  843. well, to indicate to drop the columns or to pass them through
  844. untransformed, respectively.
  845. columns : str, array-like of str, int, array-like of int, slice, \
  846. array-like of bool or callable
  847. Indexes the data on its second axis. Integers are interpreted as
  848. positional columns, while strings can reference DataFrame columns
  849. by name. A scalar string or int should be used where
  850. ``transformer`` expects X to be a 1d array-like (vector),
  851. otherwise a 2d array will be passed to the transformer.
  852. A callable is passed the input data `X` and can return any of the
  853. above. To select multiple columns by name or dtype, you can use
  854. :obj:`make_column_selector`.
  855. remainder : {'drop', 'passthrough'} or estimator, default='drop'
  856. By default, only the specified columns in `transformers` are
  857. transformed and combined in the output, and the non-specified
  858. columns are dropped. (default of ``'drop'``).
  859. By specifying ``remainder='passthrough'``, all remaining columns that
  860. were not specified in `transformers` will be automatically passed
  861. through. This subset of columns is concatenated with the output of
  862. the transformers.
  863. By setting ``remainder`` to be an estimator, the remaining
  864. non-specified columns will use the ``remainder`` estimator. The
  865. estimator must support :term:`fit` and :term:`transform`.
  866. sparse_threshold : float, default=0.3
  867. If the transformed output consists of a mix of sparse and dense data,
  868. it will be stacked as a sparse matrix if the density is lower than this
  869. value. Use ``sparse_threshold=0`` to always return dense.
  870. When the transformed output consists of all sparse or all dense data,
  871. the stacked result will be sparse or dense, respectively, and this
  872. keyword will be ignored.
  873. n_jobs : int, default=None
  874. Number of jobs to run in parallel.
  875. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
  876. ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
  877. for more details.
  878. verbose : bool, default=False
  879. If True, the time elapsed while fitting each transformer will be
  880. printed as it is completed.
  881. verbose_feature_names_out : bool, default=True
  882. If True, :meth:`ColumnTransformer.get_feature_names_out` will prefix
  883. all feature names with the name of the transformer that generated that
  884. feature.
  885. If False, :meth:`ColumnTransformer.get_feature_names_out` will not
  886. prefix any feature names and will error if feature names are not
  887. unique.
  888. .. versionadded:: 1.0
  889. Returns
  890. -------
  891. ct : ColumnTransformer
  892. Returns a :class:`ColumnTransformer` object.
  893. See Also
  894. --------
  895. ColumnTransformer : Class that allows combining the
  896. outputs of multiple transformer objects used on column subsets
  897. of the data into a single feature space.
  898. Examples
  899. --------
  900. >>> from sklearn.preprocessing import StandardScaler, OneHotEncoder
  901. >>> from sklearn.compose import make_column_transformer
  902. >>> make_column_transformer(
  903. ... (StandardScaler(), ['numerical_column']),
  904. ... (OneHotEncoder(), ['categorical_column']))
  905. ColumnTransformer(transformers=[('standardscaler', StandardScaler(...),
  906. ['numerical_column']),
  907. ('onehotencoder', OneHotEncoder(...),
  908. ['categorical_column'])])
  909. """
  910. # transformer_weights keyword is not passed through because the user
  911. # would need to know the automatically generated names of the transformers
  912. transformer_list = _get_transformer_list(transformers)
  913. return ColumnTransformer(
  914. transformer_list,
  915. n_jobs=n_jobs,
  916. remainder=remainder,
  917. sparse_threshold=sparse_threshold,
  918. verbose=verbose,
  919. verbose_feature_names_out=verbose_feature_names_out,
  920. )
  921. class make_column_selector:
  922. """Create a callable to select columns to be used with
  923. :class:`ColumnTransformer`.
  924. :func:`make_column_selector` can select columns based on datatype or the
  925. columns name with a regex. When using multiple selection criteria, **all**
  926. criteria must match for a column to be selected.
  927. For an example of how to use :func:`make_column_selector` within a
  928. :class:`ColumnTransformer` to select columns based on data type (i.e.
  929. `dtype`), refer to
  930. :ref:`sphx_glr_auto_examples_compose_plot_column_transformer_mixed_types.py`.
  931. Parameters
  932. ----------
  933. pattern : str, default=None
  934. Name of columns containing this regex pattern will be included. If
  935. None, column selection will not be selected based on pattern.
  936. dtype_include : column dtype or list of column dtypes, default=None
  937. A selection of dtypes to include. For more details, see
  938. :meth:`pandas.DataFrame.select_dtypes`.
  939. dtype_exclude : column dtype or list of column dtypes, default=None
  940. A selection of dtypes to exclude. For more details, see
  941. :meth:`pandas.DataFrame.select_dtypes`.
  942. Returns
  943. -------
  944. selector : callable
  945. Callable for column selection to be used by a
  946. :class:`ColumnTransformer`.
  947. See Also
  948. --------
  949. ColumnTransformer : Class that allows combining the
  950. outputs of multiple transformer objects used on column subsets
  951. of the data into a single feature space.
  952. Examples
  953. --------
  954. >>> from sklearn.preprocessing import StandardScaler, OneHotEncoder
  955. >>> from sklearn.compose import make_column_transformer
  956. >>> from sklearn.compose import make_column_selector
  957. >>> import numpy as np
  958. >>> import pandas as pd # doctest: +SKIP
  959. >>> X = pd.DataFrame({'city': ['London', 'London', 'Paris', 'Sallisaw'],
  960. ... 'rating': [5, 3, 4, 5]}) # doctest: +SKIP
  961. >>> ct = make_column_transformer(
  962. ... (StandardScaler(),
  963. ... make_column_selector(dtype_include=np.number)), # rating
  964. ... (OneHotEncoder(),
  965. ... make_column_selector(dtype_include=object))) # city
  966. >>> ct.fit_transform(X) # doctest: +SKIP
  967. array([[ 0.90453403, 1. , 0. , 0. ],
  968. [-1.50755672, 1. , 0. , 0. ],
  969. [-0.30151134, 0. , 1. , 0. ],
  970. [ 0.90453403, 0. , 0. , 1. ]])
  971. """
  972. def __init__(self, pattern=None, *, dtype_include=None, dtype_exclude=None):
  973. self.pattern = pattern
  974. self.dtype_include = dtype_include
  975. self.dtype_exclude = dtype_exclude
  976. def __call__(self, df):
  977. """Callable for column selection to be used by a
  978. :class:`ColumnTransformer`.
  979. Parameters
  980. ----------
  981. df : dataframe of shape (n_features, n_samples)
  982. DataFrame to select columns from.
  983. """
  984. if not hasattr(df, "iloc"):
  985. raise ValueError(
  986. "make_column_selector can only be applied to pandas dataframes"
  987. )
  988. df_row = df.iloc[:1]
  989. if self.dtype_include is not None or self.dtype_exclude is not None:
  990. df_row = df_row.select_dtypes(
  991. include=self.dtype_include, exclude=self.dtype_exclude
  992. )
  993. cols = df_row.columns
  994. if self.pattern is not None:
  995. cols = cols[cols.str.contains(self.pattern, regex=True)]
  996. return cols.tolist()