"""
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""

# Authors: Gilles Louppe <g.louppe@gmail.com>
#          Peter Prettenhofer <peter.prettenhofer@gmail.com>
#          Brian Holt <bdholt1@gmail.com>
#          Noel Dawe <noel@dawe.me>
#          Satrajit Gosh <satrajit.ghosh@gmail.com>
#          Joly Arnaud <arnaud.v.joly@gmail.com>
#          Fares Hedayati <fares.hedayati@gmail.com>
#          Nelson Liu <nelson@nelsonliu.me>
#
# License: BSD 3 clause

import copy
import numbers
import warnings
from abc import ABCMeta, abstractmethod
from math import ceil
from numbers import Integral, Real

import numpy as np
from scipy.sparse import issparse

from ..base import (
    BaseEstimator,
    ClassifierMixin,
    MultiOutputMixin,
    RegressorMixin,
    _fit_context,
    clone,
    is_classifier,
)
from ..utils import Bunch, check_random_state, compute_sample_weight
from ..utils._param_validation import Hidden, Interval, RealNotInt, StrOptions
from ..utils.multiclass import check_classification_targets
from ..utils.validation import (
    _assert_all_finite_element_wise,
    _check_sample_weight,
    assert_all_finite,
    check_is_fitted,
)
from . import _criterion, _splitter, _tree
from ._criterion import Criterion
from ._splitter import Splitter
from ._tree import (
    BestFirstTreeBuilder,
    DepthFirstTreeBuilder,
    Tree,
    _build_pruned_tree_ccp,
    ccp_pruning_path,
)
from ._utils import _any_isnan_axis0

__all__ = [
    "DecisionTreeClassifier",
    "DecisionTreeRegressor",
    "ExtraTreeClassifier",
    "ExtraTreeRegressor",
]

# =============================================================================
# Types and constants
# =============================================================================

DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE

CRITERIA_CLF = {
    "gini": _criterion.Gini,
    "log_loss": _criterion.Entropy,
    "entropy": _criterion.Entropy,
}
CRITERIA_REG = {
    "squared_error": _criterion.MSE,
    "friedman_mse": _criterion.FriedmanMSE,
    "absolute_error": _criterion.MAE,
    "poisson": _criterion.Poisson,
}

DENSE_SPLITTERS = {"best": _splitter.BestSplitter, "random": _splitter.RandomSplitter}

SPARSE_SPLITTERS = {
    "best": _splitter.BestSparseSplitter,
    "random": _splitter.RandomSparseSplitter,
}

# =============================================================================
# Base decision tree
# =============================================================================


class BaseDecisionTree(MultiOutputMixin, BaseEstimator, metaclass=ABCMeta):
    """Base class for decision trees.

    Warning: This class should not be used directly.
    Use derived classes instead.
    """

    _parameter_constraints: dict = {
        "splitter": [StrOptions({"best", "random"})],
        "max_depth": [Interval(Integral, 1, None, closed="left"), None],
        "min_samples_split": [
            Interval(Integral, 2, None, closed="left"),
            Interval(RealNotInt, 0.0, 1.0, closed="right"),
        ],
        "min_samples_leaf": [
            Interval(Integral, 1, None, closed="left"),
            Interval(RealNotInt, 0.0, 1.0, closed="neither"),
        ],
        "min_weight_fraction_leaf": [Interval(Real, 0.0, 0.5, closed="both")],
        "max_features": [
            Interval(Integral, 1, None, closed="left"),
            Interval(RealNotInt, 0.0, 1.0, closed="right"),
            StrOptions({"sqrt", "log2"}),
            None,
        ],
        "random_state": ["random_state"],
        "max_leaf_nodes": [Interval(Integral, 2, None, closed="left"), None],
        "min_impurity_decrease": [Interval(Real, 0.0, None, closed="left")],
        "ccp_alpha": [Interval(Real, 0.0, None, closed="left")],
    }

    @abstractmethod
    def __init__(
        self,
        *,
        criterion,
        splitter,
        max_depth,
        min_samples_split,
        min_samples_leaf,
        min_weight_fraction_leaf,
        max_features,
        max_leaf_nodes,
        random_state,
        min_impurity_decrease,
        class_weight=None,
        ccp_alpha=0.0,
    ):
        self.criterion = criterion
        self.splitter = splitter
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.max_leaf_nodes = max_leaf_nodes
        self.random_state = random_state
        self.min_impurity_decrease = min_impurity_decrease
        self.class_weight = class_weight
        self.ccp_alpha = ccp_alpha

    def get_depth(self):
        """Return the depth of the decision tree.

        The depth of a tree is the maximum distance between the root
        and any leaf.

        Returns
        -------
        self.tree_.max_depth : int
            The maximum depth of the tree.
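
        Examples
        --------
        A minimal usage sketch; the reported depth depends on the data and
        the tree parameters, so the value below is illustrative only.

        >>> from sklearn.datasets import load_iris
        >>> from sklearn.tree import DecisionTreeClassifier
        >>> X, y = load_iris(return_X_y=True)
        >>> clf = DecisionTreeClassifier(max_depth=3, random_state=0).fit(X, y)
        >>> clf.get_depth()  # doctest: +SKIP
        3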
        """
        check_is_fitted(self)
        return self.tree_.max_depth

    def get_n_leaves(self):
        """Return the number of leaves of the decision tree.

        Returns
        -------
        self.tree_.n_leaves : int
            Number of leaves.
        """
        check_is_fitted(self)
        return self.tree_.n_leaves

    def _support_missing_values(self, X):
        return not issparse(X) and self._get_tags()["allow_nan"]

    def _compute_missing_values_in_feature_mask(self, X):
        """Return boolean mask denoting if there are missing values for each feature.

        This method also ensures that X is finite.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features), dtype=DOUBLE
            Input data.

        Returns
        -------
        missing_values_in_feature_mask : ndarray of shape (n_features,), or None
            Missing value mask. If missing values are not supported or there
            are no missing values, return None.
        """
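        # Illustrative sketch (not executed here) of the fast check used below,
        # assuming a small dense array:
        #
        #   X = np.array([[1.0, np.nan], [2.0, 3.0]])
        #   np.sum(X)             # -> nan, so X contains nan and/or inf
        #   np.isnan(np.sum(X))   # -> True: at least one nan, so build the mask
        #   _any_isnan_axis0(X)   # -> array([False,  True]), per-feature nan mask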
        common_kwargs = dict(estimator_name=self.__class__.__name__, input_name="X")

        if not self._support_missing_values(X):
            assert_all_finite(X, **common_kwargs)
            return None

        with np.errstate(over="ignore"):
            overall_sum = np.sum(X)

        if not np.isfinite(overall_sum):
            # Raise a ValueError in case of the presence of an infinite element.
            _assert_all_finite_element_wise(X, xp=np, allow_nan=True, **common_kwargs)

        # If the sum is not nan, then there are no missing values
        if not np.isnan(overall_sum):
            return None

        missing_values_in_feature_mask = _any_isnan_axis0(X)
        return missing_values_in_feature_mask

    def _fit(
        self,
        X,
        y,
        sample_weight=None,
        check_input=True,
        missing_values_in_feature_mask=None,
    ):
        random_state = check_random_state(self.random_state)

        if check_input:
            # Need to validate separately here.
            # We can't pass multi_output=True because that would allow y to be
            # csr.

            # _compute_missing_values_in_feature_mask will check for finite values and
            # compute the missing mask if the tree supports missing values
            check_X_params = dict(
                dtype=DTYPE, accept_sparse="csc", force_all_finite=False
            )
            check_y_params = dict(ensure_2d=False, dtype=None)
            X, y = self._validate_data(
                X, y, validate_separately=(check_X_params, check_y_params)
            )

            missing_values_in_feature_mask = (
                self._compute_missing_values_in_feature_mask(X)
            )
            if issparse(X):
                X.sort_indices()

                if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
                    raise ValueError(
                        "No support for np.int64 index based sparse matrices"
                    )

            if self.criterion == "poisson":
                if np.any(y < 0):
                    raise ValueError(
                        "Some value(s) of y are negative which is"
                        " not allowed for Poisson regression."
                    )
                if np.sum(y) <= 0:
                    raise ValueError(
                        "Sum of y is not positive which is "
                        "necessary for Poisson regression."
                    )

        # Determine output settings
        n_samples, self.n_features_in_ = X.shape
        is_classification = is_classifier(self)

        y = np.atleast_1d(y)
        expanded_class_weight = None

        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity, which
            # [:, np.newaxis] does not.
            y = np.reshape(y, (-1, 1))

        self.n_outputs_ = y.shape[1]

        if is_classification:
            check_classification_targets(y)
            y = np.copy(y)

            self.classes_ = []
            self.n_classes_ = []

            if self.class_weight is not None:
                y_original = np.copy(y)

            y_encoded = np.zeros(y.shape, dtype=int)
            for k in range(self.n_outputs_):
                classes_k, y_encoded[:, k] = np.unique(y[:, k], return_inverse=True)
                self.classes_.append(classes_k)
                self.n_classes_.append(classes_k.shape[0])
            y = y_encoded

            if self.class_weight is not None:
                expanded_class_weight = compute_sample_weight(
                    self.class_weight, y_original
                )

            self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)

        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)

        max_depth = np.iinfo(np.int32).max if self.max_depth is None else self.max_depth

        if isinstance(self.min_samples_leaf, numbers.Integral):
            min_samples_leaf = self.min_samples_leaf
        else:  # float
            min_samples_leaf = int(ceil(self.min_samples_leaf * n_samples))

        if isinstance(self.min_samples_split, numbers.Integral):
            min_samples_split = self.min_samples_split
        else:  # float
            min_samples_split = int(ceil(self.min_samples_split * n_samples))
            min_samples_split = max(2, min_samples_split)

        min_samples_split = max(min_samples_split, 2 * min_samples_leaf)
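
        # Worked example (illustrative only): with n_samples=100,
        # min_samples_leaf=0.05 and min_samples_split=0.01, the lines above give
        #   min_samples_leaf  = ceil(0.05 * 100) = 5
        #   min_samples_split = max(2, ceil(0.01 * 100)) = 2
        #   min_samples_split = max(2, 2 * 5) = 10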

        if isinstance(self.max_features, str):
            if self.max_features == "auto":
                if is_classification:
                    max_features = max(1, int(np.sqrt(self.n_features_in_)))
                    warnings.warn(
                        (
                            "`max_features='auto'` has been deprecated in 1.1 "
                            "and will be removed in 1.3. To keep the past behaviour, "
                            "explicitly set `max_features='sqrt'`."
                        ),
                        FutureWarning,
                    )
                else:
                    max_features = self.n_features_in_
                    warnings.warn(
                        (
                            "`max_features='auto'` has been deprecated in 1.1 "
                            "and will be removed in 1.3. To keep the past behaviour, "
                            "explicitly set `max_features=1.0`."
                        ),
                        FutureWarning,
                    )
            elif self.max_features == "sqrt":
                max_features = max(1, int(np.sqrt(self.n_features_in_)))
            elif self.max_features == "log2":
                max_features = max(1, int(np.log2(self.n_features_in_)))
        elif self.max_features is None:
            max_features = self.n_features_in_
        elif isinstance(self.max_features, numbers.Integral):
            max_features = self.max_features
        else:  # float
            if self.max_features > 0.0:
                max_features = max(1, int(self.max_features * self.n_features_in_))
            else:
                max_features = 0

        self.max_features_ = max_features

        max_leaf_nodes = -1 if self.max_leaf_nodes is None else self.max_leaf_nodes

        if len(y) != n_samples:
            raise ValueError(
                "Number of labels=%d does not match number of samples=%d"
                % (len(y), n_samples)
            )

        if sample_weight is not None:
            sample_weight = _check_sample_weight(sample_weight, X, DOUBLE)

        if expanded_class_weight is not None:
            if sample_weight is not None:
                sample_weight = sample_weight * expanded_class_weight
            else:
                sample_weight = expanded_class_weight

        # Set min_weight_leaf from min_weight_fraction_leaf
        if sample_weight is None:
            min_weight_leaf = self.min_weight_fraction_leaf * n_samples
        else:
            min_weight_leaf = self.min_weight_fraction_leaf * np.sum(sample_weight)

        # Build tree
        criterion = self.criterion
        if not isinstance(criterion, Criterion):
            if is_classification:
                criterion = CRITERIA_CLF[self.criterion](
                    self.n_outputs_, self.n_classes_
                )
            else:
                criterion = CRITERIA_REG[self.criterion](self.n_outputs_, n_samples)
        else:
            # Make a deepcopy in case the criterion has mutable attributes that
            # might be shared and modified concurrently during parallel fitting
            criterion = copy.deepcopy(criterion)

        SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS

        splitter = self.splitter
        if not isinstance(self.splitter, Splitter):
            splitter = SPLITTERS[self.splitter](
                criterion,
                self.max_features_,
                min_samples_leaf,
                min_weight_leaf,
                random_state,
            )

        if is_classifier(self):
            self.tree_ = Tree(self.n_features_in_, self.n_classes_, self.n_outputs_)
        else:
            self.tree_ = Tree(
                self.n_features_in_,
                # TODO: tree shouldn't need this in this case
                np.array([1] * self.n_outputs_, dtype=np.intp),
                self.n_outputs_,
            )

        # Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
        if max_leaf_nodes < 0:
            builder = DepthFirstTreeBuilder(
                splitter,
                min_samples_split,
                min_samples_leaf,
                min_weight_leaf,
                max_depth,
                self.min_impurity_decrease,
            )
        else:
            builder = BestFirstTreeBuilder(
                splitter,
                min_samples_split,
                min_samples_leaf,
                min_weight_leaf,
                max_depth,
                max_leaf_nodes,
                self.min_impurity_decrease,
            )

        builder.build(self.tree_, X, y, sample_weight, missing_values_in_feature_mask)

        if self.n_outputs_ == 1 and is_classifier(self):
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]

        self._prune_tree()

        return self

    def _validate_X_predict(self, X, check_input):
        """Validate the data X at prediction time (predict, predict_proba, apply)."""
        if check_input:
            if self._support_missing_values(X):
                force_all_finite = "allow-nan"
            else:
                force_all_finite = True
            X = self._validate_data(
                X,
                dtype=DTYPE,
                accept_sparse="csr",
                reset=False,
                force_all_finite=force_all_finite,
            )
            if issparse(X) and (
                X.indices.dtype != np.intc or X.indptr.dtype != np.intc
            ):
                raise ValueError("No support for np.int64 index based sparse matrices")
        else:
            # The number of features is checked regardless of `check_input`
            self._check_n_features(X, reset=False)
        return X

    def predict(self, X, check_input=True):
        """Predict class or regression value for X.

        For a classification model, the predicted class for each sample in X is
        returned. For a regression model, the predicted value based on X is
        returned.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        check_input : bool, default=True
            Allow to bypass several input checking steps.
            Don't use this parameter unless you know what you're doing.

        Returns
        -------
        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            The predicted classes, or the predicted values.
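
        Examples
        --------
        A minimal usage sketch; the predictions shown are illustrative only.

        >>> from sklearn.datasets import load_iris
        >>> from sklearn.tree import DecisionTreeClassifier
        >>> X, y = load_iris(return_X_y=True)
        >>> clf = DecisionTreeClassifier(random_state=0).fit(X, y)
        >>> clf.predict(X[:3])  # doctest: +SKIP
        array([0, 0, 0])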
        """
        check_is_fitted(self)
        X = self._validate_X_predict(X, check_input)
        proba = self.tree_.predict(X)
        n_samples = X.shape[0]

        # Classification
        if is_classifier(self):
            if self.n_outputs_ == 1:
                return self.classes_.take(np.argmax(proba, axis=1), axis=0)

            else:
                class_type = self.classes_[0].dtype
                predictions = np.zeros((n_samples, self.n_outputs_), dtype=class_type)
                for k in range(self.n_outputs_):
                    predictions[:, k] = self.classes_[k].take(
                        np.argmax(proba[:, k], axis=1), axis=0
                    )

                return predictions

        # Regression
        else:
            if self.n_outputs_ == 1:
                return proba[:, 0]

            else:
                return proba[:, :, 0]

    def apply(self, X, check_input=True):
        """Return the index of the leaf that each sample is predicted as.

        .. versionadded:: 0.17

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        check_input : bool, default=True
            Allow to bypass several input checking steps.
            Don't use this parameter unless you know what you're doing.

        Returns
        -------
        X_leaves : array-like of shape (n_samples,)
            For each datapoint x in X, return the index of the leaf x
            ends up in. Leaves are numbered within
            ``[0; self.tree_.node_count)``, possibly with gaps in the
            numbering.
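
        Examples
        --------
        A minimal usage sketch; the leaf indices shown are illustrative and
        depend on the learned tree structure.

        >>> from sklearn.datasets import load_iris
        >>> from sklearn.tree import DecisionTreeClassifier
        >>> X, y = load_iris(return_X_y=True)
        >>> clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X, y)
        >>> clf.apply(X[:3])  # doctest: +SKIP
        array([1, 1, 1])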
        """
        check_is_fitted(self)
        X = self._validate_X_predict(X, check_input)
        return self.tree_.apply(X)

    def decision_path(self, X, check_input=True):
        """Return the decision path in the tree.

        .. versionadded:: 0.18

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        check_input : bool, default=True
            Allow to bypass several input checking steps.
            Don't use this parameter unless you know what you're doing.

        Returns
        -------
        indicator : sparse matrix of shape (n_samples, n_nodes)
            Return a node indicator CSR matrix where non zero elements
            indicate that the samples go through the corresponding nodes.
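
        Examples
        --------
        A minimal usage sketch; the indicator matrix always has one column per
        node of the fitted tree.

        >>> from sklearn.datasets import load_iris
        >>> from sklearn.tree import DecisionTreeClassifier
        >>> X, y = load_iris(return_X_y=True)
        >>> clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X, y)
        >>> indicator = clf.decision_path(X[:2])
        >>> indicator.shape == (2, clf.tree_.node_count)
        True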
        """
        X = self._validate_X_predict(X, check_input)
        return self.tree_.decision_path(X)

    def _prune_tree(self):
        """Prune tree using Minimal Cost-Complexity Pruning."""
        check_is_fitted(self)

        if self.ccp_alpha == 0.0:
            return

        # build pruned tree
        if is_classifier(self):
            n_classes = np.atleast_1d(self.n_classes_)
            pruned_tree = Tree(self.n_features_in_, n_classes, self.n_outputs_)
        else:
            pruned_tree = Tree(
                self.n_features_in_,
                # TODO: the tree shouldn't need this param
                np.array([1] * self.n_outputs_, dtype=np.intp),
                self.n_outputs_,
            )
        _build_pruned_tree_ccp(pruned_tree, self.tree_, self.ccp_alpha)

        self.tree_ = pruned_tree

    def cost_complexity_pruning_path(self, X, y, sample_weight=None):
        """Compute the pruning path during Minimal Cost-Complexity Pruning.

        See :ref:`minimal_cost_complexity_pruning` for details on the pruning
        process.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csc_matrix``.

        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            The target values (class labels) as integers or strings.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. Splits are also
            ignored if they would result in any single class carrying a
            negative weight in either child node.

        Returns
        -------
        ccp_path : :class:`~sklearn.utils.Bunch`
            Dictionary-like object, with the following attributes.

            ccp_alphas : ndarray
                Effective alphas of subtree during pruning.

            impurities : ndarray
                Sum of the impurities of the subtree leaves for the
                corresponding alpha value in ``ccp_alphas``.
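
        Examples
        --------
        A minimal sketch of the typical pattern: compute the path, then refit
        with one of the returned alphas. The chosen index is arbitrary and for
        illustration only.

        >>> from sklearn.datasets import load_iris
        >>> from sklearn.tree import DecisionTreeClassifier
        >>> X, y = load_iris(return_X_y=True)
        >>> clf = DecisionTreeClassifier(random_state=0)
        >>> path = clf.cost_complexity_pruning_path(X, y)
        >>> pruned = DecisionTreeClassifier(
        ...     random_state=0, ccp_alpha=path.ccp_alphas[-2]
        ... ).fit(X, y)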
        """
        est = clone(self).set_params(ccp_alpha=0.0)
        est.fit(X, y, sample_weight=sample_weight)
        return Bunch(**ccp_pruning_path(est.tree_))

    @property
    def feature_importances_(self):
        """Return the feature importances.

        The importance of a feature is computed as the (normalized) total
        reduction of the criterion brought by that feature.
        It is also known as the Gini importance.

        Warning: impurity-based feature importances can be misleading for
        high cardinality features (many unique values). See
        :func:`sklearn.inspection.permutation_importance` as an alternative.

        Returns
        -------
        feature_importances_ : ndarray of shape (n_features,)
            Normalized total reduction of criteria by feature
            (Gini importance).
        """
        check_is_fitted(self)

        return self.tree_.compute_feature_importances()


# =============================================================================
# Public estimators
# =============================================================================


class DecisionTreeClassifier(ClassifierMixin, BaseDecisionTree):
    """A decision tree classifier.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : {"gini", "entropy", "log_loss"}, default="gini"
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "log_loss" and "entropy" both for the
        Shannon information gain, see :ref:`tree_mathematical_formulation`.

    splitter : {"best", "random"}, default="best"
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.

    max_depth : int, default=None
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : int or float, default=2
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a fraction and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_samples_leaf : int or float, default=1
        The minimum number of samples required to be at a leaf node.
        A split point at any depth will only be considered if it leaves at
        least ``min_samples_leaf`` training samples in each of the left and
        right branches. This may have the effect of smoothing the model,
        especially in regression.

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a fraction and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_weight_fraction_leaf : float, default=0.0
        The minimum weighted fraction of the sum total of weights (of all
        the input samples) required to be at a leaf node. Samples have
        equal weight when sample_weight is not provided.

    max_features : int, float or {"auto", "sqrt", "log2"}, default=None
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a fraction and
          `max(1, int(max_features * n_features_in_))` features are considered at
          each split.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    random_state : int, RandomState instance or None, default=None
        Controls the randomness of the estimator. The features are always
        randomly permuted at each split, even if ``splitter`` is set to
        ``"best"``. When ``max_features < n_features``, the algorithm will
        select ``max_features`` at random at each split before finding the best
        split among them. But the best found split may vary across different
        runs, even if ``max_features=n_features``. That is the case, if the
        improvement of the criterion is identical for several splits and one
        split has to be selected at random. To obtain a deterministic behaviour
        during fitting, ``random_state`` has to be fixed to an integer.
        See :term:`Glossary <random_state>` for details.

    max_leaf_nodes : int, default=None
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.

    min_impurity_decrease : float, default=0.0
        A node will be split if this split induces a decrease of the impurity
        greater than or equal to this value.

        The weighted impurity decrease equation is the following::

            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)

        where ``N`` is the total number of samples, ``N_t`` is the number of
        samples at the current node, ``N_t_L`` is the number of samples in the
        left child, and ``N_t_R`` is the number of samples in the right child.

        ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
        if ``sample_weight`` is passed.

        .. versionadded:: 0.19

    class_weight : dict, list of dict or "balanced", default=None
        Weights associated with classes in the form ``{class_label: weight}``.
        If None, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.

        Note that for multioutput (including multilabel) weights should be
        defined for each class of every column in its own dict. For example,
        for four-class multilabel classification weights should be
        [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
        [{1:1}, {2:5}, {3:1}, {4:1}].

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``

        For multi-output, the weights of each column of y will be multiplied.

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

    ccp_alpha : non-negative float, default=0.0
        Complexity parameter used for Minimal Cost-Complexity Pruning. The
        subtree with the largest cost complexity that is smaller than
        ``ccp_alpha`` will be chosen. By default, no pruning is performed. See
        :ref:`minimal_cost_complexity_pruning` for details.

        .. versionadded:: 0.22

    Attributes
    ----------
    classes_ : ndarray of shape (n_classes,) or list of ndarray
        The class labels (single output problem),
        or a list of arrays of class labels (multi-output problem).

    feature_importances_ : ndarray of shape (n_features,)
        The impurity-based feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature. It is also
        known as the Gini importance [4]_.

        Warning: impurity-based feature importances can be misleading for
        high cardinality features (many unique values). See
        :func:`sklearn.inspection.permutation_importance` as an alternative.

    max_features_ : int
        The inferred value of max_features.

    n_classes_ : int or list of int
        The number of classes (for single output problems),
        or a list containing the number of classes for each
        output (for multi-output problems).

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    tree_ : Tree instance
        The underlying Tree object. Please refer to
        ``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and
        :ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`
        for basic usage of these attributes.

    See Also
    --------
    DecisionTreeRegressor : A decision tree regressor.

    Notes
    -----
    The default values for the parameters controlling the size of the trees
    (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
    unpruned trees which can potentially be very large on some data sets. To
    reduce memory consumption, the complexity and size of the trees should be
    controlled by setting those parameter values.

    The :meth:`predict` method operates using the :func:`numpy.argmax`
    function on the outputs of :meth:`predict_proba`. This means that in
    case the highest predicted probabilities are tied, the classifier will
    predict the tied class with the lowest index in :term:`classes_`.

    References
    ----------

    .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning

    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.

    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.

    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           https://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.model_selection import cross_val_score
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> clf = DecisionTreeClassifier(random_state=0)
    >>> iris = load_iris()
    >>> cross_val_score(clf, iris.data, iris.target, cv=10)
    ...                             # doctest: +SKIP
    ...
    array([ 1.     ,  0.93...,  0.86...,  0.93...,  0.93...,
            0.93...,  0.93...,  1.     ,  0.93...,  1.      ])
    """

    _parameter_constraints: dict = {
        **BaseDecisionTree._parameter_constraints,
        "criterion": [StrOptions({"gini", "entropy", "log_loss"}), Hidden(Criterion)],
        "class_weight": [dict, list, StrOptions({"balanced"}), None],
    }

    def __init__(
        self,
        *,
        criterion="gini",
        splitter="best",
        max_depth=None,
        min_samples_split=2,
        min_samples_leaf=1,
        min_weight_fraction_leaf=0.0,
        max_features=None,
        random_state=None,
        max_leaf_nodes=None,
        min_impurity_decrease=0.0,
        class_weight=None,
        ccp_alpha=0.0,
    ):
        super().__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            random_state=random_state,
            min_impurity_decrease=min_impurity_decrease,
            ccp_alpha=ccp_alpha,
        )

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y, sample_weight=None, check_input=True):
        """Build a decision tree classifier from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csc_matrix``.

        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            The target values (class labels) as integers or strings.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. Splits are also
            ignored if they would result in any single class carrying a
            negative weight in either child node.

        check_input : bool, default=True
            Allow to bypass several input checking steps.
            Don't use this parameter unless you know what you're doing.

        Returns
        -------
        self : DecisionTreeClassifier
            Fitted estimator.
        """
        super()._fit(
            X,
            y,
            sample_weight=sample_weight,
            check_input=check_input,
        )
        return self

    def predict_proba(self, X, check_input=True):
        """Predict class probabilities of the input samples X.

        The predicted class probability is the fraction of samples of the same
        class in a leaf.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        check_input : bool, default=True
            Allow to bypass several input checking steps.
            Don't use this parameter unless you know what you're doing.

        Returns
        -------
        proba : ndarray of shape (n_samples, n_classes) or list of n_outputs \
            such arrays if n_outputs > 1
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute :term:`classes_`.
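
        Examples
        --------
        A minimal usage sketch; the exact probabilities depend on the learned
        leaves and are shown here for illustration only.

        >>> from sklearn.datasets import load_iris
        >>> from sklearn.tree import DecisionTreeClassifier
        >>> X, y = load_iris(return_X_y=True)
        >>> clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X, y)
        >>> clf.predict_proba(X[:1])  # doctest: +SKIP
        array([[1., 0., 0.]])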
        """
        check_is_fitted(self)
        X = self._validate_X_predict(X, check_input)
        proba = self.tree_.predict(X)

        if self.n_outputs_ == 1:
            proba = proba[:, : self.n_classes_]
            normalizer = proba.sum(axis=1)[:, np.newaxis]
            normalizer[normalizer == 0.0] = 1.0
            proba /= normalizer

            return proba

        else:
            all_proba = []

            for k in range(self.n_outputs_):
                proba_k = proba[:, k, : self.n_classes_[k]]
                normalizer = proba_k.sum(axis=1)[:, np.newaxis]
                normalizer[normalizer == 0.0] = 1.0
                proba_k /= normalizer
                all_proba.append(proba_k)

            return all_proba

    def predict_log_proba(self, X):
        """Predict class log-probabilities of the input samples X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.

        Returns
        -------
        proba : ndarray of shape (n_samples, n_classes) or list of n_outputs \
            such arrays if n_outputs > 1
            The class log-probabilities of the input samples. The order of the
            classes corresponds to that in the attribute :term:`classes_`.
        """
        proba = self.predict_proba(X)

        if self.n_outputs_ == 1:
            return np.log(proba)

        else:
            for k in range(self.n_outputs_):
                proba[k] = np.log(proba[k])

            return proba

    def _more_tags(self):
        # XXX: nan is only supported for dense arrays, but we set this flag so that
        # the common tests pass, specifically: check_estimators_nan_inf
        allow_nan = self.splitter == "best" and self.criterion in {
            "gini",
            "log_loss",
            "entropy",
        }
        return {"multilabel": True, "allow_nan": allow_nan}


class DecisionTreeRegressor(RegressorMixin, BaseDecisionTree):
    """A decision tree regressor.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : {"squared_error", "friedman_mse", "absolute_error", \
            "poisson"}, default="squared_error"
        The function to measure the quality of a split. Supported criteria
        are "squared_error" for the mean squared error, which is equal to
        variance reduction as feature selection criterion and minimizes the L2
        loss using the mean of each terminal node, "friedman_mse", which uses
        mean squared error with Friedman's improvement score for potential
        splits, "absolute_error" for the mean absolute error, which minimizes
        the L1 loss using the median of each terminal node, and "poisson" which
        uses reduction in Poisson deviance to find splits.

        .. versionadded:: 0.18
           Mean Absolute Error (MAE) criterion.

        .. versionadded:: 0.24
            Poisson deviance criterion.

    splitter : {"best", "random"}, default="best"
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.

    max_depth : int, default=None
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : int or float, default=2
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a fraction and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_samples_leaf : int or float, default=1
        The minimum number of samples required to be at a leaf node.
        A split point at any depth will only be considered if it leaves at
        least ``min_samples_leaf`` training samples in each of the left and
        right branches. This may have the effect of smoothing the model,
        especially in regression.

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a fraction and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.

        .. versionchanged:: 0.18
           Added float values for fractions.

    min_weight_fraction_leaf : float, default=0.0
        The minimum weighted fraction of the sum total of weights (of all
        the input samples) required to be at a leaf node. Samples have
        equal weight when sample_weight is not provided.

    max_features : int, float or {"auto", "sqrt", "log2"}, default=None
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a fraction and
          `max(1, int(max_features * n_features_in_))` features are considered at each
          split.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.

    random_state : int, RandomState instance or None, default=None
        Controls the randomness of the estimator. The features are always
        randomly permuted at each split, even if ``splitter`` is set to
        ``"best"``. When ``max_features < n_features``, the algorithm will
        select ``max_features`` at random at each split before finding the best
        split among them. But the best found split may vary across different
        runs, even if ``max_features=n_features``. That is the case, if the
        improvement of the criterion is identical for several splits and one
        split has to be selected at random. To obtain a deterministic behaviour
        during fitting, ``random_state`` has to be fixed to an integer.
        See :term:`Glossary <random_state>` for details.

    max_leaf_nodes : int, default=None
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.

    min_impurity_decrease : float, default=0.0
        A node will be split if this split induces a decrease of the impurity
        greater than or equal to this value.

        The weighted impurity decrease equation is the following::

            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)

        where ``N`` is the total number of samples, ``N_t`` is the number of
        samples at the current node, ``N_t_L`` is the number of samples in the
        left child, and ``N_t_R`` is the number of samples in the right child.

        ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
        if ``sample_weight`` is passed.

        .. versionadded:: 0.19

    ccp_alpha : non-negative float, default=0.0
        Complexity parameter used for Minimal Cost-Complexity Pruning. The
        subtree with the largest cost complexity that is smaller than
        ``ccp_alpha`` will be chosen. By default, no pruning is performed. See
        :ref:`minimal_cost_complexity_pruning` for details.

        .. versionadded:: 0.22

    Attributes
    ----------
    feature_importances_ : ndarray of shape (n_features,)
        The feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the
        (normalized) total reduction of the criterion brought
        by that feature. It is also known as the Gini importance [4]_.

        Warning: impurity-based feature importances can be misleading for
        high cardinality features (many unique values). See
        :func:`sklearn.inspection.permutation_importance` as an alternative.

    max_features_ : int
        The inferred value of max_features.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    tree_ : Tree instance
        The underlying Tree object. Please refer to
        ``help(sklearn.tree._tree.Tree)`` for attributes of Tree object and
        :ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`
        for basic usage of these attributes.

    See Also
    --------
    DecisionTreeClassifier : A decision tree classifier.

    Notes
    -----
    The default values for the parameters controlling the size of the trees
    (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
    unpruned trees which can potentially be very large on some data sets. To
    reduce memory consumption, the complexity and size of the trees should be
    controlled by setting those parameter values.

    References
    ----------

    .. [1] https://en.wikipedia.org/wiki/Decision_tree_learning

    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.

    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.

    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           https://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm

    Examples
    --------
    >>> from sklearn.datasets import load_diabetes
    >>> from sklearn.model_selection import cross_val_score
    >>> from sklearn.tree import DecisionTreeRegressor
    >>> X, y = load_diabetes(return_X_y=True)
    >>> regressor = DecisionTreeRegressor(random_state=0)
    >>> cross_val_score(regressor, X, y, cv=10)
    ...                    # doctest: +SKIP
    ...
    array([-0.39..., -0.46...,  0.02...,  0.06..., -0.50...,
           0.16...,  0.11..., -0.73..., -0.30..., -0.00...])
    """

    _parameter_constraints: dict = {
        **BaseDecisionTree._parameter_constraints,
        "criterion": [
            StrOptions({"squared_error", "friedman_mse", "absolute_error", "poisson"}),
            Hidden(Criterion),
        ],
    }

    def __init__(
        self,
        *,
        criterion="squared_error",
        splitter="best",
        max_depth=None,
        min_samples_split=2,
        min_samples_leaf=1,
        min_weight_fraction_leaf=0.0,
        max_features=None,
        random_state=None,
        max_leaf_nodes=None,
        min_impurity_decrease=0.0,
        ccp_alpha=0.0,
    ):
        super().__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            random_state=random_state,
            min_impurity_decrease=min_impurity_decrease,
            ccp_alpha=ccp_alpha,
        )

    @_fit_context(prefer_skip_nested_validation=True)
    def fit(self, X, y, sample_weight=None, check_input=True):
        """Build a decision tree regressor from the training set (X, y).

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            The training input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csc_matrix``.

        y : array-like of shape (n_samples,) or (n_samples, n_outputs)
            The target values (real numbers). Use ``dtype=np.float64`` and
            ``order='C'`` for maximum efficiency.

        sample_weight : array-like of shape (n_samples,), default=None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node.

        check_input : bool, default=True
            Allow to bypass several input checking steps.
            Don't use this parameter unless you know what you're doing.

        Returns
        -------
        self : DecisionTreeRegressor
            Fitted estimator.
        """
        super()._fit(
            X,
            y,
            sample_weight=sample_weight,
            check_input=check_input,
        )
        return self

    def _compute_partial_dependence_recursion(self, grid, target_features):
        """Fast partial dependence computation.

        Parameters
        ----------
        grid : ndarray of shape (n_samples, n_target_features)
            The grid points on which the partial dependence should be
            evaluated.
        target_features : ndarray of shape (n_target_features)
            The set of target features for which the partial dependence
            should be evaluated.

        Returns
        -------
        averaged_predictions : ndarray of shape (n_samples,)
            The value of the partial dependence function on each grid point.
        """
        grid = np.asarray(grid, dtype=DTYPE, order="C")
        averaged_predictions = np.zeros(
            shape=grid.shape[0], dtype=np.float64, order="C"
        )

        self.tree_.compute_partial_dependence(
            grid, target_features, averaged_predictions
        )
        return averaged_predictions

    def _more_tags(self):
        # XXX: nan is only supported for dense arrays, but we set this flag so that
        # the common tests pass, specifically: check_estimators_nan_inf
        allow_nan = self.splitter == "best" and self.criterion in {
            "squared_error",
            "friedman_mse",
            "poisson",
        }
        return {"allow_nan": allow_nan}
  1119. class ExtraTreeClassifier(DecisionTreeClassifier):
  1120. """An extremely randomized tree classifier.
  1121. Extra-trees differ from classic decision trees in the way they are built.
  1122. When looking for the best split to separate the samples of a node into two
  1123. groups, random splits are drawn for each of the `max_features` randomly
  1124. selected features and the best split among those is chosen. When
  1125. `max_features` is set 1, this amounts to building a totally random
  1126. decision tree.
  1127. Warning: Extra-trees should only be used within ensemble methods.
  1128. Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : {"gini", "entropy", "log_loss"}, default="gini"
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "log_loss" and "entropy" both for the
        Shannon information gain, see :ref:`tree_mathematical_formulation`.

    splitter : {"random", "best"}, default="random"
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.

    max_depth : int, default=None
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : int or float, default=2
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a fraction and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.

        .. versionchanged:: 0.18
            Added float values for fractions.

    min_samples_leaf : int or float, default=1
        The minimum number of samples required to be at a leaf node.
        A split point at any depth will only be considered if it leaves at
        least ``min_samples_leaf`` training samples in each of the left and
        right branches. This may have the effect of smoothing the model,
        especially in regression.

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a fraction and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.

        .. versionchanged:: 0.18
            Added float values for fractions.

    min_weight_fraction_leaf : float, default=0.0
        The minimum weighted fraction of the sum total of weights (of all
        the input samples) required to be at a leaf node. Samples have
        equal weight when sample_weight is not provided.

    max_features : int, float, {"auto", "sqrt", "log2"} or None, default="sqrt"
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a fraction and
          `max(1, int(max_features * n_features_in_))` features are considered
          at each split.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        .. versionchanged:: 1.1
            The default of `max_features` changed from `"auto"` to `"sqrt"`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.

    random_state : int, RandomState instance or None, default=None
        Used to randomly pick the `max_features` features used at each split.
        See :term:`Glossary <random_state>` for details.

    max_leaf_nodes : int, default=None
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined by their relative reduction in impurity.
        If None then unlimited number of leaf nodes.

    min_impurity_decrease : float, default=0.0
        A node will be split if this split induces a decrease of the impurity
        greater than or equal to this value.

        The weighted impurity decrease equation is the following::

            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)

        where ``N`` is the total number of samples, ``N_t`` is the number of
        samples at the current node, ``N_t_L`` is the number of samples in the
        left child, and ``N_t_R`` is the number of samples in the right child.

        ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
        if ``sample_weight`` is passed.

        .. versionadded:: 0.19

    class_weight : dict, list of dict or "balanced", default=None
        Weights associated with classes in the form ``{class_label: weight}``.
        If None, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.

        Note that for multioutput (including multilabel) weights should be
        defined for each class of every column in its own dict. For example,
        for four-class multilabel classification weights should be
        [{0: 1, 1: 1}, {0: 1, 1: 5}, {0: 1, 1: 1}, {0: 1, 1: 1}] instead of
        [{1:1}, {2:5}, {3:1}, {4:1}].

        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``.

        For multi-output, the weights of each column of y will be multiplied.

        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.

    ccp_alpha : non-negative float, default=0.0
        Complexity parameter used for Minimal Cost-Complexity Pruning. The
        subtree with the largest cost complexity that is smaller than
        ``ccp_alpha`` will be chosen. By default, no pruning is performed. See
        :ref:`minimal_cost_complexity_pruning` for details.

        .. versionadded:: 0.22

    Attributes
    ----------
    classes_ : ndarray of shape (n_classes,) or list of ndarray
        The class labels (single output problem),
        or a list of arrays of class labels (multi-output problem).

    max_features_ : int
        The inferred value of max_features.

    n_classes_ : int or list of int
        The number of classes (for single output problems),
        or a list containing the number of classes for each
        output (for multi-output problems).

    feature_importances_ : ndarray of shape (n_features,)
        The impurity-based feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature. It is also
        known as the Gini importance.

        Warning: impurity-based feature importances can be misleading for
        high cardinality features (many unique values). See
        :func:`sklearn.inspection.permutation_importance` as an alternative.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    tree_ : Tree instance
        The underlying Tree object. Please refer to
        ``help(sklearn.tree._tree.Tree)`` for attributes of the Tree object and
        :ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`
        for basic usage of these attributes.

    See Also
    --------
    ExtraTreeRegressor : An extremely randomized tree regressor.
    sklearn.ensemble.ExtraTreesClassifier : An extra-trees classifier.
    sklearn.ensemble.ExtraTreesRegressor : An extra-trees regressor.
    sklearn.ensemble.RandomForestClassifier : A random forest classifier.
    sklearn.ensemble.RandomForestRegressor : A random forest regressor.
    sklearn.ensemble.RandomTreesEmbedding : An ensemble of
        totally random trees.

    Notes
    -----
    The default values for the parameters controlling the size of the trees
    (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
    unpruned trees which can potentially be very large on some data sets. To
    reduce memory consumption, the complexity and size of the trees should be
    controlled by setting those parameter values.

    References
    ----------

    .. [1] P. Geurts, D. Ernst, and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.

    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.model_selection import train_test_split
    >>> from sklearn.ensemble import BaggingClassifier
    >>> from sklearn.tree import ExtraTreeClassifier
    >>> X, y = load_iris(return_X_y=True)
    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X, y, random_state=0)
    >>> extra_tree = ExtraTreeClassifier(random_state=0)
    >>> cls = BaggingClassifier(extra_tree, random_state=0).fit(
    ...     X_train, y_train)
    >>> cls.score(X_test, y_test)
    0.8947...
    """

    def __init__(
        self,
        *,
        criterion="gini",
        splitter="random",
        max_depth=None,
        min_samples_split=2,
        min_samples_leaf=1,
        min_weight_fraction_leaf=0.0,
        max_features="sqrt",
        random_state=None,
        max_leaf_nodes=None,
        min_impurity_decrease=0.0,
        class_weight=None,
        ccp_alpha=0.0,
    ):
        super().__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            min_impurity_decrease=min_impurity_decrease,
            random_state=random_state,
            ccp_alpha=ccp_alpha,
        )
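

# The following sketch is illustrative only and not part of the library: it
# spells out the arithmetic behind class_weight="balanced" documented for
# ExtraTreeClassifier above, namely n_samples / (n_classes * np.bincount(y)).
# The helper name `_demo_balanced_class_weight` is hypothetical.
def _demo_balanced_class_weight():
    import numpy as np
    from sklearn.utils.class_weight import compute_class_weight

    # 4 samples: three of class 0 and one of class 1.
    y = np.array([0, 0, 0, 1])
    weights = compute_class_weight("balanced", classes=np.array([0, 1]), y=y)

    # n_samples = 4, n_classes = 2, np.bincount(y) = [3, 1]
    # -> weights = 4 / (2 * [3, 1]) = [0.666..., 2.0]
    expected = 4 / (2 * np.bincount(y))
    assert np.allclose(weights, expected)
    return weights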


class ExtraTreeRegressor(DecisionTreeRegressor):
    """An extremely randomized tree regressor.

    Extra-trees differ from classic decision trees in the way they are built.
    When looking for the best split to separate the samples of a node into two
    groups, random splits are drawn for each of the `max_features` randomly
    selected features and the best split among those is chosen. When
    `max_features` is set to 1, this amounts to building a totally random
    decision tree.

    Warning: Extra-trees should only be used within ensemble methods.

    Read more in the :ref:`User Guide <tree>`.

    Parameters
    ----------
    criterion : {"squared_error", "friedman_mse", "absolute_error", "poisson"}, \
            default="squared_error"
        The function to measure the quality of a split. Supported criteria
        are "squared_error" for the mean squared error, which is equal to
        variance reduction as feature selection criterion and minimizes the L2
        loss using the mean of each terminal node, "friedman_mse", which uses
        mean squared error with Friedman's improvement score for potential
        splits, "absolute_error" for the mean absolute error, which minimizes
        the L1 loss using the median of each terminal node, and "poisson" which
        uses reduction in Poisson deviance to find splits.

        .. versionadded:: 0.18
            Mean Absolute Error (MAE) criterion.

        .. versionadded:: 0.24
            Poisson deviance criterion.

    splitter : {"random", "best"}, default="random"
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.

    max_depth : int, default=None
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.

    min_samples_split : int or float, default=2
        The minimum number of samples required to split an internal node:

        - If int, then consider `min_samples_split` as the minimum number.
        - If float, then `min_samples_split` is a fraction and
          `ceil(min_samples_split * n_samples)` are the minimum
          number of samples for each split.

        .. versionchanged:: 0.18
            Added float values for fractions.

    min_samples_leaf : int or float, default=1
        The minimum number of samples required to be at a leaf node.
        A split point at any depth will only be considered if it leaves at
        least ``min_samples_leaf`` training samples in each of the left and
        right branches. This may have the effect of smoothing the model,
        especially in regression.

        - If int, then consider `min_samples_leaf` as the minimum number.
        - If float, then `min_samples_leaf` is a fraction and
          `ceil(min_samples_leaf * n_samples)` are the minimum
          number of samples for each node.

        .. versionchanged:: 0.18
            Added float values for fractions.

    min_weight_fraction_leaf : float, default=0.0
        The minimum weighted fraction of the sum total of weights (of all
        the input samples) required to be at a leaf node. Samples have
        equal weight when sample_weight is not provided.

    max_features : int, float, {"auto", "sqrt", "log2"} or None, default=1.0
        The number of features to consider when looking for the best split:

        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a fraction and
          `max(1, int(max_features * n_features_in_))` features are considered
          at each split.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.

        .. versionchanged:: 1.1
            The default of `max_features` changed from `"auto"` to `1.0`.

        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires
        effectively inspecting more than ``max_features`` features.

    random_state : int, RandomState instance or None, default=None
        Used to randomly pick the `max_features` features used at each split.
        See :term:`Glossary <random_state>` for details.

    min_impurity_decrease : float, default=0.0
        A node will be split if this split induces a decrease of the impurity
        greater than or equal to this value.

        The weighted impurity decrease equation is the following::

            N_t / N * (impurity - N_t_R / N_t * right_impurity
                                - N_t_L / N_t * left_impurity)

        where ``N`` is the total number of samples, ``N_t`` is the number of
        samples at the current node, ``N_t_L`` is the number of samples in the
        left child, and ``N_t_R`` is the number of samples in the right child.

        ``N``, ``N_t``, ``N_t_R`` and ``N_t_L`` all refer to the weighted sum,
        if ``sample_weight`` is passed.

        .. versionadded:: 0.19

    max_leaf_nodes : int, default=None
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined by their relative reduction in impurity.
        If None then unlimited number of leaf nodes.

    ccp_alpha : non-negative float, default=0.0
        Complexity parameter used for Minimal Cost-Complexity Pruning. The
        subtree with the largest cost complexity that is smaller than
        ``ccp_alpha`` will be chosen. By default, no pruning is performed. See
        :ref:`minimal_cost_complexity_pruning` for details.

        .. versionadded:: 0.22

    Attributes
    ----------
    max_features_ : int
        The inferred value of max_features.

    n_features_in_ : int
        Number of features seen during :term:`fit`.

        .. versionadded:: 0.24

    feature_names_in_ : ndarray of shape (`n_features_in_`,)
        Names of features seen during :term:`fit`. Defined only when `X`
        has feature names that are all strings.

        .. versionadded:: 1.0

    feature_importances_ : ndarray of shape (n_features,)
        The impurity-based feature importances (the higher, the more
        important the feature).

        Warning: impurity-based feature importances can be misleading for
        high cardinality features (many unique values). See
        :func:`sklearn.inspection.permutation_importance` as an alternative.

    n_outputs_ : int
        The number of outputs when ``fit`` is performed.

    tree_ : Tree instance
        The underlying Tree object. Please refer to
        ``help(sklearn.tree._tree.Tree)`` for attributes of the Tree object and
        :ref:`sphx_glr_auto_examples_tree_plot_unveil_tree_structure.py`
        for basic usage of these attributes.

    See Also
    --------
    ExtraTreeClassifier : An extremely randomized tree classifier.
    sklearn.ensemble.ExtraTreesClassifier : An extra-trees classifier.
    sklearn.ensemble.ExtraTreesRegressor : An extra-trees regressor.

    Notes
    -----
    The default values for the parameters controlling the size of the trees
    (e.g. ``max_depth``, ``min_samples_leaf``, etc.) lead to fully grown and
    unpruned trees which can potentially be very large on some data sets. To
    reduce memory consumption, the complexity and size of the trees should be
    controlled by setting those parameter values.

    References
    ----------

    .. [1] P. Geurts, D. Ernst, and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.

    Examples
    --------
    >>> from sklearn.datasets import load_diabetes
    >>> from sklearn.model_selection import train_test_split
    >>> from sklearn.ensemble import BaggingRegressor
    >>> from sklearn.tree import ExtraTreeRegressor
    >>> X, y = load_diabetes(return_X_y=True)
    >>> X_train, X_test, y_train, y_test = train_test_split(
    ...     X, y, random_state=0)
    >>> extra_tree = ExtraTreeRegressor(random_state=0)
    >>> reg = BaggingRegressor(extra_tree, random_state=0).fit(
    ...     X_train, y_train)
    >>> reg.score(X_test, y_test)
    0.33...
    """

    def __init__(
        self,
        *,
        criterion="squared_error",
        splitter="random",
        max_depth=None,
        min_samples_split=2,
        min_samples_leaf=1,
        min_weight_fraction_leaf=0.0,
        max_features=1.0,
        random_state=None,
        min_impurity_decrease=0.0,
        max_leaf_nodes=None,
        ccp_alpha=0.0,
    ):
        super().__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            min_impurity_decrease=min_impurity_decrease,
            random_state=random_state,
            ccp_alpha=ccp_alpha,
        )
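

# The following sketch is illustrative only and not part of the library: it
# works through the weighted impurity decrease formula documented for
# min_impurity_decrease above,
#
#     N_t / N * (impurity - N_t_R / N_t * right_impurity
#                         - N_t_L / N_t * left_impurity)
#
# with made-up, unweighted counts. The helper name `_demo_impurity_decrease`
# is hypothetical.
def _demo_impurity_decrease():
    # 100 training samples in total; 40 reach the current node, and a
    # candidate split would send 30 to the left child and 10 to the right.
    N, N_t, N_t_L, N_t_R = 100, 40, 30, 10
    impurity, left_impurity, right_impurity = 0.5, 0.25, 0.10

    decrease = N_t / N * (
        impurity - N_t_R / N_t * right_impurity - N_t_L / N_t * left_impurity
    )
    # 0.4 * (0.5 - 0.25 * 0.10 - 0.75 * 0.25) = 0.4 * 0.2875 = 0.115
    # The candidate split is considered only if this value is greater than or
    # equal to min_impurity_decrease.
    return decrease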