test_forest.py 57 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811
  1. """
  2. Testing for the forest module (sklearn.ensemble.forest).
  3. """
  4. # Authors: Gilles Louppe,
  5. # Brian Holt,
  6. # Andreas Mueller,
  7. # Arnaud Joly
  8. # License: BSD 3 clause
  9. import itertools
  10. import math
  11. import pickle
  12. from collections import defaultdict
  13. from functools import partial
  14. from itertools import combinations, product
  15. from typing import Any, Dict
  16. from unittest.mock import patch
  17. import joblib
  18. import numpy as np
  19. import pytest
  20. from scipy.sparse import coo_matrix, csc_matrix, csr_matrix
  21. from scipy.special import comb
  22. import sklearn
  23. from sklearn import datasets
  24. from sklearn.datasets import make_classification
  25. from sklearn.decomposition import TruncatedSVD
  26. from sklearn.dummy import DummyRegressor
  27. from sklearn.ensemble import (
  28. ExtraTreesClassifier,
  29. ExtraTreesRegressor,
  30. RandomForestClassifier,
  31. RandomForestRegressor,
  32. RandomTreesEmbedding,
  33. )
  34. from sklearn.exceptions import NotFittedError
  35. from sklearn.metrics import (
  36. explained_variance_score,
  37. f1_score,
  38. mean_poisson_deviance,
  39. mean_squared_error,
  40. )
  41. from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split
  42. from sklearn.svm import LinearSVC
  43. from sklearn.tree._classes import SPARSE_SPLITTERS
  44. from sklearn.utils._testing import (
  45. _convert_container,
  46. assert_almost_equal,
  47. assert_array_almost_equal,
  48. assert_array_equal,
  49. ignore_warnings,
  50. skip_if_no_parallel,
  51. )
  52. from sklearn.utils.parallel import Parallel
  53. from sklearn.utils.validation import check_random_state
  54. # toy sample
  55. X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
  56. y = [-1, -1, -1, 1, 1, 1]
  57. T = [[-1, -1], [2, 2], [3, 2]]
  58. true_result = [-1, 1, 1]
  59. # Larger classification sample used for testing feature importances
  60. X_large, y_large = datasets.make_classification(
  61. n_samples=500,
  62. n_features=10,
  63. n_informative=3,
  64. n_redundant=0,
  65. n_repeated=0,
  66. shuffle=False,
  67. random_state=0,
  68. )
  69. # also load the iris dataset
  70. # and randomly permute it
  71. iris = datasets.load_iris()
  72. rng = check_random_state(0)
  73. perm = rng.permutation(iris.target.size)
  74. iris.data = iris.data[perm]
  75. iris.target = iris.target[perm]
  76. # Make regression dataset
  77. X_reg, y_reg = datasets.make_regression(n_samples=500, n_features=10, random_state=1)
  78. # also make a hastie_10_2 dataset
  79. hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
  80. hastie_X = hastie_X.astype(np.float32)
  81. # Get the default backend in joblib to test parallelism and interaction with
  82. # different backends
  83. DEFAULT_JOBLIB_BACKEND = joblib.parallel.get_active_backend()[0].__class__
  84. FOREST_CLASSIFIERS = {
  85. "ExtraTreesClassifier": ExtraTreesClassifier,
  86. "RandomForestClassifier": RandomForestClassifier,
  87. }
  88. FOREST_REGRESSORS = {
  89. "ExtraTreesRegressor": ExtraTreesRegressor,
  90. "RandomForestRegressor": RandomForestRegressor,
  91. }
  92. FOREST_TRANSFORMERS = {
  93. "RandomTreesEmbedding": RandomTreesEmbedding,
  94. }
  95. FOREST_ESTIMATORS: Dict[str, Any] = dict()
  96. FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
  97. FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
  98. FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
  99. FOREST_CLASSIFIERS_REGRESSORS: Dict[str, Any] = FOREST_CLASSIFIERS.copy()
  100. FOREST_CLASSIFIERS_REGRESSORS.update(FOREST_REGRESSORS)
  101. def check_classification_toy(name):
  102. """Check classification on a toy dataset."""
  103. ForestClassifier = FOREST_CLASSIFIERS[name]
  104. clf = ForestClassifier(n_estimators=10, random_state=1)
  105. clf.fit(X, y)
  106. assert_array_equal(clf.predict(T), true_result)
  107. assert 10 == len(clf)
  108. clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
  109. clf.fit(X, y)
  110. assert_array_equal(clf.predict(T), true_result)
  111. assert 10 == len(clf)
  112. # also test apply
  113. leaf_indices = clf.apply(X)
  114. assert leaf_indices.shape == (len(X), clf.n_estimators)
  115. @pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
  116. def test_classification_toy(name):
  117. check_classification_toy(name)
  118. def check_iris_criterion(name, criterion):
  119. # Check consistency on dataset iris.
  120. ForestClassifier = FOREST_CLASSIFIERS[name]
  121. clf = ForestClassifier(n_estimators=10, criterion=criterion, random_state=1)
  122. clf.fit(iris.data, iris.target)
  123. score = clf.score(iris.data, iris.target)
  124. assert score > 0.9, "Failed with criterion %s and score = %f" % (criterion, score)
  125. clf = ForestClassifier(
  126. n_estimators=10, criterion=criterion, max_features=2, random_state=1
  127. )
  128. clf.fit(iris.data, iris.target)
  129. score = clf.score(iris.data, iris.target)
  130. assert score > 0.5, "Failed with criterion %s and score = %f" % (criterion, score)
  131. @pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
  132. @pytest.mark.parametrize("criterion", ("gini", "log_loss"))
  133. def test_iris(name, criterion):
  134. check_iris_criterion(name, criterion)
  135. def check_regression_criterion(name, criterion):
  136. # Check consistency on regression dataset.
  137. ForestRegressor = FOREST_REGRESSORS[name]
  138. reg = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
  139. reg.fit(X_reg, y_reg)
  140. score = reg.score(X_reg, y_reg)
  141. assert (
  142. score > 0.93
  143. ), "Failed with max_features=None, criterion %s and score = %f" % (
  144. criterion,
  145. score,
  146. )
  147. reg = ForestRegressor(
  148. n_estimators=5, criterion=criterion, max_features=6, random_state=1
  149. )
  150. reg.fit(X_reg, y_reg)
  151. score = reg.score(X_reg, y_reg)
  152. assert score > 0.92, "Failed with max_features=6, criterion %s and score = %f" % (
  153. criterion,
  154. score,
  155. )
  156. @pytest.mark.parametrize("name", FOREST_REGRESSORS)
  157. @pytest.mark.parametrize(
  158. "criterion", ("squared_error", "absolute_error", "friedman_mse")
  159. )
  160. def test_regression(name, criterion):
  161. check_regression_criterion(name, criterion)
  162. def test_poisson_vs_mse():
  163. """Test that random forest with poisson criterion performs better than
  164. mse for a poisson target.
  165. There is a similar test for DecisionTreeRegressor.
  166. """
  167. rng = np.random.RandomState(42)
  168. n_train, n_test, n_features = 500, 500, 10
  169. X = datasets.make_low_rank_matrix(
  170. n_samples=n_train + n_test, n_features=n_features, random_state=rng
  171. )
  172. # We create a log-linear Poisson model and downscale coef as it will get
  173. # exponentiated.
  174. coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)
  175. y = rng.poisson(lam=np.exp(X @ coef))
  176. X_train, X_test, y_train, y_test = train_test_split(
  177. X, y, test_size=n_test, random_state=rng
  178. )
  179. # We prevent some overfitting by setting min_samples_split=10.
  180. forest_poi = RandomForestRegressor(
  181. criterion="poisson", min_samples_leaf=10, max_features="sqrt", random_state=rng
  182. )
  183. forest_mse = RandomForestRegressor(
  184. criterion="squared_error",
  185. min_samples_leaf=10,
  186. max_features="sqrt",
  187. random_state=rng,
  188. )
  189. forest_poi.fit(X_train, y_train)
  190. forest_mse.fit(X_train, y_train)
  191. dummy = DummyRegressor(strategy="mean").fit(X_train, y_train)
  192. for X, y, data_name in [(X_train, y_train, "train"), (X_test, y_test, "test")]:
  193. metric_poi = mean_poisson_deviance(y, forest_poi.predict(X))
  194. # squared_error forest might produce non-positive predictions => clip
  195. # If y = 0 for those, the poisson deviance gets too good.
  196. # If we drew more samples, we would eventually get y > 0 and the
  197. # poisson deviance would explode, i.e. be undefined. Therefore, we do
  198. # not clip to a tiny value like 1e-15, but to 1e-6. This acts like a
  199. # small penalty to the non-positive predictions.
  200. metric_mse = mean_poisson_deviance(
  201. y, np.clip(forest_mse.predict(X), 1e-6, None)
  202. )
  203. metric_dummy = mean_poisson_deviance(y, dummy.predict(X))
  204. # As squared_error might correctly predict 0 in train set, its train
  205. # score can be better than Poisson. This is no longer the case for the
  206. # test set. But keep the above comment for clipping in mind.
  207. if data_name == "test":
  208. assert metric_poi < metric_mse
  209. assert metric_poi < 0.8 * metric_dummy
  210. @pytest.mark.parametrize("criterion", ("poisson", "squared_error"))
  211. def test_balance_property_random_forest(criterion):
  212. """ "Test that sum(y_pred)==sum(y_true) on the training set."""
  213. rng = np.random.RandomState(42)
  214. n_train, n_test, n_features = 500, 500, 10
  215. X = datasets.make_low_rank_matrix(
  216. n_samples=n_train + n_test, n_features=n_features, random_state=rng
  217. )
  218. coef = rng.uniform(low=-2, high=2, size=n_features) / np.max(X, axis=0)
  219. y = rng.poisson(lam=np.exp(X @ coef))
  220. reg = RandomForestRegressor(
  221. criterion=criterion, n_estimators=10, bootstrap=False, random_state=rng
  222. )
  223. reg.fit(X, y)
  224. assert np.sum(reg.predict(X)) == pytest.approx(np.sum(y))
  225. def check_regressor_attributes(name):
  226. # Regression models should not have a classes_ attribute.
  227. r = FOREST_REGRESSORS[name](random_state=0)
  228. assert not hasattr(r, "classes_")
  229. assert not hasattr(r, "n_classes_")
  230. r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
  231. assert not hasattr(r, "classes_")
  232. assert not hasattr(r, "n_classes_")
  233. @pytest.mark.parametrize("name", FOREST_REGRESSORS)
  234. def test_regressor_attributes(name):
  235. check_regressor_attributes(name)
  236. def check_probability(name):
  237. # Predict probabilities.
  238. ForestClassifier = FOREST_CLASSIFIERS[name]
  239. with np.errstate(divide="ignore"):
  240. clf = ForestClassifier(
  241. n_estimators=10, random_state=1, max_features=1, max_depth=1
  242. )
  243. clf.fit(iris.data, iris.target)
  244. assert_array_almost_equal(
  245. np.sum(clf.predict_proba(iris.data), axis=1), np.ones(iris.data.shape[0])
  246. )
  247. assert_array_almost_equal(
  248. clf.predict_proba(iris.data), np.exp(clf.predict_log_proba(iris.data))
  249. )
  250. @pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
  251. def test_probability(name):
  252. check_probability(name)
  253. def check_importances(name, criterion, dtype, tolerance):
  254. # cast as dype
  255. X = X_large.astype(dtype, copy=False)
  256. y = y_large.astype(dtype, copy=False)
  257. ForestEstimator = FOREST_ESTIMATORS[name]
  258. est = ForestEstimator(n_estimators=10, criterion=criterion, random_state=0)
  259. est.fit(X, y)
  260. importances = est.feature_importances_
  261. # The forest estimator can detect that only the first 3 features of the
  262. # dataset are informative:
  263. n_important = np.sum(importances > 0.1)
  264. assert importances.shape[0] == 10
  265. assert n_important == 3
  266. assert np.all(importances[:3] > 0.1)
  267. # Check with parallel
  268. importances = est.feature_importances_
  269. est.set_params(n_jobs=2)
  270. importances_parallel = est.feature_importances_
  271. assert_array_almost_equal(importances, importances_parallel)
  272. # Check with sample weights
  273. sample_weight = check_random_state(0).randint(1, 10, len(X))
  274. est = ForestEstimator(n_estimators=10, random_state=0, criterion=criterion)
  275. est.fit(X, y, sample_weight=sample_weight)
  276. importances = est.feature_importances_
  277. assert np.all(importances >= 0.0)
  278. for scale in [0.5, 100]:
  279. est = ForestEstimator(n_estimators=10, random_state=0, criterion=criterion)
  280. est.fit(X, y, sample_weight=scale * sample_weight)
  281. importances_bis = est.feature_importances_
  282. assert np.abs(importances - importances_bis).mean() < tolerance
  283. @pytest.mark.parametrize("dtype", (np.float64, np.float32))
  284. @pytest.mark.parametrize(
  285. "name, criterion",
  286. itertools.chain(
  287. product(FOREST_CLASSIFIERS, ["gini", "log_loss"]),
  288. product(FOREST_REGRESSORS, ["squared_error", "friedman_mse", "absolute_error"]),
  289. ),
  290. )
  291. def test_importances(dtype, name, criterion):
  292. tolerance = 0.01
  293. if name in FOREST_REGRESSORS and criterion == "absolute_error":
  294. tolerance = 0.05
  295. check_importances(name, criterion, dtype, tolerance)
  296. def test_importances_asymptotic():
  297. # Check whether variable importances of totally randomized trees
  298. # converge towards their theoretical values (See Louppe et al,
  299. # Understanding variable importances in forests of randomized trees, 2013).
  300. def binomial(k, n):
  301. return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
  302. def entropy(samples):
  303. n_samples = len(samples)
  304. entropy = 0.0
  305. for count in np.bincount(samples):
  306. p = 1.0 * count / n_samples
  307. if p > 0:
  308. entropy -= p * np.log2(p)
  309. return entropy
  310. def mdi_importance(X_m, X, y):
  311. n_samples, n_features = X.shape
  312. features = list(range(n_features))
  313. features.pop(X_m)
  314. values = [np.unique(X[:, i]) for i in range(n_features)]
  315. imp = 0.0
  316. for k in range(n_features):
  317. # Weight of each B of size k
  318. coef = 1.0 / (binomial(k, n_features) * (n_features - k))
  319. # For all B of size k
  320. for B in combinations(features, k):
  321. # For all values B=b
  322. for b in product(*[values[B[j]] for j in range(k)]):
  323. mask_b = np.ones(n_samples, dtype=bool)
  324. for j in range(k):
  325. mask_b &= X[:, B[j]] == b[j]
  326. X_, y_ = X[mask_b, :], y[mask_b]
  327. n_samples_b = len(X_)
  328. if n_samples_b > 0:
  329. children = []
  330. for xi in values[X_m]:
  331. mask_xi = X_[:, X_m] == xi
  332. children.append(y_[mask_xi])
  333. imp += (
  334. coef
  335. * (1.0 * n_samples_b / n_samples) # P(B=b)
  336. * (
  337. entropy(y_)
  338. - sum(
  339. [
  340. entropy(c) * len(c) / n_samples_b
  341. for c in children
  342. ]
  343. )
  344. )
  345. )
  346. return imp
  347. data = np.array(
  348. [
  349. [0, 0, 1, 0, 0, 1, 0, 1],
  350. [1, 0, 1, 1, 1, 0, 1, 2],
  351. [1, 0, 1, 1, 0, 1, 1, 3],
  352. [0, 1, 1, 1, 0, 1, 0, 4],
  353. [1, 1, 0, 1, 0, 1, 1, 5],
  354. [1, 1, 0, 1, 1, 1, 1, 6],
  355. [1, 0, 1, 0, 0, 1, 0, 7],
  356. [1, 1, 1, 1, 1, 1, 1, 8],
  357. [1, 1, 1, 1, 0, 1, 1, 9],
  358. [1, 1, 1, 0, 1, 1, 1, 0],
  359. ]
  360. )
  361. X, y = np.array(data[:, :7], dtype=bool), data[:, 7]
  362. n_features = X.shape[1]
  363. # Compute true importances
  364. true_importances = np.zeros(n_features)
  365. for i in range(n_features):
  366. true_importances[i] = mdi_importance(i, X, y)
  367. # Estimate importances with totally randomized trees
  368. clf = ExtraTreesClassifier(
  369. n_estimators=500, max_features=1, criterion="log_loss", random_state=0
  370. ).fit(X, y)
  371. importances = (
  372. sum(
  373. tree.tree_.compute_feature_importances(normalize=False)
  374. for tree in clf.estimators_
  375. )
  376. / clf.n_estimators
  377. )
  378. # Check correctness
  379. assert_almost_equal(entropy(y), sum(importances))
  380. assert np.abs(true_importances - importances).mean() < 0.01
  381. @pytest.mark.parametrize("name", FOREST_ESTIMATORS)
  382. def test_unfitted_feature_importances(name):
  383. err_msg = (
  384. "This {} instance is not fitted yet. Call 'fit' with "
  385. "appropriate arguments before using this estimator.".format(name)
  386. )
  387. with pytest.raises(NotFittedError, match=err_msg):
  388. getattr(FOREST_ESTIMATORS[name](), "feature_importances_")
  389. @pytest.mark.parametrize("ForestClassifier", FOREST_CLASSIFIERS.values())
  390. @pytest.mark.parametrize("X_type", ["array", "sparse_csr", "sparse_csc"])
  391. @pytest.mark.parametrize(
  392. "X, y, lower_bound_accuracy",
  393. [
  394. (
  395. *datasets.make_classification(n_samples=300, n_classes=2, random_state=0),
  396. 0.9,
  397. ),
  398. (
  399. *datasets.make_classification(
  400. n_samples=1000, n_classes=3, n_informative=6, random_state=0
  401. ),
  402. 0.65,
  403. ),
  404. (
  405. iris.data,
  406. iris.target * 2 + 1,
  407. 0.65,
  408. ),
  409. (
  410. *datasets.make_multilabel_classification(n_samples=300, random_state=0),
  411. 0.18,
  412. ),
  413. ],
  414. )
  415. @pytest.mark.parametrize("oob_score", [True, partial(f1_score, average="micro")])
  416. def test_forest_classifier_oob(
  417. ForestClassifier, X, y, X_type, lower_bound_accuracy, oob_score
  418. ):
  419. """Check that OOB score is close to score on a test set."""
  420. X = _convert_container(X, constructor_name=X_type)
  421. X_train, X_test, y_train, y_test = train_test_split(
  422. X,
  423. y,
  424. test_size=0.5,
  425. random_state=0,
  426. )
  427. classifier = ForestClassifier(
  428. n_estimators=40,
  429. bootstrap=True,
  430. oob_score=oob_score,
  431. random_state=0,
  432. )
  433. assert not hasattr(classifier, "oob_score_")
  434. assert not hasattr(classifier, "oob_decision_function_")
  435. classifier.fit(X_train, y_train)
  436. if callable(oob_score):
  437. test_score = oob_score(y_test, classifier.predict(X_test))
  438. else:
  439. test_score = classifier.score(X_test, y_test)
  440. assert classifier.oob_score_ >= lower_bound_accuracy
  441. assert abs(test_score - classifier.oob_score_) <= 0.1
  442. assert hasattr(classifier, "oob_score_")
  443. assert not hasattr(classifier, "oob_prediction_")
  444. assert hasattr(classifier, "oob_decision_function_")
  445. if y.ndim == 1:
  446. expected_shape = (X_train.shape[0], len(set(y)))
  447. else:
  448. expected_shape = (X_train.shape[0], len(set(y[:, 0])), y.shape[1])
  449. assert classifier.oob_decision_function_.shape == expected_shape
  450. @pytest.mark.parametrize("ForestRegressor", FOREST_REGRESSORS.values())
  451. @pytest.mark.parametrize("X_type", ["array", "sparse_csr", "sparse_csc"])
  452. @pytest.mark.parametrize(
  453. "X, y, lower_bound_r2",
  454. [
  455. (
  456. *datasets.make_regression(
  457. n_samples=500, n_features=10, n_targets=1, random_state=0
  458. ),
  459. 0.7,
  460. ),
  461. (
  462. *datasets.make_regression(
  463. n_samples=500, n_features=10, n_targets=2, random_state=0
  464. ),
  465. 0.55,
  466. ),
  467. ],
  468. )
  469. @pytest.mark.parametrize("oob_score", [True, explained_variance_score])
  470. def test_forest_regressor_oob(ForestRegressor, X, y, X_type, lower_bound_r2, oob_score):
  471. """Check that forest-based regressor provide an OOB score close to the
  472. score on a test set."""
  473. X = _convert_container(X, constructor_name=X_type)
  474. X_train, X_test, y_train, y_test = train_test_split(
  475. X,
  476. y,
  477. test_size=0.5,
  478. random_state=0,
  479. )
  480. regressor = ForestRegressor(
  481. n_estimators=50,
  482. bootstrap=True,
  483. oob_score=oob_score,
  484. random_state=0,
  485. )
  486. assert not hasattr(regressor, "oob_score_")
  487. assert not hasattr(regressor, "oob_prediction_")
  488. regressor.fit(X_train, y_train)
  489. if callable(oob_score):
  490. test_score = oob_score(y_test, regressor.predict(X_test))
  491. else:
  492. test_score = regressor.score(X_test, y_test)
  493. assert regressor.oob_score_ >= lower_bound_r2
  494. assert abs(test_score - regressor.oob_score_) <= 0.1
  495. assert hasattr(regressor, "oob_score_")
  496. assert hasattr(regressor, "oob_prediction_")
  497. assert not hasattr(regressor, "oob_decision_function_")
  498. if y.ndim == 1:
  499. expected_shape = (X_train.shape[0],)
  500. else:
  501. expected_shape = (X_train.shape[0], y.ndim)
  502. assert regressor.oob_prediction_.shape == expected_shape
  503. @pytest.mark.parametrize("ForestEstimator", FOREST_CLASSIFIERS_REGRESSORS.values())
  504. def test_forest_oob_warning(ForestEstimator):
  505. """Check that a warning is raised when not enough estimator and the OOB
  506. estimates will be inaccurate."""
  507. estimator = ForestEstimator(
  508. n_estimators=1,
  509. oob_score=True,
  510. bootstrap=True,
  511. random_state=0,
  512. )
  513. with pytest.warns(UserWarning, match="Some inputs do not have OOB scores"):
  514. estimator.fit(iris.data, iris.target)
  515. @pytest.mark.parametrize("ForestEstimator", FOREST_CLASSIFIERS_REGRESSORS.values())
  516. @pytest.mark.parametrize(
  517. "X, y, params, err_msg",
  518. [
  519. (
  520. iris.data,
  521. iris.target,
  522. {"oob_score": True, "bootstrap": False},
  523. "Out of bag estimation only available if bootstrap=True",
  524. ),
  525. (
  526. iris.data,
  527. rng.randint(low=0, high=5, size=(iris.data.shape[0], 2)),
  528. {"oob_score": True, "bootstrap": True},
  529. "The type of target cannot be used to compute OOB estimates",
  530. ),
  531. ],
  532. )
  533. def test_forest_oob_error(ForestEstimator, X, y, params, err_msg):
  534. estimator = ForestEstimator(**params)
  535. with pytest.raises(ValueError, match=err_msg):
  536. estimator.fit(X, y)
  537. @pytest.mark.parametrize("oob_score", [True, False])
  538. def test_random_trees_embedding_raise_error_oob(oob_score):
  539. with pytest.raises(TypeError, match="got an unexpected keyword argument"):
  540. RandomTreesEmbedding(oob_score=oob_score)
  541. with pytest.raises(NotImplementedError, match="OOB score not supported"):
  542. RandomTreesEmbedding()._set_oob_score_and_attributes(X, y)
  543. def check_gridsearch(name):
  544. forest = FOREST_CLASSIFIERS[name]()
  545. clf = GridSearchCV(forest, {"n_estimators": (1, 2), "max_depth": (1, 2)})
  546. clf.fit(iris.data, iris.target)
  547. @pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
  548. def test_gridsearch(name):
  549. # Check that base trees can be grid-searched.
  550. check_gridsearch(name)
  551. def check_parallel(name, X, y):
  552. """Check parallel computations in classification"""
  553. ForestEstimator = FOREST_ESTIMATORS[name]
  554. forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
  555. forest.fit(X, y)
  556. assert len(forest) == 10
  557. forest.set_params(n_jobs=1)
  558. y1 = forest.predict(X)
  559. forest.set_params(n_jobs=2)
  560. y2 = forest.predict(X)
  561. assert_array_almost_equal(y1, y2, 3)
  562. @pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
  563. def test_parallel(name):
  564. if name in FOREST_CLASSIFIERS:
  565. X = iris.data
  566. y = iris.target
  567. elif name in FOREST_REGRESSORS:
  568. X = X_reg
  569. y = y_reg
  570. check_parallel(name, X, y)
  571. def check_pickle(name, X, y):
  572. # Check pickability.
  573. ForestEstimator = FOREST_ESTIMATORS[name]
  574. obj = ForestEstimator(random_state=0)
  575. obj.fit(X, y)
  576. score = obj.score(X, y)
  577. pickle_object = pickle.dumps(obj)
  578. obj2 = pickle.loads(pickle_object)
  579. assert type(obj2) == obj.__class__
  580. score2 = obj2.score(X, y)
  581. assert score == score2
  582. @pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
  583. def test_pickle(name):
  584. if name in FOREST_CLASSIFIERS:
  585. X = iris.data
  586. y = iris.target
  587. elif name in FOREST_REGRESSORS:
  588. X = X_reg
  589. y = y_reg
  590. check_pickle(name, X[::2], y[::2])
  591. def check_multioutput(name):
  592. # Check estimators on multi-output problems.
  593. X_train = [
  594. [-2, -1],
  595. [-1, -1],
  596. [-1, -2],
  597. [1, 1],
  598. [1, 2],
  599. [2, 1],
  600. [-2, 1],
  601. [-1, 1],
  602. [-1, 2],
  603. [2, -1],
  604. [1, -1],
  605. [1, -2],
  606. ]
  607. y_train = [
  608. [-1, 0],
  609. [-1, 0],
  610. [-1, 0],
  611. [1, 1],
  612. [1, 1],
  613. [1, 1],
  614. [-1, 2],
  615. [-1, 2],
  616. [-1, 2],
  617. [1, 3],
  618. [1, 3],
  619. [1, 3],
  620. ]
  621. X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
  622. y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
  623. est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
  624. y_pred = est.fit(X_train, y_train).predict(X_test)
  625. assert_array_almost_equal(y_pred, y_test)
  626. if name in FOREST_CLASSIFIERS:
  627. with np.errstate(divide="ignore"):
  628. proba = est.predict_proba(X_test)
  629. assert len(proba) == 2
  630. assert proba[0].shape == (4, 2)
  631. assert proba[1].shape == (4, 4)
  632. log_proba = est.predict_log_proba(X_test)
  633. assert len(log_proba) == 2
  634. assert log_proba[0].shape == (4, 2)
  635. assert log_proba[1].shape == (4, 4)
  636. @pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
  637. def test_multioutput(name):
  638. check_multioutput(name)
  639. @pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
  640. def test_multioutput_string(name):
  641. # Check estimators on multi-output problems with string outputs.
  642. X_train = [
  643. [-2, -1],
  644. [-1, -1],
  645. [-1, -2],
  646. [1, 1],
  647. [1, 2],
  648. [2, 1],
  649. [-2, 1],
  650. [-1, 1],
  651. [-1, 2],
  652. [2, -1],
  653. [1, -1],
  654. [1, -2],
  655. ]
  656. y_train = [
  657. ["red", "blue"],
  658. ["red", "blue"],
  659. ["red", "blue"],
  660. ["green", "green"],
  661. ["green", "green"],
  662. ["green", "green"],
  663. ["red", "purple"],
  664. ["red", "purple"],
  665. ["red", "purple"],
  666. ["green", "yellow"],
  667. ["green", "yellow"],
  668. ["green", "yellow"],
  669. ]
  670. X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
  671. y_test = [
  672. ["red", "blue"],
  673. ["green", "green"],
  674. ["red", "purple"],
  675. ["green", "yellow"],
  676. ]
  677. est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
  678. y_pred = est.fit(X_train, y_train).predict(X_test)
  679. assert_array_equal(y_pred, y_test)
  680. with np.errstate(divide="ignore"):
  681. proba = est.predict_proba(X_test)
  682. assert len(proba) == 2
  683. assert proba[0].shape == (4, 2)
  684. assert proba[1].shape == (4, 4)
  685. log_proba = est.predict_log_proba(X_test)
  686. assert len(log_proba) == 2
  687. assert log_proba[0].shape == (4, 2)
  688. assert log_proba[1].shape == (4, 4)
  689. def check_classes_shape(name):
  690. # Test that n_classes_ and classes_ have proper shape.
  691. ForestClassifier = FOREST_CLASSIFIERS[name]
  692. # Classification, single output
  693. clf = ForestClassifier(random_state=0).fit(X, y)
  694. assert clf.n_classes_ == 2
  695. assert_array_equal(clf.classes_, [-1, 1])
  696. # Classification, multi-output
  697. _y = np.vstack((y, np.array(y) * 2)).T
  698. clf = ForestClassifier(random_state=0).fit(X, _y)
  699. assert_array_equal(clf.n_classes_, [2, 2])
  700. assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
  701. @pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
  702. def test_classes_shape(name):
  703. check_classes_shape(name)
  704. def test_random_trees_dense_type():
  705. # Test that the `sparse_output` parameter of RandomTreesEmbedding
  706. # works by returning a dense array.
  707. # Create the RTE with sparse=False
  708. hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
  709. X, y = datasets.make_circles(factor=0.5)
  710. X_transformed = hasher.fit_transform(X)
  711. # Assert that type is ndarray, not scipy.sparse.csr_matrix
  712. assert type(X_transformed) == np.ndarray
  713. def test_random_trees_dense_equal():
  714. # Test that the `sparse_output` parameter of RandomTreesEmbedding
  715. # works by returning the same array for both argument values.
  716. # Create the RTEs
  717. hasher_dense = RandomTreesEmbedding(
  718. n_estimators=10, sparse_output=False, random_state=0
  719. )
  720. hasher_sparse = RandomTreesEmbedding(
  721. n_estimators=10, sparse_output=True, random_state=0
  722. )
  723. X, y = datasets.make_circles(factor=0.5)
  724. X_transformed_dense = hasher_dense.fit_transform(X)
  725. X_transformed_sparse = hasher_sparse.fit_transform(X)
  726. # Assert that dense and sparse hashers have same array.
  727. assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
  728. # Ignore warnings from switching to more power iterations in randomized_svd
  729. @ignore_warnings
  730. def test_random_hasher():
  731. # test random forest hashing on circles dataset
  732. # make sure that it is linearly separable.
  733. # even after projected to two SVD dimensions
  734. # Note: Not all random_states produce perfect results.
  735. hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
  736. X, y = datasets.make_circles(factor=0.5)
  737. X_transformed = hasher.fit_transform(X)
  738. # test fit and transform:
  739. hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
  740. assert_array_equal(hasher.fit(X).transform(X).toarray(), X_transformed.toarray())
  741. # one leaf active per data point per forest
  742. assert X_transformed.shape[0] == X.shape[0]
  743. assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
  744. svd = TruncatedSVD(n_components=2)
  745. X_reduced = svd.fit_transform(X_transformed)
  746. linear_clf = LinearSVC()
  747. linear_clf.fit(X_reduced, y)
  748. assert linear_clf.score(X_reduced, y) == 1.0
  749. def test_random_hasher_sparse_data():
  750. X, y = datasets.make_multilabel_classification(random_state=0)
  751. hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
  752. X_transformed = hasher.fit_transform(X)
  753. X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
  754. assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
  755. def test_parallel_train():
  756. rng = check_random_state(12321)
  757. n_samples, n_features = 80, 30
  758. X_train = rng.randn(n_samples, n_features)
  759. y_train = rng.randint(0, 2, n_samples)
  760. clfs = [
  761. RandomForestClassifier(n_estimators=20, n_jobs=n_jobs, random_state=12345).fit(
  762. X_train, y_train
  763. )
  764. for n_jobs in [1, 2, 3, 8, 16, 32]
  765. ]
  766. X_test = rng.randn(n_samples, n_features)
  767. probas = [clf.predict_proba(X_test) for clf in clfs]
  768. for proba1, proba2 in zip(probas, probas[1:]):
  769. assert_array_almost_equal(proba1, proba2)
  770. def test_distribution():
  771. rng = check_random_state(12321)
  772. # Single variable with 4 values
  773. X = rng.randint(0, 4, size=(1000, 1))
  774. y = rng.rand(1000)
  775. n_trees = 500
  776. reg = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
  777. uniques = defaultdict(int)
  778. for tree in reg.estimators_:
  779. tree = "".join(
  780. ("%d,%d/" % (f, int(t)) if f >= 0 else "-")
  781. for f, t in zip(tree.tree_.feature, tree.tree_.threshold)
  782. )
  783. uniques[tree] += 1
  784. uniques = sorted([(1.0 * count / n_trees, tree) for tree, count in uniques.items()])
  785. # On a single variable problem where X_0 has 4 equiprobable values, there
  786. # are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
  787. # them has probability 1/3 while the 4 others have probability 1/6.
  788. assert len(uniques) == 5
  789. assert 0.20 > uniques[0][0] # Rough approximation of 1/6.
  790. assert 0.20 > uniques[1][0]
  791. assert 0.20 > uniques[2][0]
  792. assert 0.20 > uniques[3][0]
  793. assert uniques[4][0] > 0.3
  794. assert uniques[4][1] == "0,1/0,0/--0,2/--"
  795. # Two variables, one with 2 values, one with 3 values
  796. X = np.empty((1000, 2))
  797. X[:, 0] = np.random.randint(0, 2, 1000)
  798. X[:, 1] = np.random.randint(0, 3, 1000)
  799. y = rng.rand(1000)
  800. reg = ExtraTreesRegressor(max_features=1, random_state=1).fit(X, y)
  801. uniques = defaultdict(int)
  802. for tree in reg.estimators_:
  803. tree = "".join(
  804. ("%d,%d/" % (f, int(t)) if f >= 0 else "-")
  805. for f, t in zip(tree.tree_.feature, tree.tree_.threshold)
  806. )
  807. uniques[tree] += 1
  808. uniques = [(count, tree) for tree, count in uniques.items()]
  809. assert len(uniques) == 8
  810. def check_max_leaf_nodes_max_depth(name):
  811. X, y = hastie_X, hastie_y
  812. # Test precedence of max_leaf_nodes over max_depth.
  813. ForestEstimator = FOREST_ESTIMATORS[name]
  814. est = ForestEstimator(
  815. max_depth=1, max_leaf_nodes=4, n_estimators=1, random_state=0
  816. ).fit(X, y)
  817. assert est.estimators_[0].get_depth() == 1
  818. est = ForestEstimator(max_depth=1, n_estimators=1, random_state=0).fit(X, y)
  819. assert est.estimators_[0].get_depth() == 1
  820. @pytest.mark.parametrize("name", FOREST_ESTIMATORS)
  821. def test_max_leaf_nodes_max_depth(name):
  822. check_max_leaf_nodes_max_depth(name)
  823. def check_min_samples_split(name):
  824. X, y = hastie_X, hastie_y
  825. ForestEstimator = FOREST_ESTIMATORS[name]
  826. est = ForestEstimator(min_samples_split=10, n_estimators=1, random_state=0)
  827. est.fit(X, y)
  828. node_idx = est.estimators_[0].tree_.children_left != -1
  829. node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
  830. assert np.min(node_samples) > len(X) * 0.5 - 1, "Failed with {0}".format(name)
  831. est = ForestEstimator(min_samples_split=0.5, n_estimators=1, random_state=0)
  832. est.fit(X, y)
  833. node_idx = est.estimators_[0].tree_.children_left != -1
  834. node_samples = est.estimators_[0].tree_.n_node_samples[node_idx]
  835. assert np.min(node_samples) > len(X) * 0.5 - 1, "Failed with {0}".format(name)
  836. @pytest.mark.parametrize("name", FOREST_ESTIMATORS)
  837. def test_min_samples_split(name):
  838. check_min_samples_split(name)
  839. def check_min_samples_leaf(name):
  840. X, y = hastie_X, hastie_y
  841. # Test if leaves contain more than leaf_count training examples
  842. ForestEstimator = FOREST_ESTIMATORS[name]
  843. est = ForestEstimator(min_samples_leaf=5, n_estimators=1, random_state=0)
  844. est.fit(X, y)
  845. out = est.estimators_[0].tree_.apply(X)
  846. node_counts = np.bincount(out)
  847. # drop inner nodes
  848. leaf_count = node_counts[node_counts != 0]
  849. assert np.min(leaf_count) > 4, "Failed with {0}".format(name)
  850. est = ForestEstimator(min_samples_leaf=0.25, n_estimators=1, random_state=0)
  851. est.fit(X, y)
  852. out = est.estimators_[0].tree_.apply(X)
  853. node_counts = np.bincount(out)
  854. # drop inner nodes
  855. leaf_count = node_counts[node_counts != 0]
  856. assert np.min(leaf_count) > len(X) * 0.25 - 1, "Failed with {0}".format(name)
  857. @pytest.mark.parametrize("name", FOREST_ESTIMATORS)
  858. def test_min_samples_leaf(name):
  859. check_min_samples_leaf(name)
  860. def check_min_weight_fraction_leaf(name):
  861. X, y = hastie_X, hastie_y
  862. # Test if leaves contain at least min_weight_fraction_leaf of the
  863. # training set
  864. ForestEstimator = FOREST_ESTIMATORS[name]
  865. rng = np.random.RandomState(0)
  866. weights = rng.rand(X.shape[0])
  867. total_weight = np.sum(weights)
  868. # test both DepthFirstTreeBuilder and BestFirstTreeBuilder
  869. # by setting max_leaf_nodes
  870. for frac in np.linspace(0, 0.5, 6):
  871. est = ForestEstimator(
  872. min_weight_fraction_leaf=frac, n_estimators=1, random_state=0
  873. )
  874. if "RandomForest" in name:
  875. est.bootstrap = False
  876. est.fit(X, y, sample_weight=weights)
  877. out = est.estimators_[0].tree_.apply(X)
  878. node_weights = np.bincount(out, weights=weights)
  879. # drop inner nodes
  880. leaf_weights = node_weights[node_weights != 0]
  881. assert (
  882. np.min(leaf_weights) >= total_weight * est.min_weight_fraction_leaf
  883. ), "Failed with {0} min_weight_fraction_leaf={1}".format(
  884. name, est.min_weight_fraction_leaf
  885. )
  886. @pytest.mark.parametrize("name", FOREST_ESTIMATORS)
  887. def test_min_weight_fraction_leaf(name):
  888. check_min_weight_fraction_leaf(name)
  889. def check_sparse_input(name, X, X_sparse, y):
  890. ForestEstimator = FOREST_ESTIMATORS[name]
  891. dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
  892. sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
  893. assert_array_almost_equal(sparse.apply(X), dense.apply(X))
  894. if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
  895. assert_array_almost_equal(sparse.predict(X), dense.predict(X))
  896. assert_array_almost_equal(
  897. sparse.feature_importances_, dense.feature_importances_
  898. )
  899. if name in FOREST_CLASSIFIERS:
  900. assert_array_almost_equal(sparse.predict_proba(X), dense.predict_proba(X))
  901. assert_array_almost_equal(
  902. sparse.predict_log_proba(X), dense.predict_log_proba(X)
  903. )
  904. if name in FOREST_TRANSFORMERS:
  905. assert_array_almost_equal(
  906. sparse.transform(X).toarray(), dense.transform(X).toarray()
  907. )
  908. assert_array_almost_equal(
  909. sparse.fit_transform(X).toarray(), dense.fit_transform(X).toarray()
  910. )
  911. @pytest.mark.parametrize("name", FOREST_ESTIMATORS)
  912. @pytest.mark.parametrize("sparse_matrix", (csr_matrix, csc_matrix, coo_matrix))
  913. def test_sparse_input(name, sparse_matrix):
  914. X, y = datasets.make_multilabel_classification(random_state=0, n_samples=50)
  915. check_sparse_input(name, X, sparse_matrix(X), y)
  916. def check_memory_layout(name, dtype):
  917. # Check that it works no matter the memory layout
  918. est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
  919. # Nothing
  920. X = np.asarray(iris.data, dtype=dtype)
  921. y = iris.target
  922. assert_array_almost_equal(est.fit(X, y).predict(X), y)
  923. # C-order
  924. X = np.asarray(iris.data, order="C", dtype=dtype)
  925. y = iris.target
  926. assert_array_almost_equal(est.fit(X, y).predict(X), y)
  927. # F-order
  928. X = np.asarray(iris.data, order="F", dtype=dtype)
  929. y = iris.target
  930. assert_array_almost_equal(est.fit(X, y).predict(X), y)
  931. # Contiguous
  932. X = np.ascontiguousarray(iris.data, dtype=dtype)
  933. y = iris.target
  934. assert_array_almost_equal(est.fit(X, y).predict(X), y)
  935. if est.estimator.splitter in SPARSE_SPLITTERS:
  936. # csr matrix
  937. X = csr_matrix(iris.data, dtype=dtype)
  938. y = iris.target
  939. assert_array_almost_equal(est.fit(X, y).predict(X), y)
  940. # csc_matrix
  941. X = csc_matrix(iris.data, dtype=dtype)
  942. y = iris.target
  943. assert_array_almost_equal(est.fit(X, y).predict(X), y)
  944. # coo_matrix
  945. X = coo_matrix(iris.data, dtype=dtype)
  946. y = iris.target
  947. assert_array_almost_equal(est.fit(X, y).predict(X), y)
  948. # Strided
  949. X = np.asarray(iris.data[::3], dtype=dtype)
  950. y = iris.target[::3]
  951. assert_array_almost_equal(est.fit(X, y).predict(X), y)
  952. @pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
  953. @pytest.mark.parametrize("dtype", (np.float64, np.float32))
  954. def test_memory_layout(name, dtype):
  955. check_memory_layout(name, dtype)
  956. @ignore_warnings
  957. def check_1d_input(name, X, X_2d, y):
  958. ForestEstimator = FOREST_ESTIMATORS[name]
  959. with pytest.raises(ValueError):
  960. ForestEstimator(n_estimators=1, random_state=0).fit(X, y)
  961. est = ForestEstimator(random_state=0)
  962. est.fit(X_2d, y)
  963. if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
  964. with pytest.raises(ValueError):
  965. est.predict(X)
  966. @pytest.mark.parametrize("name", FOREST_ESTIMATORS)
  967. def test_1d_input(name):
  968. X = iris.data[:, 0]
  969. X_2d = iris.data[:, 0].reshape((-1, 1))
  970. y = iris.target
  971. with ignore_warnings():
  972. check_1d_input(name, X, X_2d, y)
  973. def check_class_weights(name):
  974. # Check class_weights resemble sample_weights behavior.
  975. ForestClassifier = FOREST_CLASSIFIERS[name]
  976. # Iris is balanced, so no effect expected for using 'balanced' weights
  977. clf1 = ForestClassifier(random_state=0)
  978. clf1.fit(iris.data, iris.target)
  979. clf2 = ForestClassifier(class_weight="balanced", random_state=0)
  980. clf2.fit(iris.data, iris.target)
  981. assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
  982. # Make a multi-output problem with three copies of Iris
  983. iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
  984. # Create user-defined weights that should balance over the outputs
  985. clf3 = ForestClassifier(
  986. class_weight=[
  987. {0: 2.0, 1: 2.0, 2: 1.0},
  988. {0: 2.0, 1: 1.0, 2: 2.0},
  989. {0: 1.0, 1: 2.0, 2: 2.0},
  990. ],
  991. random_state=0,
  992. )
  993. clf3.fit(iris.data, iris_multi)
  994. assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
  995. # Check against multi-output "balanced" which should also have no effect
  996. clf4 = ForestClassifier(class_weight="balanced", random_state=0)
  997. clf4.fit(iris.data, iris_multi)
  998. assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
  999. # Inflate importance of class 1, check against user-defined weights
  1000. sample_weight = np.ones(iris.target.shape)
  1001. sample_weight[iris.target == 1] *= 100
  1002. class_weight = {0: 1.0, 1: 100.0, 2: 1.0}
  1003. clf1 = ForestClassifier(random_state=0)
  1004. clf1.fit(iris.data, iris.target, sample_weight)
  1005. clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
  1006. clf2.fit(iris.data, iris.target)
  1007. assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
  1008. # Check that sample_weight and class_weight are multiplicative
  1009. clf1 = ForestClassifier(random_state=0)
  1010. clf1.fit(iris.data, iris.target, sample_weight**2)
  1011. clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
  1012. clf2.fit(iris.data, iris.target, sample_weight)
  1013. assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
  1014. @pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
  1015. def test_class_weights(name):
  1016. check_class_weights(name)
  1017. def check_class_weight_balanced_and_bootstrap_multi_output(name):
  1018. # Test class_weight works for multi-output"""
  1019. ForestClassifier = FOREST_CLASSIFIERS[name]
  1020. _y = np.vstack((y, np.array(y) * 2)).T
  1021. clf = ForestClassifier(class_weight="balanced", random_state=0)
  1022. clf.fit(X, _y)
  1023. clf = ForestClassifier(
  1024. class_weight=[{-1: 0.5, 1: 1.0}, {-2: 1.0, 2: 1.0}], random_state=0
  1025. )
  1026. clf.fit(X, _y)
  1027. # smoke test for balanced subsample
  1028. clf = ForestClassifier(class_weight="balanced_subsample", random_state=0)
  1029. clf.fit(X, _y)
  1030. @pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
  1031. def test_class_weight_balanced_and_bootstrap_multi_output(name):
  1032. check_class_weight_balanced_and_bootstrap_multi_output(name)
  1033. def check_class_weight_errors(name):
  1034. # Test if class_weight raises errors and warnings when expected.
  1035. ForestClassifier = FOREST_CLASSIFIERS[name]
  1036. _y = np.vstack((y, np.array(y) * 2)).T
  1037. # Warning warm_start with preset
  1038. clf = ForestClassifier(class_weight="balanced", warm_start=True, random_state=0)
  1039. clf.fit(X, y)
  1040. warn_msg = (
  1041. "Warm-start fitting without increasing n_estimators does not fit new trees."
  1042. )
  1043. with pytest.warns(UserWarning, match=warn_msg):
  1044. clf.fit(X, _y)
  1045. # Incorrect length list for multi-output
  1046. clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.0}], random_state=0)
  1047. with pytest.raises(ValueError):
  1048. clf.fit(X, _y)
  1049. @pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
  1050. def test_class_weight_errors(name):
  1051. check_class_weight_errors(name)
  1052. def check_warm_start(name, random_state=42):
  1053. # Test if fitting incrementally with warm start gives a forest of the
  1054. # right size and the same results as a normal fit.
  1055. X, y = hastie_X, hastie_y
  1056. ForestEstimator = FOREST_ESTIMATORS[name]
  1057. est_ws = None
  1058. for n_estimators in [5, 10]:
  1059. if est_ws is None:
  1060. est_ws = ForestEstimator(
  1061. n_estimators=n_estimators, random_state=random_state, warm_start=True
  1062. )
  1063. else:
  1064. est_ws.set_params(n_estimators=n_estimators)
  1065. est_ws.fit(X, y)
  1066. assert len(est_ws) == n_estimators
  1067. est_no_ws = ForestEstimator(
  1068. n_estimators=10, random_state=random_state, warm_start=False
  1069. )
  1070. est_no_ws.fit(X, y)
  1071. assert set([tree.random_state for tree in est_ws]) == set(
  1072. [tree.random_state for tree in est_no_ws]
  1073. )
  1074. assert_array_equal(
  1075. est_ws.apply(X), est_no_ws.apply(X), err_msg="Failed with {0}".format(name)
  1076. )
  1077. @pytest.mark.parametrize("name", FOREST_ESTIMATORS)
  1078. def test_warm_start(name):
  1079. check_warm_start(name)
  1080. def check_warm_start_clear(name):
  1081. # Test if fit clears state and grows a new forest when warm_start==False.
  1082. X, y = hastie_X, hastie_y
  1083. ForestEstimator = FOREST_ESTIMATORS[name]
  1084. est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False, random_state=1)
  1085. est.fit(X, y)
  1086. est_2 = ForestEstimator(
  1087. n_estimators=5, max_depth=1, warm_start=True, random_state=2
  1088. )
  1089. est_2.fit(X, y) # inits state
  1090. est_2.set_params(warm_start=False, random_state=1)
  1091. est_2.fit(X, y) # clears old state and equals est
  1092. assert_array_almost_equal(est_2.apply(X), est.apply(X))
  1093. @pytest.mark.parametrize("name", FOREST_ESTIMATORS)
  1094. def test_warm_start_clear(name):
  1095. check_warm_start_clear(name)
  1096. def check_warm_start_smaller_n_estimators(name):
  1097. # Test if warm start second fit with smaller n_estimators raises error.
  1098. X, y = hastie_X, hastie_y
  1099. ForestEstimator = FOREST_ESTIMATORS[name]
  1100. est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
  1101. est.fit(X, y)
  1102. est.set_params(n_estimators=4)
  1103. with pytest.raises(ValueError):
  1104. est.fit(X, y)
  1105. @pytest.mark.parametrize("name", FOREST_ESTIMATORS)
  1106. def test_warm_start_smaller_n_estimators(name):
  1107. check_warm_start_smaller_n_estimators(name)
  1108. def check_warm_start_equal_n_estimators(name):
  1109. # Test if warm start with equal n_estimators does nothing and returns the
  1110. # same forest and raises a warning.
  1111. X, y = hastie_X, hastie_y
  1112. ForestEstimator = FOREST_ESTIMATORS[name]
  1113. est = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True, random_state=1)
  1114. est.fit(X, y)
  1115. est_2 = ForestEstimator(
  1116. n_estimators=5, max_depth=3, warm_start=True, random_state=1
  1117. )
  1118. est_2.fit(X, y)
  1119. # Now est_2 equals est.
  1120. est_2.set_params(random_state=2)
  1121. warn_msg = (
  1122. "Warm-start fitting without increasing n_estimators does not fit new trees."
  1123. )
  1124. with pytest.warns(UserWarning, match=warn_msg):
  1125. est_2.fit(X, y)
  1126. # If we had fit the trees again we would have got a different forest as we
  1127. # changed the random state.
  1128. assert_array_equal(est.apply(X), est_2.apply(X))
  1129. @pytest.mark.parametrize("name", FOREST_ESTIMATORS)
  1130. def test_warm_start_equal_n_estimators(name):
  1131. check_warm_start_equal_n_estimators(name)
  1132. def check_warm_start_oob(name):
  1133. # Test that the warm start computes oob score when asked.
  1134. X, y = hastie_X, hastie_y
  1135. ForestEstimator = FOREST_ESTIMATORS[name]
  1136. # Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
  1137. est = ForestEstimator(
  1138. n_estimators=15,
  1139. max_depth=3,
  1140. warm_start=False,
  1141. random_state=1,
  1142. bootstrap=True,
  1143. oob_score=True,
  1144. )
  1145. est.fit(X, y)
  1146. est_2 = ForestEstimator(
  1147. n_estimators=5,
  1148. max_depth=3,
  1149. warm_start=False,
  1150. random_state=1,
  1151. bootstrap=True,
  1152. oob_score=False,
  1153. )
  1154. est_2.fit(X, y)
  1155. est_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
  1156. est_2.fit(X, y)
  1157. assert hasattr(est_2, "oob_score_")
  1158. assert est.oob_score_ == est_2.oob_score_
  1159. # Test that oob_score is computed even if we don't need to train
  1160. # additional trees.
  1161. est_3 = ForestEstimator(
  1162. n_estimators=15,
  1163. max_depth=3,
  1164. warm_start=True,
  1165. random_state=1,
  1166. bootstrap=True,
  1167. oob_score=False,
  1168. )
  1169. est_3.fit(X, y)
  1170. assert not hasattr(est_3, "oob_score_")
  1171. est_3.set_params(oob_score=True)
  1172. ignore_warnings(est_3.fit)(X, y)
  1173. assert est.oob_score_ == est_3.oob_score_
  1174. @pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
  1175. def test_warm_start_oob(name):
  1176. check_warm_start_oob(name)
  1177. @pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
  1178. def test_oob_not_computed_twice(name):
  1179. # Check that oob_score is not computed twice when warm_start=True.
  1180. X, y = hastie_X, hastie_y
  1181. ForestEstimator = FOREST_ESTIMATORS[name]
  1182. est = ForestEstimator(
  1183. n_estimators=10, warm_start=True, bootstrap=True, oob_score=True
  1184. )
  1185. with patch.object(
  1186. est, "_set_oob_score_and_attributes", wraps=est._set_oob_score_and_attributes
  1187. ) as mock_set_oob_score_and_attributes:
  1188. est.fit(X, y)
  1189. with pytest.warns(UserWarning, match="Warm-start fitting without increasing"):
  1190. est.fit(X, y)
  1191. mock_set_oob_score_and_attributes.assert_called_once()
  1192. def test_dtype_convert(n_classes=15):
  1193. classifier = RandomForestClassifier(random_state=0, bootstrap=False)
  1194. X = np.eye(n_classes)
  1195. y = [ch for ch in "ABCDEFGHIJKLMNOPQRSTU"[:n_classes]]
  1196. result = classifier.fit(X, y).predict(X)
  1197. assert_array_equal(classifier.classes_, y)
  1198. assert_array_equal(result, y)
  1199. def check_decision_path(name):
  1200. X, y = hastie_X, hastie_y
  1201. n_samples = X.shape[0]
  1202. ForestEstimator = FOREST_ESTIMATORS[name]
  1203. est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False, random_state=1)
  1204. est.fit(X, y)
  1205. indicator, n_nodes_ptr = est.decision_path(X)
  1206. assert indicator.shape[1] == n_nodes_ptr[-1]
  1207. assert indicator.shape[0] == n_samples
  1208. assert_array_equal(
  1209. np.diff(n_nodes_ptr), [e.tree_.node_count for e in est.estimators_]
  1210. )
  1211. # Assert that leaves index are correct
  1212. leaves = est.apply(X)
  1213. for est_id in range(leaves.shape[1]):
  1214. leave_indicator = [
  1215. indicator[i, n_nodes_ptr[est_id] + j]
  1216. for i, j in enumerate(leaves[:, est_id])
  1217. ]
  1218. assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
  1219. @pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
  1220. def test_decision_path(name):
  1221. check_decision_path(name)
  1222. def test_min_impurity_decrease():
  1223. X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
  1224. all_estimators = [
  1225. RandomForestClassifier,
  1226. RandomForestRegressor,
  1227. ExtraTreesClassifier,
  1228. ExtraTreesRegressor,
  1229. ]
  1230. for Estimator in all_estimators:
  1231. est = Estimator(min_impurity_decrease=0.1)
  1232. est.fit(X, y)
  1233. for tree in est.estimators_:
  1234. # Simply check if the parameter is passed on correctly. Tree tests
  1235. # will suffice for the actual working of this param
  1236. assert tree.min_impurity_decrease == 0.1
  1237. def test_poisson_y_positive_check():
  1238. est = RandomForestRegressor(criterion="poisson")
  1239. X = np.zeros((3, 3))
  1240. y = [-1, 1, 3]
  1241. err_msg = (
  1242. r"Some value\(s\) of y are negative which is "
  1243. r"not allowed for Poisson regression."
  1244. )
  1245. with pytest.raises(ValueError, match=err_msg):
  1246. est.fit(X, y)
  1247. y = [0, 0, 0]
  1248. err_msg = (
  1249. r"Sum of y is not strictly positive which "
  1250. r"is necessary for Poisson regression."
  1251. )
  1252. with pytest.raises(ValueError, match=err_msg):
  1253. est.fit(X, y)
  1254. # mypy error: Variable "DEFAULT_JOBLIB_BACKEND" is not valid type
  1255. class MyBackend(DEFAULT_JOBLIB_BACKEND): # type: ignore
  1256. def __init__(self, *args, **kwargs):
  1257. self.count = 0
  1258. super().__init__(*args, **kwargs)
  1259. def start_call(self):
  1260. self.count += 1
  1261. return super().start_call()
  1262. joblib.register_parallel_backend("testing", MyBackend)
  1263. @skip_if_no_parallel
  1264. def test_backend_respected():
  1265. clf = RandomForestClassifier(n_estimators=10, n_jobs=2)
  1266. with joblib.parallel_backend("testing") as (ba, n_jobs):
  1267. clf.fit(X, y)
  1268. assert ba.count > 0
  1269. # predict_proba requires shared memory. Ensure that's honored.
  1270. with joblib.parallel_backend("testing") as (ba, _):
  1271. clf.predict_proba(X)
  1272. assert ba.count == 0
  1273. def test_forest_feature_importances_sum():
  1274. X, y = make_classification(
  1275. n_samples=15, n_informative=3, random_state=1, n_classes=3
  1276. )
  1277. clf = RandomForestClassifier(
  1278. min_samples_leaf=5, random_state=42, n_estimators=200
  1279. ).fit(X, y)
  1280. assert math.isclose(1, clf.feature_importances_.sum(), abs_tol=1e-7)
  1281. def test_forest_degenerate_feature_importances():
  1282. # build a forest of single node trees. See #13636
  1283. X = np.zeros((10, 10))
  1284. y = np.ones((10,))
  1285. gbr = RandomForestRegressor(n_estimators=10).fit(X, y)
  1286. assert_array_equal(gbr.feature_importances_, np.zeros(10, dtype=np.float64))
  1287. @pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
  1288. def test_max_samples_bootstrap(name):
  1289. # Check invalid `max_samples` values
  1290. est = FOREST_CLASSIFIERS_REGRESSORS[name](bootstrap=False, max_samples=0.5)
  1291. err_msg = (
  1292. r"`max_sample` cannot be set if `bootstrap=False`. "
  1293. r"Either switch to `bootstrap=True` or set "
  1294. r"`max_sample=None`."
  1295. )
  1296. with pytest.raises(ValueError, match=err_msg):
  1297. est.fit(X, y)
  1298. @pytest.mark.parametrize("name", FOREST_CLASSIFIERS_REGRESSORS)
  1299. def test_large_max_samples_exception(name):
  1300. # Check invalid `max_samples`
  1301. est = FOREST_CLASSIFIERS_REGRESSORS[name](bootstrap=True, max_samples=int(1e9))
  1302. match = "`max_samples` must be <= n_samples=6 but got value 1000000000"
  1303. with pytest.raises(ValueError, match=match):
  1304. est.fit(X, y)
  1305. @pytest.mark.parametrize("name", FOREST_REGRESSORS)
  1306. def test_max_samples_boundary_regressors(name):
  1307. X_train, X_test, y_train, y_test = train_test_split(
  1308. X_reg, y_reg, train_size=0.7, test_size=0.3, random_state=0
  1309. )
  1310. ms_1_model = FOREST_REGRESSORS[name](
  1311. bootstrap=True, max_samples=1.0, random_state=0
  1312. )
  1313. ms_1_predict = ms_1_model.fit(X_train, y_train).predict(X_test)
  1314. ms_None_model = FOREST_REGRESSORS[name](
  1315. bootstrap=True, max_samples=None, random_state=0
  1316. )
  1317. ms_None_predict = ms_None_model.fit(X_train, y_train).predict(X_test)
  1318. ms_1_ms = mean_squared_error(ms_1_predict, y_test)
  1319. ms_None_ms = mean_squared_error(ms_None_predict, y_test)
  1320. assert ms_1_ms == pytest.approx(ms_None_ms)
  1321. @pytest.mark.parametrize("name", FOREST_CLASSIFIERS)
  1322. def test_max_samples_boundary_classifiers(name):
  1323. X_train, X_test, y_train, _ = train_test_split(
  1324. X_large, y_large, random_state=0, stratify=y_large
  1325. )
  1326. ms_1_model = FOREST_CLASSIFIERS[name](
  1327. bootstrap=True, max_samples=1.0, random_state=0
  1328. )
  1329. ms_1_proba = ms_1_model.fit(X_train, y_train).predict_proba(X_test)
  1330. ms_None_model = FOREST_CLASSIFIERS[name](
  1331. bootstrap=True, max_samples=None, random_state=0
  1332. )
  1333. ms_None_proba = ms_None_model.fit(X_train, y_train).predict_proba(X_test)
  1334. np.testing.assert_allclose(ms_1_proba, ms_None_proba)
  1335. def test_forest_y_sparse():
  1336. X = [[1, 2, 3]]
  1337. y = csr_matrix([4, 5, 6])
  1338. est = RandomForestClassifier()
  1339. msg = "sparse multilabel-indicator for y is not supported."
  1340. with pytest.raises(ValueError, match=msg):
  1341. est.fit(X, y)
  1342. @pytest.mark.parametrize("ForestClass", [RandomForestClassifier, RandomForestRegressor])
  1343. def test_little_tree_with_small_max_samples(ForestClass):
  1344. rng = np.random.RandomState(1)
  1345. X = rng.randn(10000, 2)
  1346. y = rng.randn(10000) > 0
  1347. # First fit with no restriction on max samples
  1348. est1 = ForestClass(
  1349. n_estimators=1,
  1350. random_state=rng,
  1351. max_samples=None,
  1352. )
  1353. # Second fit with max samples restricted to just 2
  1354. est2 = ForestClass(
  1355. n_estimators=1,
  1356. random_state=rng,
  1357. max_samples=2,
  1358. )
  1359. est1.fit(X, y)
  1360. est2.fit(X, y)
  1361. tree1 = est1.estimators_[0].tree_
  1362. tree2 = est2.estimators_[0].tree_
  1363. msg = "Tree without `max_samples` restriction should have more nodes"
  1364. assert tree1.node_count > tree2.node_count, msg
  1365. @pytest.mark.parametrize("Forest", FOREST_REGRESSORS)
  1366. def test_mse_criterion_object_segfault_smoke_test(Forest):
  1367. # This is a smoke test to ensure that passing a mutable criterion
  1368. # does not cause a segfault when fitting with concurrent threads.
  1369. # Non-regression test for:
  1370. # https://github.com/scikit-learn/scikit-learn/issues/12623
  1371. from sklearn.tree._criterion import MSE
  1372. y = y_reg.reshape(-1, 1)
  1373. n_samples, n_outputs = y.shape
  1374. mse_criterion = MSE(n_outputs, n_samples)
  1375. est = FOREST_REGRESSORS[Forest](n_estimators=2, n_jobs=2, criterion=mse_criterion)
  1376. est.fit(X_reg, y)
  1377. def test_random_trees_embedding_feature_names_out():
  1378. """Check feature names out for Random Trees Embedding."""
  1379. random_state = np.random.RandomState(0)
  1380. X = np.abs(random_state.randn(100, 4))
  1381. hasher = RandomTreesEmbedding(
  1382. n_estimators=2, max_depth=2, sparse_output=False, random_state=0
  1383. ).fit(X)
  1384. names = hasher.get_feature_names_out()
  1385. expected_names = [
  1386. f"randomtreesembedding_{tree}_{leaf}"
  1387. # Note: nodes with indices 0, 1 and 4 are internal split nodes and
  1388. # therefore do not appear in the expected output feature names.
  1389. for tree, leaf in [
  1390. (0, 2),
  1391. (0, 3),
  1392. (0, 5),
  1393. (0, 6),
  1394. (1, 2),
  1395. (1, 3),
  1396. (1, 5),
  1397. (1, 6),
  1398. ]
  1399. ]
  1400. assert_array_equal(expected_names, names)
  1401. # TODO(1.4): remove in 1.4
  1402. @pytest.mark.parametrize(
  1403. "name",
  1404. FOREST_ESTIMATORS,
  1405. )
  1406. def test_base_estimator_property_deprecated(name):
  1407. X = np.array([[1, 2], [3, 4]])
  1408. y = np.array([1, 0])
  1409. model = FOREST_ESTIMATORS[name]()
  1410. model.fit(X, y)
  1411. warn_msg = (
  1412. "Attribute `base_estimator_` was deprecated in version 1.2 and "
  1413. "will be removed in 1.4. Use `estimator_` instead."
  1414. )
  1415. with pytest.warns(FutureWarning, match=warn_msg):
  1416. model.base_estimator_
  1417. def test_read_only_buffer(monkeypatch):
  1418. """RandomForestClassifier must work on readonly sparse data.
  1419. Non-regression test for: https://github.com/scikit-learn/scikit-learn/issues/25333
  1420. """
  1421. monkeypatch.setattr(
  1422. sklearn.ensemble._forest,
  1423. "Parallel",
  1424. partial(Parallel, max_nbytes=100),
  1425. )
  1426. rng = np.random.RandomState(seed=0)
  1427. X, y = make_classification(n_samples=100, n_features=200, random_state=rng)
  1428. X = csr_matrix(X, copy=True)
  1429. clf = RandomForestClassifier(n_jobs=2, random_state=rng)
  1430. cross_val_score(clf, X, y, cv=2)
  1431. @pytest.mark.parametrize("class_weight", ["balanced_subsample", None])
  1432. def test_round_samples_to_one_when_samples_too_low(class_weight):
  1433. """Check low max_samples works and is rounded to one.
  1434. Non-regression test for gh-24037.
  1435. """
  1436. X, y = datasets.load_wine(return_X_y=True)
  1437. forest = RandomForestClassifier(
  1438. n_estimators=10, max_samples=1e-4, class_weight=class_weight, random_state=0
  1439. )
  1440. forest.fit(X, y)