test_text.py 51 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642
  1. import pickle
  2. import re
  3. import warnings
  4. from collections import defaultdict
  5. from collections.abc import Mapping
  6. from functools import partial
  7. from io import StringIO
  8. import numpy as np
  9. import pytest
  10. from numpy.testing import assert_array_almost_equal, assert_array_equal
  11. from scipy import sparse
  12. from sklearn.base import clone
  13. from sklearn.feature_extraction.text import (
  14. ENGLISH_STOP_WORDS,
  15. CountVectorizer,
  16. HashingVectorizer,
  17. TfidfTransformer,
  18. TfidfVectorizer,
  19. strip_accents_ascii,
  20. strip_accents_unicode,
  21. strip_tags,
  22. )
  23. from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split
  24. from sklearn.pipeline import Pipeline
  25. from sklearn.svm import LinearSVC
  26. from sklearn.utils import IS_PYPY
  27. from sklearn.utils._testing import (
  28. assert_allclose_dense_sparse,
  29. assert_almost_equal,
  30. fails_if_pypy,
  31. skip_if_32bit,
  32. )
# Two small toy corpora with mostly disjoint vocabularies ("junk food" vs.
# "not junk food").  "the" appears in every document and "copyright" in most,
# so the document-frequency thresholding tests below can pick them up.
JUNK_FOOD_DOCS = (
    "the pizza pizza beer copyright",
    "the pizza burger beer copyright",
    "the the pizza beer beer copyright",
    "the burger beer beer copyright",
    "the coke burger coke copyright",
    "the coke burger burger",
)

NOTJUNK_FOOD_DOCS = (
    "the salad celeri copyright",
    "the salad salad sparkling water copyright",
    "the the celeri celeri copyright",
    "the tomato tomato salad water",
    "the tomato salad water copyright",
)

# Full corpus: both groups concatenated (junk docs first).
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
  49. def uppercase(s):
  50. return strip_accents_unicode(s).upper()
  51. def strip_eacute(s):
  52. return s.replace("é", "e")
  53. def split_tokenize(s):
  54. return s.split()
  55. def lazy_analyze(s):
  56. return ["the_ultimate_feature"]
  57. def test_strip_accents():
  58. # check some classical latin accentuated symbols
  59. a = "àáâãäåçèéêë"
  60. expected = "aaaaaaceeee"
  61. assert strip_accents_unicode(a) == expected
  62. a = "ìíîïñòóôõöùúûüý"
  63. expected = "iiiinooooouuuuy"
  64. assert strip_accents_unicode(a) == expected
  65. # check some arabic
  66. a = "\u0625" # alef with a hamza below: إ
  67. expected = "\u0627" # simple alef: ا
  68. assert strip_accents_unicode(a) == expected
  69. # mix letters accentuated and not
  70. a = "this is à test"
  71. expected = "this is a test"
  72. assert strip_accents_unicode(a) == expected
  73. # strings that are already decomposed
  74. a = "o\u0308" # o with diaeresis
  75. expected = "o"
  76. assert strip_accents_unicode(a) == expected
  77. # combining marks by themselves
  78. a = "\u0300\u0301\u0302\u0303"
  79. expected = ""
  80. assert strip_accents_unicode(a) == expected
  81. # Multiple combining marks on one character
  82. a = "o\u0308\u0304"
  83. expected = "o"
  84. assert strip_accents_unicode(a) == expected
  85. def test_to_ascii():
  86. # check some classical latin accentuated symbols
  87. a = "àáâãäåçèéêë"
  88. expected = "aaaaaaceeee"
  89. assert strip_accents_ascii(a) == expected
  90. a = "ìíîïñòóôõöùúûüý"
  91. expected = "iiiinooooouuuuy"
  92. assert strip_accents_ascii(a) == expected
  93. # check some arabic
  94. a = "\u0625" # halef with a hamza below
  95. expected = "" # halef has no direct ascii match
  96. assert strip_accents_ascii(a) == expected
  97. # mix letters accentuated and not
  98. a = "this is à test"
  99. expected = "this is a test"
  100. assert strip_accents_ascii(a) == expected
@pytest.mark.parametrize("Vectorizer", (CountVectorizer, HashingVectorizer))
def test_word_analyzer_unigrams(Vectorizer):
    """Check the default word analyzer plus the preprocessor/tokenizer hooks."""
    # default analyzer with ascii accent stripping: punctuation and
    # single-letter tokens are dropped, accents removed
    wa = Vectorizer(strip_accents="ascii").build_analyzer()
    text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
    expected = [
        "ai",
        "mange",
        "du",
        "kangourou",
        "ce",
        "midi",
        "etait",
        "pas",
        "tres",
        "bon",
    ]
    assert wa(text) == expected

    text = "This is a test, really.\n\n I met Harry yesterday."
    expected = ["this", "is", "test", "really", "met", "harry", "yesterday"]
    assert wa(text) == expected

    # input="file" makes the analyzer read from a file-like object
    wa = Vectorizer(input="file").build_analyzer()
    text = StringIO("This is a test with a file-like object!")
    expected = ["this", "is", "test", "with", "file", "like", "object"]
    assert wa(text) == expected

    # with custom preprocessor (accent stripping + upper-casing)
    wa = Vectorizer(preprocessor=uppercase).build_analyzer()
    text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
    expected = [
        "AI",
        "MANGE",
        "DU",
        "KANGOUROU",
        "CE",
        "MIDI",
        "ETAIT",
        "PAS",
        "TRES",
        "BON",
    ]
    assert wa(text) == expected

    # with custom tokenizer: whitespace split keeps punctuation attached
    wa = Vectorizer(tokenizer=split_tokenize, strip_accents="ascii").build_analyzer()
    text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
    expected = [
        "j'ai",
        "mange",
        "du",
        "kangourou",
        "ce",
        "midi,",
        "c'etait",
        "pas",
        "tres",
        "bon.",
    ]
    assert wa(text) == expected
  157. def test_word_analyzer_unigrams_and_bigrams():
  158. wa = CountVectorizer(
  159. analyzer="word", strip_accents="unicode", ngram_range=(1, 2)
  160. ).build_analyzer()
  161. text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
  162. expected = [
  163. "ai",
  164. "mange",
  165. "du",
  166. "kangourou",
  167. "ce",
  168. "midi",
  169. "etait",
  170. "pas",
  171. "tres",
  172. "bon",
  173. "ai mange",
  174. "mange du",
  175. "du kangourou",
  176. "kangourou ce",
  177. "ce midi",
  178. "midi etait",
  179. "etait pas",
  180. "pas tres",
  181. "tres bon",
  182. ]
  183. assert wa(text) == expected
  184. def test_unicode_decode_error():
  185. # decode_error default to strict, so this should fail
  186. # First, encode (as bytes) a unicode string.
  187. text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
  188. text_bytes = text.encode("utf-8")
  189. # Then let the Analyzer try to decode it as ascii. It should fail,
  190. # because we have given it an incorrect encoding.
  191. wa = CountVectorizer(ngram_range=(1, 2), encoding="ascii").build_analyzer()
  192. with pytest.raises(UnicodeDecodeError):
  193. wa(text_bytes)
  194. ca = CountVectorizer(
  195. analyzer="char", ngram_range=(3, 6), encoding="ascii"
  196. ).build_analyzer()
  197. with pytest.raises(UnicodeDecodeError):
  198. ca(text_bytes)
def test_char_ngram_analyzer():
    """Character n-grams (sizes 3-6) from raw strings and file-like input."""
    cnga = CountVectorizer(
        analyzer="char", strip_accents="unicode", ngram_range=(3, 6)
    ).build_analyzer()

    # shortest n-grams (size 3) come first in the output
    text = "J'ai mangé du kangourou ce midi, c'était pas très bon"
    expected = ["j'a", "'ai", "ai ", "i m", " ma"]
    assert cnga(text)[:5] == expected
    expected = ["s tres", " tres ", "tres b", "res bo", "es bon"]
    assert cnga(text)[-5:] == expected

    text = "This \n\tis a test, really.\n\n I met Harry yesterday"
    expected = ["thi", "his", "is ", "s i", " is"]
    assert cnga(text)[:5] == expected
    expected = [" yeste", "yester", "esterd", "sterda", "terday"]
    assert cnga(text)[-5:] == expected

    # input="file": the analyzer reads the document from a file-like object
    cnga = CountVectorizer(
        input="file", analyzer="char", ngram_range=(3, 6)
    ).build_analyzer()
    text = StringIO("This is a test with a file-like object!")
    expected = ["thi", "his", "is ", "s i", " is"]
    assert cnga(text)[:5] == expected
  219. def test_char_wb_ngram_analyzer():
  220. cnga = CountVectorizer(
  221. analyzer="char_wb", strip_accents="unicode", ngram_range=(3, 6)
  222. ).build_analyzer()
  223. text = "This \n\tis a test, really.\n\n I met Harry yesterday"
  224. expected = [" th", "thi", "his", "is ", " thi"]
  225. assert cnga(text)[:5] == expected
  226. expected = ["yester", "esterd", "sterda", "terday", "erday "]
  227. assert cnga(text)[-5:] == expected
  228. cnga = CountVectorizer(
  229. input="file", analyzer="char_wb", ngram_range=(3, 6)
  230. ).build_analyzer()
  231. text = StringIO("A test with a file-like object!")
  232. expected = [" a ", " te", "tes", "est", "st ", " tes"]
  233. assert cnga(text)[:6] == expected
  234. def test_word_ngram_analyzer():
  235. cnga = CountVectorizer(
  236. analyzer="word", strip_accents="unicode", ngram_range=(3, 6)
  237. ).build_analyzer()
  238. text = "This \n\tis a test, really.\n\n I met Harry yesterday"
  239. expected = ["this is test", "is test really", "test really met"]
  240. assert cnga(text)[:3] == expected
  241. expected = [
  242. "test really met harry yesterday",
  243. "this is test really met harry",
  244. "is test really met harry yesterday",
  245. ]
  246. assert cnga(text)[-3:] == expected
  247. cnga_file = CountVectorizer(
  248. input="file", analyzer="word", ngram_range=(3, 6)
  249. ).build_analyzer()
  250. file = StringIO(text)
  251. assert cnga_file(file) == cnga(text)
def test_countvectorizer_custom_vocabulary():
    """A user-supplied vocabulary is honored whatever container holds it."""
    vocab = {"pizza": 0, "beer": 1}
    terms = set(vocab.keys())

    # Try a few of the supported types.
    for typ in [dict, list, iter, partial(defaultdict, int)]:
        v = typ(vocab)
        vect = CountVectorizer(vocabulary=v)
        vect.fit(JUNK_FOOD_DOCS)
        if isinstance(v, Mapping):
            # mappings are adopted as-is, term indices included
            assert vect.vocabulary_ == vocab
        else:
            # other iterables only fix the set of terms, not the indices
            assert set(vect.vocabulary_) == terms
        X = vect.transform(JUNK_FOOD_DOCS)
        assert X.shape[1] == len(terms)
        # a fresh, unfitted vectorizer with the same vocabulary can still
        # inverse-transform counts produced by the fitted one
        v = typ(vocab)
        vect = CountVectorizer(vocabulary=v)
        inv = vect.inverse_transform(X)
        assert len(inv) == X.shape[0]
  270. def test_countvectorizer_custom_vocabulary_pipeline():
  271. what_we_like = ["pizza", "beer"]
  272. pipe = Pipeline(
  273. [
  274. ("count", CountVectorizer(vocabulary=what_we_like)),
  275. ("tfidf", TfidfTransformer()),
  276. ]
  277. )
  278. X = pipe.fit_transform(ALL_FOOD_DOCS)
  279. assert set(pipe.named_steps["count"].vocabulary_) == set(what_we_like)
  280. assert X.shape[1] == len(what_we_like)
  281. def test_countvectorizer_custom_vocabulary_repeated_indices():
  282. vocab = {"pizza": 0, "beer": 0}
  283. msg = "Vocabulary contains repeated indices"
  284. with pytest.raises(ValueError, match=msg):
  285. vect = CountVectorizer(vocabulary=vocab)
  286. vect.fit(["pasta_siziliana"])
  287. def test_countvectorizer_custom_vocabulary_gap_index():
  288. vocab = {"pizza": 1, "beer": 2}
  289. with pytest.raises(ValueError, match="doesn't contain index"):
  290. vect = CountVectorizer(vocabulary=vocab)
  291. vect.fit(["pasta_verdura"])
  292. def test_countvectorizer_stop_words():
  293. cv = CountVectorizer()
  294. cv.set_params(stop_words="english")
  295. assert cv.get_stop_words() == ENGLISH_STOP_WORDS
  296. cv.set_params(stop_words="_bad_str_stop_")
  297. with pytest.raises(ValueError):
  298. cv.get_stop_words()
  299. cv.set_params(stop_words="_bad_unicode_stop_")
  300. with pytest.raises(ValueError):
  301. cv.get_stop_words()
  302. stoplist = ["some", "other", "words"]
  303. cv.set_params(stop_words=stoplist)
  304. assert cv.get_stop_words() == set(stoplist)
  305. def test_countvectorizer_empty_vocabulary():
  306. with pytest.raises(ValueError, match="empty vocabulary"):
  307. vect = CountVectorizer(vocabulary=[])
  308. vect.fit(["foo"])
  309. with pytest.raises(ValueError, match="empty vocabulary"):
  310. v = CountVectorizer(max_df=1.0, stop_words="english")
  311. # fit on stopwords only
  312. v.fit(["to be or not to be", "and me too", "and so do you"])
  313. def test_fit_countvectorizer_twice():
  314. cv = CountVectorizer()
  315. X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
  316. X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
  317. assert X1.shape[1] != X2.shape[1]
def test_countvectorizer_custom_token_pattern():
    """Check `get_feature_names_out()` when a custom token pattern is passed.

    Non-regression test for:
    https://github.com/scikit-learn/scikit-learn/issues/12971
    """
    corpus = [
        "This is the 1st document in my corpus.",
        "This document is the 2nd sample.",
        "And this is the 3rd one.",
        "Is this the 4th document?",
    ]
    # single capturing group: the word that follows an ordinal (1st, 2nd, ...)
    token_pattern = r"[0-9]{1,3}(?:st|nd|rd|th)\s\b(\w{2,})\b"
    vectorizer = CountVectorizer(token_pattern=token_pattern)
    vectorizer.fit_transform(corpus)
    expected = ["document", "one", "sample"]
    feature_names_out = vectorizer.get_feature_names_out()
    assert_array_equal(feature_names_out, expected)
  335. def test_countvectorizer_custom_token_pattern_with_several_group():
  336. """Check that we raise an error if token pattern capture several groups.
  337. Non-regression test for:
  338. https://github.com/scikit-learn/scikit-learn/issues/12971
  339. """
  340. corpus = [
  341. "This is the 1st document in my corpus.",
  342. "This document is the 2nd sample.",
  343. "And this is the 3rd one.",
  344. "Is this the 4th document?",
  345. ]
  346. token_pattern = r"([0-9]{1,3}(?:st|nd|rd|th))\s\b(\w{2,})\b"
  347. err_msg = "More than 1 capturing group in token pattern"
  348. vectorizer = CountVectorizer(token_pattern=token_pattern)
  349. with pytest.raises(ValueError, match=err_msg):
  350. vectorizer.fit(corpus)
def test_countvectorizer_uppercase_in_vocab():
    # Check that the check for uppercase in the provided vocabulary is only done at fit
    # time and not at transform time (#21251)
    vocabulary = ["Sample", "Upper", "Case", "Vocabulary"]
    message = (
        "Upper case characters found in"
        " vocabulary while 'lowercase'"
        " is True. These entries will not"
        " be matched with any documents"
    )

    vectorizer = CountVectorizer(lowercase=True, vocabulary=vocabulary)

    # fit must warn about the unmatched upper-case entries ...
    with pytest.warns(UserWarning, match=message):
        vectorizer.fit(vocabulary)

    # ... while transform must stay silent: any UserWarning raised here
    # is escalated to an error and would fail the test
    with warnings.catch_warnings():
        warnings.simplefilter("error", UserWarning)
        vectorizer.transform(vocabulary)
  367. def test_tf_transformer_feature_names_out():
  368. """Check get_feature_names_out for TfidfTransformer"""
  369. X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]]
  370. tr = TfidfTransformer(smooth_idf=True, norm="l2").fit(X)
  371. feature_names_in = ["a", "c", "b"]
  372. feature_names_out = tr.get_feature_names_out(feature_names_in)
  373. assert_array_equal(feature_names_in, feature_names_out)
  374. def test_tf_idf_smoothing():
  375. X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]]
  376. tr = TfidfTransformer(smooth_idf=True, norm="l2")
  377. tfidf = tr.fit_transform(X).toarray()
  378. assert (tfidf >= 0).all()
  379. # check normalization
  380. assert_array_almost_equal((tfidf**2).sum(axis=1), [1.0, 1.0, 1.0])
  381. # this is robust to features with only zeros
  382. X = [[1, 1, 0], [1, 1, 0], [1, 0, 0]]
  383. tr = TfidfTransformer(smooth_idf=True, norm="l2")
  384. tfidf = tr.fit_transform(X).toarray()
  385. assert (tfidf >= 0).all()
def test_tfidf_no_smoothing():
    """Unsmoothed idf works on full columns but warns on an all-zero feature."""
    X = [[1, 1, 1], [1, 1, 0], [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=False, norm="l2")
    tfidf = tr.fit_transform(X).toarray()
    assert (tfidf >= 0).all()

    # check normalization
    assert_array_almost_equal((tfidf**2).sum(axis=1), [1.0, 1.0, 1.0])

    # the lack of smoothing make IDF fragile in the presence of feature with
    # only zeros
    X = [[1, 1, 0], [1, 1, 0], [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=False, norm="l2")

    in_warning_message = "divide by zero"
    with pytest.warns(RuntimeWarning, match=in_warning_message):
        tr.fit_transform(X).toarray()
  400. def test_sublinear_tf():
  401. X = [[1], [2], [3]]
  402. tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
  403. tfidf = tr.fit_transform(X).toarray()
  404. assert tfidf[0] == 1
  405. assert tfidf[1] > tfidf[0]
  406. assert tfidf[2] > tfidf[1]
  407. assert tfidf[1] < 2
  408. assert tfidf[2] < 3
  409. def test_vectorizer():
  410. # raw documents as an iterator
  411. train_data = iter(ALL_FOOD_DOCS[:-1])
  412. test_data = [ALL_FOOD_DOCS[-1]]
  413. n_train = len(ALL_FOOD_DOCS) - 1
  414. # test without vocabulary
  415. v1 = CountVectorizer(max_df=0.5)
  416. counts_train = v1.fit_transform(train_data)
  417. if hasattr(counts_train, "tocsr"):
  418. counts_train = counts_train.tocsr()
  419. assert counts_train[0, v1.vocabulary_["pizza"]] == 2
  420. # build a vectorizer v1 with the same vocabulary as the one fitted by v1
  421. v2 = CountVectorizer(vocabulary=v1.vocabulary_)
  422. # compare that the two vectorizer give the same output on the test sample
  423. for v in (v1, v2):
  424. counts_test = v.transform(test_data)
  425. if hasattr(counts_test, "tocsr"):
  426. counts_test = counts_test.tocsr()
  427. vocabulary = v.vocabulary_
  428. assert counts_test[0, vocabulary["salad"]] == 1
  429. assert counts_test[0, vocabulary["tomato"]] == 1
  430. assert counts_test[0, vocabulary["water"]] == 1
  431. # stop word from the fixed list
  432. assert "the" not in vocabulary
  433. # stop word found automatically by the vectorizer DF thresholding
  434. # words that are high frequent across the complete corpus are likely
  435. # to be not informative (either real stop words of extraction
  436. # artifacts)
  437. assert "copyright" not in vocabulary
  438. # not present in the sample
  439. assert counts_test[0, vocabulary["coke"]] == 0
  440. assert counts_test[0, vocabulary["burger"]] == 0
  441. assert counts_test[0, vocabulary["beer"]] == 0
  442. assert counts_test[0, vocabulary["pizza"]] == 0
  443. # test tf-idf
  444. t1 = TfidfTransformer(norm="l1")
  445. tfidf = t1.fit(counts_train).transform(counts_train).toarray()
  446. assert len(t1.idf_) == len(v1.vocabulary_)
  447. assert tfidf.shape == (n_train, len(v1.vocabulary_))
  448. # test tf-idf with new data
  449. tfidf_test = t1.transform(counts_test).toarray()
  450. assert tfidf_test.shape == (len(test_data), len(v1.vocabulary_))
  451. # test tf alone
  452. t2 = TfidfTransformer(norm="l1", use_idf=False)
  453. tf = t2.fit(counts_train).transform(counts_train).toarray()
  454. assert not hasattr(t2, "idf_")
  455. # test idf transform with unlearned idf vector
  456. t3 = TfidfTransformer(use_idf=True)
  457. with pytest.raises(ValueError):
  458. t3.transform(counts_train)
  459. # L1-normalized term frequencies sum to one
  460. assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
  461. # test the direct tfidf vectorizer
  462. # (equivalent to term count vectorizer + tfidf transformer)
  463. train_data = iter(ALL_FOOD_DOCS[:-1])
  464. tv = TfidfVectorizer(norm="l1")
  465. tv.max_df = v1.max_df
  466. tfidf2 = tv.fit_transform(train_data).toarray()
  467. assert not tv.fixed_vocabulary_
  468. assert_array_almost_equal(tfidf, tfidf2)
  469. # test the direct tfidf vectorizer with new data
  470. tfidf_test2 = tv.transform(test_data).toarray()
  471. assert_array_almost_equal(tfidf_test, tfidf_test2)
  472. # test transform on unfitted vectorizer with empty vocabulary
  473. v3 = CountVectorizer(vocabulary=None)
  474. with pytest.raises(ValueError):
  475. v3.transform(train_data)
  476. # ascii preprocessor?
  477. v3.set_params(strip_accents="ascii", lowercase=False)
  478. processor = v3.build_preprocessor()
  479. text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
  480. expected = strip_accents_ascii(text)
  481. result = processor(text)
  482. assert expected == result
  483. # error on bad strip_accents param
  484. v3.set_params(strip_accents="_gabbledegook_", preprocessor=None)
  485. with pytest.raises(ValueError):
  486. v3.build_preprocessor()
  487. # error with bad analyzer type
  488. v3.set_params = "_invalid_analyzer_type_"
  489. with pytest.raises(ValueError):
  490. v3.build_analyzer()
def test_tfidf_vectorizer_setters():
    """TfidfVectorizer forwards idf params to the inner transformer only at fit."""
    norm, use_idf, smooth_idf, sublinear_tf = "l2", False, False, False
    tv = TfidfVectorizer(
        norm=norm, use_idf=use_idf, smooth_idf=smooth_idf, sublinear_tf=sublinear_tf
    )
    tv.fit(JUNK_FOOD_DOCS)
    assert tv._tfidf.norm == norm
    assert tv._tfidf.use_idf == use_idf
    assert tv._tfidf.smooth_idf == smooth_idf
    assert tv._tfidf.sublinear_tf == sublinear_tf

    # assigning value to `TfidfTransformer` should not have any effect until
    # fitting
    tv.norm = "l1"
    tv.use_idf = True
    tv.smooth_idf = True
    tv.sublinear_tf = True
    assert tv._tfidf.norm == norm
    assert tv._tfidf.use_idf == use_idf
    assert tv._tfidf.smooth_idf == smooth_idf
    assert tv._tfidf.sublinear_tf == sublinear_tf

    # refitting propagates the new settings to the inner transformer
    tv.fit(JUNK_FOOD_DOCS)
    assert tv._tfidf.norm == tv.norm
    assert tv._tfidf.use_idf == tv.use_idf
    assert tv._tfidf.smooth_idf == tv.smooth_idf
    assert tv._tfidf.sublinear_tf == tv.sublinear_tf
  516. @fails_if_pypy
  517. def test_hashing_vectorizer():
  518. v = HashingVectorizer()
  519. X = v.transform(ALL_FOOD_DOCS)
  520. token_nnz = X.nnz
  521. assert X.shape == (len(ALL_FOOD_DOCS), v.n_features)
  522. assert X.dtype == v.dtype
  523. # By default the hashed values receive a random sign and l2 normalization
  524. # makes the feature values bounded
  525. assert np.min(X.data) > -1
  526. assert np.min(X.data) < 0
  527. assert np.max(X.data) > 0
  528. assert np.max(X.data) < 1
  529. # Check that the rows are normalized
  530. for i in range(X.shape[0]):
  531. assert_almost_equal(np.linalg.norm(X[0].data, 2), 1.0)
  532. # Check vectorization with some non-default parameters
  533. v = HashingVectorizer(ngram_range=(1, 2), norm="l1")
  534. X = v.transform(ALL_FOOD_DOCS)
  535. assert X.shape == (len(ALL_FOOD_DOCS), v.n_features)
  536. assert X.dtype == v.dtype
  537. # ngrams generate more non zeros
  538. ngrams_nnz = X.nnz
  539. assert ngrams_nnz > token_nnz
  540. assert ngrams_nnz < 2 * token_nnz
  541. # makes the feature values bounded
  542. assert np.min(X.data) > -1
  543. assert np.max(X.data) < 1
  544. # Check that the rows are normalized
  545. for i in range(X.shape[0]):
  546. assert_almost_equal(np.linalg.norm(X[0].data, 1), 1.0)
def test_feature_names():
    """get_feature_names_out with a learned vs. a fixed vocabulary."""
    cv = CountVectorizer(max_df=0.5)

    # test for Value error on unfitted/empty vocabulary
    with pytest.raises(ValueError):
        cv.get_feature_names_out()
    assert not cv.fixed_vocabulary_

    # test for vocabulary learned from data
    X = cv.fit_transform(ALL_FOOD_DOCS)
    n_samples, n_features = X.shape
    assert len(cv.vocabulary_) == n_features

    feature_names = cv.get_feature_names_out()
    assert isinstance(feature_names, np.ndarray)
    assert feature_names.dtype == object

    # feature names come back sorted alphabetically
    assert len(feature_names) == n_features
    assert_array_equal(
        [
            "beer",
            "burger",
            "celeri",
            "coke",
            "pizza",
            "salad",
            "sparkling",
            "tomato",
            "water",
        ],
        feature_names,
    )

    # name order matches the vocabulary_ index mapping
    for idx, name in enumerate(feature_names):
        assert idx == cv.vocabulary_.get(name)

    # test for custom vocabulary
    vocab = [
        "beer",
        "burger",
        "celeri",
        "coke",
        "pizza",
        "salad",
        "sparkling",
        "tomato",
        "water",
    ]

    cv = CountVectorizer(vocabulary=vocab)
    feature_names = cv.get_feature_names_out()
    assert_array_equal(
        [
            "beer",
            "burger",
            "celeri",
            "coke",
            "pizza",
            "salad",
            "sparkling",
            "tomato",
            "water",
        ],
        feature_names,
    )
    assert cv.fixed_vocabulary_

    for idx, name in enumerate(feature_names):
        assert idx == cv.vocabulary_.get(name)
@pytest.mark.parametrize("Vectorizer", (CountVectorizer, TfidfVectorizer))
def test_vectorizer_max_features(Vectorizer):
    """max_features keeps the most frequent terms; the rest become stop words."""
    expected_vocabulary = {"burger", "beer", "salad", "pizza"}
    # terms removed either by max_df thresholding or by max_features truncation
    expected_stop_words = {
        "celeri",
        "tomato",
        "copyright",
        "coke",
        "sparkling",
        "water",
        "the",
    }

    # test bounded number of extracted features
    vectorizer = Vectorizer(max_df=0.6, max_features=4)
    vectorizer.fit(ALL_FOOD_DOCS)
    assert set(vectorizer.vocabulary_) == expected_vocabulary
    assert vectorizer.stop_words_ == expected_stop_words
  625. def test_count_vectorizer_max_features():
  626. # Regression test: max_features didn't work correctly in 0.14.
  627. cv_1 = CountVectorizer(max_features=1)
  628. cv_3 = CountVectorizer(max_features=3)
  629. cv_None = CountVectorizer(max_features=None)
  630. counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
  631. counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
  632. counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
  633. features_1 = cv_1.get_feature_names_out()
  634. features_3 = cv_3.get_feature_names_out()
  635. features_None = cv_None.get_feature_names_out()
  636. # The most common feature is "the", with frequency 7.
  637. assert 7 == counts_1.max()
  638. assert 7 == counts_3.max()
  639. assert 7 == counts_None.max()
  640. # The most common feature should be the same
  641. assert "the" == features_1[np.argmax(counts_1)]
  642. assert "the" == features_3[np.argmax(counts_3)]
  643. assert "the" == features_None[np.argmax(counts_None)]
def test_vectorizer_max_df():
    """max_df drops frequent terms; accepts a float ratio or an absolute count."""
    test_data = ["abc", "dea", "eat"]
    vect = CountVectorizer(analyzer="char", max_df=1.0)
    vect.fit(test_data)
    assert "a" in vect.vocabulary_.keys()
    assert len(vect.vocabulary_.keys()) == 6
    assert len(vect.stop_words_) == 0

    vect.max_df = 0.5  # 0.5 * 3 documents -> max_doc_count == 1.5
    vect.fit(test_data)
    assert "a" not in vect.vocabulary_.keys()  # {ae} ignored
    assert len(vect.vocabulary_.keys()) == 4  # {bcdt} remain
    assert "a" in vect.stop_words_
    assert len(vect.stop_words_) == 2

    # an integer max_df is an absolute document count
    vect.max_df = 1
    vect.fit(test_data)
    assert "a" not in vect.vocabulary_.keys()  # {ae} ignored
    assert len(vect.vocabulary_.keys()) == 4  # {bcdt} remain
    assert "a" in vect.stop_words_
    assert len(vect.stop_words_) == 2
def test_vectorizer_min_df():
    """min_df drops rare terms; accepts an absolute count or a float ratio."""
    test_data = ["abc", "dea", "eat"]
    vect = CountVectorizer(analyzer="char", min_df=1)
    vect.fit(test_data)
    assert "a" in vect.vocabulary_.keys()
    assert len(vect.vocabulary_.keys()) == 6
    assert len(vect.stop_words_) == 0

    # an integer min_df is an absolute document count
    vect.min_df = 2
    vect.fit(test_data)
    assert "c" not in vect.vocabulary_.keys()  # {bcdt} ignored
    assert len(vect.vocabulary_.keys()) == 2  # {ae} remain
    assert "c" in vect.stop_words_
    assert len(vect.stop_words_) == 4

    vect.min_df = 0.8  # 0.8 * 3 documents -> min_doc_count == 2.4
    vect.fit(test_data)
    assert "c" not in vect.vocabulary_.keys()  # {bcdet} ignored
    assert len(vect.vocabulary_.keys()) == 1  # {a} remains
    assert "c" in vect.stop_words_
    assert len(vect.stop_words_) == 5
def test_count_binary_occurrences():
    """Check the ``binary`` and ``dtype`` parameters of CountVectorizer."""
    # by default multiple occurrences are counted as longs
    test_data = ["aaabc", "abbde"]
    vect = CountVectorizer(analyzer="char", max_df=1.0)
    X = vect.fit_transform(test_data).toarray()
    assert_array_equal(["a", "b", "c", "d", "e"], vect.get_feature_names_out())
    assert_array_equal([[3, 1, 1, 0, 0], [1, 2, 0, 1, 1]], X)

    # using boolean features, we can fetch the binary occurrence info
    # instead.
    vect = CountVectorizer(analyzer="char", max_df=1.0, binary=True)
    X = vect.fit_transform(test_data).toarray()
    assert_array_equal([[1, 1, 1, 0, 0], [1, 1, 0, 1, 1]], X)

    # check the ability to change the dtype
    vect = CountVectorizer(analyzer="char", max_df=1.0, binary=True, dtype=np.float32)
    X_sparse = vect.fit_transform(test_data)
    assert X_sparse.dtype == np.float32
@fails_if_pypy
def test_hashed_binary_occurrences():
    """Check the ``binary`` and ``dtype`` parameters of HashingVectorizer."""
    # by default multiple occurrences are counted as longs
    test_data = ["aaabc", "abbde"]
    vect = HashingVectorizer(alternate_sign=False, analyzer="char", norm=None)
    X = vect.transform(test_data)
    # maximum count per row matches the most repeated char ("a": 3, "b": 2)
    assert np.max(X[0:1].data) == 3
    assert np.max(X[1:2].data) == 2
    assert X.dtype == np.float64

    # using boolean features, we can fetch the binary occurrence info
    # instead.
    vect = HashingVectorizer(
        analyzer="char", alternate_sign=False, binary=True, norm=None
    )
    X = vect.transform(test_data)
    assert np.max(X.data) == 1
    assert X.dtype == np.float64

    # check the ability to change the dtype
    vect = HashingVectorizer(
        analyzer="char", alternate_sign=False, binary=True, norm=None, dtype=np.float64
    )
    X = vect.transform(test_data)
    assert X.dtype == np.float64
@pytest.mark.parametrize("Vectorizer", (CountVectorizer, TfidfVectorizer))
def test_vectorizer_inverse_transform(Vectorizer):
    """inverse_transform recovers each document's unique analyzed terms."""
    # raw documents
    data = ALL_FOOD_DOCS
    vectorizer = Vectorizer()
    transformed_data = vectorizer.fit_transform(data)
    inversed_data = vectorizer.inverse_transform(transformed_data)
    assert isinstance(inversed_data, list)

    # Each inverted document must contain exactly the analyzer's unique terms.
    analyze = vectorizer.build_analyzer()
    for doc, inversed_terms in zip(data, inversed_data):
        terms = np.sort(np.unique(analyze(doc)))
        inversed_terms = np.sort(np.unique(inversed_terms))
        assert_array_equal(terms, inversed_terms)

    assert sparse.issparse(transformed_data)
    assert transformed_data.format == "csr"

    # Test that inverse_transform also works with numpy arrays and
    # scipy
    transformed_data2 = transformed_data.toarray()
    inversed_data2 = vectorizer.inverse_transform(transformed_data2)
    for terms, terms2 in zip(inversed_data, inversed_data2):
        assert_array_equal(np.sort(terms), np.sort(terms2))

    # Check that inverse_transform also works on non CSR sparse data:
    transformed_data3 = transformed_data.tocsc()
    inversed_data3 = vectorizer.inverse_transform(transformed_data3)
    for terms, terms3 in zip(inversed_data, inversed_data3):
        assert_array_equal(np.sort(terms), np.sort(terms3))
def test_count_vectorizer_pipeline_grid_selection():
    """Grid-search a CountVectorizer + LinearSVC pipeline on toy documents."""
    # raw documents
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS

    # label junk food as -1, the others as +1
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)

    # split the dataset for model development and final evaluation
    train_data, test_data, target_train, target_test = train_test_split(
        data, target, test_size=0.2, random_state=0
    )

    pipeline = Pipeline([("vect", CountVectorizer()), ("svc", LinearSVC(dual="auto"))])

    parameters = {
        "vect__ngram_range": [(1, 1), (1, 2)],
        "svc__loss": ("hinge", "squared_hinge"),
    }

    # find the best parameters for both the feature extraction and the
    # classifier
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=1, cv=3)

    # Check that the best model found by grid search is 100% correct on the
    # held out evaluation set.
    pred = grid_search.fit(train_data, target_train).predict(test_data)
    assert_array_equal(pred, target_test)

    # On this toy dataset all candidates converge to 100% accuracy, so the
    # first (unigram) candidate is retained as the best estimator.
    assert grid_search.best_score_ == 1.0
    best_vectorizer = grid_search.best_estimator_.named_steps["vect"]
    assert best_vectorizer.ngram_range == (1, 1)
def test_vectorizer_pipeline_grid_selection():
    """Grid-search a TfidfVectorizer + LinearSVC pipeline on toy documents."""
    # raw documents
    data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS

    # label junk food as -1, the others as +1
    target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)

    # split the dataset for model development and final evaluation
    train_data, test_data, target_train, target_test = train_test_split(
        data, target, test_size=0.1, random_state=0
    )

    pipeline = Pipeline([("vect", TfidfVectorizer()), ("svc", LinearSVC(dual="auto"))])

    parameters = {
        "vect__ngram_range": [(1, 1), (1, 2)],
        "vect__norm": ("l1", "l2"),
        "svc__loss": ("hinge", "squared_hinge"),
    }

    # find the best parameters for both the feature extraction and the
    # classifier
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)

    # Check that the best model found by grid search is 100% correct on the
    # held out evaluation set.
    pred = grid_search.fit(train_data, target_train).predict(test_data)
    assert_array_equal(pred, target_test)

    # On this toy dataset all candidates converge to 100% accuracy, so the
    # retained best estimator is checked against the expected settings.
    assert grid_search.best_score_ == 1.0
    best_vectorizer = grid_search.best_estimator_.named_steps["vect"]
    assert best_vectorizer.ngram_range == (1, 1)
    assert best_vectorizer.norm == "l2"
    assert not best_vectorizer.fixed_vocabulary_
  804. def test_vectorizer_pipeline_cross_validation():
  805. # raw documents
  806. data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
  807. # label junk food as -1, the others as +1
  808. target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
  809. pipeline = Pipeline([("vect", TfidfVectorizer()), ("svc", LinearSVC(dual="auto"))])
  810. cv_scores = cross_val_score(pipeline, data, target, cv=3)
  811. assert_array_equal(cv_scores, [1.0, 1.0, 1.0])
@fails_if_pypy
def test_vectorizer_unicode():
    """Count and hashing vectorizers agree on non-ASCII (Cyrillic) text."""
    # tests that the count vectorizer works with cyrillic.
    document = (
        "Машинное обучение — обширный подраздел искусственного "
        "интеллекта, изучающий методы построения алгоритмов, "
        "способных обучаться."
    )

    vect = CountVectorizer()
    X_counted = vect.fit_transform([document])
    assert X_counted.shape == (1, 12)

    vect = HashingVectorizer(norm=None, alternate_sign=False)
    X_hashed = vect.transform([document])
    # default hashing space is 2**20 features
    assert X_hashed.shape == (1, 2**20)

    # No collisions on such a small dataset
    assert X_counted.nnz == X_hashed.nnz

    # When norm is None and not alternate_sign, the tokens are counted up to
    # collisions
    assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
  831. def test_tfidf_vectorizer_with_fixed_vocabulary():
  832. # non regression smoke test for inheritance issues
  833. vocabulary = ["pizza", "celeri"]
  834. vect = TfidfVectorizer(vocabulary=vocabulary)
  835. X_1 = vect.fit_transform(ALL_FOOD_DOCS)
  836. X_2 = vect.transform(ALL_FOOD_DOCS)
  837. assert_array_almost_equal(X_1.toarray(), X_2.toarray())
  838. assert vect.fixed_vocabulary_
  839. def test_pickling_vectorizer():
  840. instances = [
  841. HashingVectorizer(),
  842. HashingVectorizer(norm="l1"),
  843. HashingVectorizer(binary=True),
  844. HashingVectorizer(ngram_range=(1, 2)),
  845. CountVectorizer(),
  846. CountVectorizer(preprocessor=strip_tags),
  847. CountVectorizer(analyzer=lazy_analyze),
  848. CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
  849. CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
  850. TfidfVectorizer(),
  851. TfidfVectorizer(analyzer=lazy_analyze),
  852. TfidfVectorizer().fit(JUNK_FOOD_DOCS),
  853. ]
  854. for orig in instances:
  855. s = pickle.dumps(orig)
  856. copy = pickle.loads(s)
  857. assert type(copy) == orig.__class__
  858. assert copy.get_params() == orig.get_params()
  859. if IS_PYPY and isinstance(orig, HashingVectorizer):
  860. continue
  861. else:
  862. assert_allclose_dense_sparse(
  863. copy.fit_transform(JUNK_FOOD_DOCS),
  864. orig.fit_transform(JUNK_FOOD_DOCS),
  865. )
@pytest.mark.parametrize(
    "factory",
    [
        CountVectorizer.build_analyzer,
        CountVectorizer.build_preprocessor,
        CountVectorizer.build_tokenizer,
    ],
)
def test_pickling_built_processors(factory):
    """Tokenizers cannot be pickled
    https://github.com/scikit-learn/scikit-learn/issues/12833
    """
    vec = CountVectorizer()
    function = factory(vec)
    text = "J'ai mangé du kangourou ce midi, c'était pas très bon."
    roundtripped_function = pickle.loads(pickle.dumps(function))
    # the round-tripped callable must behave identically on the same text
    expected = function(text)
    result = roundtripped_function(text)
    assert result == expected
  885. def test_countvectorizer_vocab_sets_when_pickling():
  886. # ensure that vocabulary of type set is coerced to a list to
  887. # preserve iteration ordering after deserialization
  888. rng = np.random.RandomState(0)
  889. vocab_words = np.array(
  890. [
  891. "beer",
  892. "burger",
  893. "celeri",
  894. "coke",
  895. "pizza",
  896. "salad",
  897. "sparkling",
  898. "tomato",
  899. "water",
  900. ]
  901. )
  902. for x in range(0, 100):
  903. vocab_set = set(rng.choice(vocab_words, size=5, replace=False))
  904. cv = CountVectorizer(vocabulary=vocab_set)
  905. unpickled_cv = pickle.loads(pickle.dumps(cv))
  906. cv.fit(ALL_FOOD_DOCS)
  907. unpickled_cv.fit(ALL_FOOD_DOCS)
  908. assert_array_equal(
  909. cv.get_feature_names_out(), unpickled_cv.get_feature_names_out()
  910. )
  911. def test_countvectorizer_vocab_dicts_when_pickling():
  912. rng = np.random.RandomState(0)
  913. vocab_words = np.array(
  914. [
  915. "beer",
  916. "burger",
  917. "celeri",
  918. "coke",
  919. "pizza",
  920. "salad",
  921. "sparkling",
  922. "tomato",
  923. "water",
  924. ]
  925. )
  926. for x in range(0, 100):
  927. vocab_dict = dict()
  928. words = rng.choice(vocab_words, size=5, replace=False)
  929. for y in range(0, 5):
  930. vocab_dict[words[y]] = y
  931. cv = CountVectorizer(vocabulary=vocab_dict)
  932. unpickled_cv = pickle.loads(pickle.dumps(cv))
  933. cv.fit(ALL_FOOD_DOCS)
  934. unpickled_cv.fit(ALL_FOOD_DOCS)
  935. assert_array_equal(
  936. cv.get_feature_names_out(), unpickled_cv.get_feature_names_out()
  937. )
def test_stop_words_removal():
    """Ensure that deleting the stop_words_ attribute doesn't affect transform."""
    fitted_vectorizers = (
        TfidfVectorizer().fit(JUNK_FOOD_DOCS),
        CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
        CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
    )

    for vect in fitted_vectorizers:
        vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()

        # transform must give the same result with stop_words_ set to None...
        vect.stop_words_ = None
        stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()

        # ...and with the attribute removed entirely.
        delattr(vect, "stop_words_")
        stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()

        assert_array_equal(stop_None_transform, vect_transform)
        assert_array_equal(stop_del_transform, vect_transform)
  953. def test_pickling_transformer():
  954. X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
  955. orig = TfidfTransformer().fit(X)
  956. s = pickle.dumps(orig)
  957. copy = pickle.loads(s)
  958. assert type(copy) == orig.__class__
  959. assert_array_equal(copy.fit_transform(X).toarray(), orig.fit_transform(X).toarray())
  960. def test_transformer_idf_setter():
  961. X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
  962. orig = TfidfTransformer().fit(X)
  963. copy = TfidfTransformer()
  964. copy.idf_ = orig.idf_
  965. assert_array_equal(copy.transform(X).toarray(), orig.transform(X).toarray())
def test_tfidf_vectorizer_setter():
    """``idf_`` can be copied onto a new vectorizer, but only when use_idf=True."""
    orig = TfidfVectorizer(use_idf=True)
    orig.fit(JUNK_FOOD_DOCS)
    copy = TfidfVectorizer(vocabulary=orig.vocabulary_, use_idf=True)
    copy.idf_ = orig.idf_
    assert_array_equal(
        copy.transform(JUNK_FOOD_DOCS).toarray(),
        orig.transform(JUNK_FOOD_DOCS).toarray(),
    )
    # `idf_` cannot be set with `use_idf=False`
    copy = TfidfVectorizer(vocabulary=orig.vocabulary_, use_idf=False)
    # NOTE: "user_idf" (sic) matches the library's literal error message.
    err_msg = "`idf_` cannot be set when `user_idf=False`."
    with pytest.raises(ValueError, match=err_msg):
        copy.idf_ = orig.idf_
  980. def test_tfidfvectorizer_invalid_idf_attr():
  981. vect = TfidfVectorizer(use_idf=True)
  982. vect.fit(JUNK_FOOD_DOCS)
  983. copy = TfidfVectorizer(vocabulary=vect.vocabulary_, use_idf=True)
  984. expected_idf_len = len(vect.idf_)
  985. invalid_idf = [1.0] * (expected_idf_len + 1)
  986. with pytest.raises(ValueError):
  987. setattr(copy, "idf_", invalid_idf)
  988. def test_non_unique_vocab():
  989. vocab = ["a", "b", "c", "a", "a"]
  990. vect = CountVectorizer(vocabulary=vocab)
  991. with pytest.raises(ValueError):
  992. vect.fit([])
  993. @fails_if_pypy
  994. def test_hashingvectorizer_nan_in_docs():
  995. # np.nan can appear when using pandas to load text fields from a csv file
  996. # with missing values.
  997. message = "np.nan is an invalid document, expected byte or unicode string."
  998. exception = ValueError
  999. def func():
  1000. hv = HashingVectorizer()
  1001. hv.fit_transform(["hello world", np.nan, "hello hello"])
  1002. with pytest.raises(exception, match=message):
  1003. func()
  1004. def test_tfidfvectorizer_binary():
  1005. # Non-regression test: TfidfVectorizer used to ignore its "binary" param.
  1006. v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
  1007. assert v.binary
  1008. X = v.fit_transform(["hello world", "hello hello"]).toarray()
  1009. assert_array_equal(X.ravel(), [1, 1, 1, 0])
  1010. X2 = v.transform(["hello world", "hello hello"]).toarray()
  1011. assert_array_equal(X2.ravel(), [1, 1, 1, 0])
  1012. def test_tfidfvectorizer_export_idf():
  1013. vect = TfidfVectorizer(use_idf=True)
  1014. vect.fit(JUNK_FOOD_DOCS)
  1015. assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
  1016. def test_vectorizer_vocab_clone():
  1017. vect_vocab = TfidfVectorizer(vocabulary=["the"])
  1018. vect_vocab_clone = clone(vect_vocab)
  1019. vect_vocab.fit(ALL_FOOD_DOCS)
  1020. vect_vocab_clone.fit(ALL_FOOD_DOCS)
  1021. assert vect_vocab_clone.vocabulary_ == vect_vocab.vocabulary_
@pytest.mark.parametrize(
    "Vectorizer", (CountVectorizer, TfidfVectorizer, HashingVectorizer)
)
def test_vectorizer_string_object_as_input(Vectorizer):
    """Passing a bare string instead of an iterable of documents must raise."""
    message = "Iterable over raw text documents expected, string object received."
    vec = Vectorizer()

    with pytest.raises(ValueError, match=message):
        vec.fit_transform("hello world!")

    with pytest.raises(ValueError, match=message):
        vec.fit("hello world!")

    # transform after a valid fit must reject strings too
    vec.fit(["some text", "some other text"])
    with pytest.raises(ValueError, match=message):
        vec.transform("hello world!")
  1035. @pytest.mark.parametrize("X_dtype", [np.float32, np.float64])
  1036. def test_tfidf_transformer_type(X_dtype):
  1037. X = sparse.rand(10, 20000, dtype=X_dtype, random_state=42)
  1038. X_trans = TfidfTransformer().fit_transform(X)
  1039. assert X_trans.dtype == X.dtype
  1040. def test_tfidf_transformer_sparse():
  1041. X = sparse.rand(10, 20000, dtype=np.float64, random_state=42)
  1042. X_csc = sparse.csc_matrix(X)
  1043. X_csr = sparse.csr_matrix(X)
  1044. X_trans_csc = TfidfTransformer().fit_transform(X_csc)
  1045. X_trans_csr = TfidfTransformer().fit_transform(X_csr)
  1046. assert_allclose_dense_sparse(X_trans_csc, X_trans_csr)
  1047. assert X_trans_csc.format == X_trans_csr.format
@pytest.mark.parametrize(
    "vectorizer_dtype, output_dtype, warning_expected",
    [
        (np.int32, np.float64, True),
        (np.int64, np.float64, True),
        (np.float32, np.float32, False),
        (np.float64, np.float64, False),
    ],
)
def test_tfidf_vectorizer_type(vectorizer_dtype, output_dtype, warning_expected):
    """Integer dtypes are upcast to float64 with a warning; floats pass through."""
    X = np.array(["numpy", "scipy", "sklearn"])
    vectorizer = TfidfVectorizer(dtype=vectorizer_dtype)

    warning_msg_match = "'dtype' should be used."
    if warning_expected:
        with pytest.warns(UserWarning, match=warning_msg_match):
            X_idf = vectorizer.fit_transform(X)
    else:
        # turn UserWarning into an error to prove none is emitted
        with warnings.catch_warnings():
            warnings.simplefilter("error", UserWarning)
            X_idf = vectorizer.fit_transform(X)
    assert X_idf.dtype == output_dtype
@pytest.mark.parametrize(
    "vec",
    [
        HashingVectorizer(ngram_range=(2, 1)),
        CountVectorizer(ngram_range=(2, 1)),
        TfidfVectorizer(ngram_range=(2, 1)),
    ],
)
def test_vectorizers_invalid_ngram_range(vec):
    """An ngram_range whose lower bound exceeds its upper bound must raise."""
    # vectorizers could be initialized with invalid ngram range
    # test for raising error message
    invalid_range = vec.ngram_range
    message = re.escape(
        f"Invalid value for ngram_range={invalid_range} "
        "lower boundary larger than the upper boundary."
    )
    if isinstance(vec, HashingVectorizer) and IS_PYPY:
        pytest.xfail(reason="HashingVectorizer is not supported on PyPy")

    with pytest.raises(ValueError, match=message):
        vec.fit(["good news everyone"])

    with pytest.raises(ValueError, match=message):
        vec.fit_transform(["good news everyone"])

    # HashingVectorizer is stateless, so transform alone must also validate
    if isinstance(vec, HashingVectorizer):
        with pytest.raises(ValueError, match=message):
            vec.transform(["good news everyone"])
def _check_stop_words_consistency(estimator):
    """Invoke the estimator's private stop-word consistency check.

    Rebuilds the tokenizer and preprocessor from the estimator and forwards
    them, returning whatever the private check returns.
    """
    stop_words = estimator.get_stop_words()
    tokenize = estimator.build_tokenizer()
    preprocess = estimator.build_preprocessor()
    return estimator._check_stop_words_consistency(stop_words, preprocess, tokenize)
@fails_if_pypy
def test_vectorizer_stop_words_inconsistent():
    """Inconsistent stop words warn once per stop list and cache the verdict."""
    lstr = r"\['and', 'll', 've'\]"
    message = (
        "Your stop_words may be inconsistent with your "
        "preprocessing. Tokenizing the stop words generated "
        "tokens %s not in stop_words." % lstr
    )
    for vec in [CountVectorizer(), TfidfVectorizer(), HashingVectorizer()]:
        vec.set_params(stop_words=["you've", "you", "you'll", "AND"])
        with pytest.warns(UserWarning, match=message):
            vec.fit_transform(["hello world"])
        # reset stop word validation
        del vec._stop_words_id
        assert _check_stop_words_consistency(vec) is False

    # Only one warning per stop list
    with warnings.catch_warnings():
        warnings.simplefilter("error", UserWarning)
        vec.fit_transform(["hello world"])
    # a cached (already-assessed) stop list returns None instead of a verdict
    assert _check_stop_words_consistency(vec) is None

    # Test caching of inconsistency assessment
    vec.set_params(stop_words=["you've", "you", "you'll", "blah", "AND"])
    with pytest.warns(UserWarning, match=message):
        vec.fit_transform(["hello world"])
@skip_if_32bit
def test_countvectorizer_sort_features_64bit_sparse_indices():
    """
    Check that CountVectorizer._sort_features preserves the dtype of its sparse
    feature matrix.

    This test is skipped on 32bit platforms, see:
    https://github.com/scikit-learn/scikit-learn/pull/11295
    for more details.
    """
    X = sparse.csr_matrix((5, 5), dtype=np.int64)

    # force indices and indptr to int64.
    INDICES_DTYPE = np.int64
    X.indices = X.indices.astype(INDICES_DTYPE)
    X.indptr = X.indptr.astype(INDICES_DTYPE)

    vocabulary = {"scikit-learn": 0, "is": 1, "great!": 2}

    Xs = CountVectorizer()._sort_features(X, vocabulary)

    # sorting must not downcast the 64-bit index arrays
    assert INDICES_DTYPE == Xs.indices.dtype
@fails_if_pypy
@pytest.mark.parametrize(
    "Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer]
)
def test_stop_word_validation_custom_preprocessor(Estimator):
    """Stop-word consistency checking with custom preprocessors/tokenizers."""
    data = [{"text": "some text"}]

    vec = Estimator()
    # default configuration is consistent
    assert _check_stop_words_consistency(vec) is True

    # a preprocessor that returns non-string data yields an "error" verdict
    vec = Estimator(preprocessor=lambda x: x["text"], stop_words=["and"])
    assert _check_stop_words_consistency(vec) == "error"
    # checks are cached
    assert _check_stop_words_consistency(vec) is None
    vec.fit_transform(data)

    class CustomEstimator(Estimator):
        # override via subclassing instead of the preprocessor parameter
        def build_preprocessor(self):
            return lambda x: x["text"]

    vec = CustomEstimator(stop_words=["and"])
    assert _check_stop_words_consistency(vec) == "error"

    vec = Estimator(
        tokenizer=lambda doc: re.compile(r"\w{1,}").findall(doc), stop_words=["and"]
    )
    assert _check_stop_words_consistency(vec) is True
@pytest.mark.parametrize(
    "Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer]
)
@pytest.mark.parametrize(
    "input_type, err_type, err_msg",
    [
        ("filename", FileNotFoundError, ""),
        ("file", AttributeError, "'str' object has no attribute 'read'"),
    ],
)
def test_callable_analyzer_error(Estimator, input_type, err_type, err_msg):
    """A callable analyzer with input='file'/'filename' still reads the input."""
    if issubclass(Estimator, HashingVectorizer) and IS_PYPY:
        pytest.xfail("HashingVectorizer is not supported on PyPy")
    data = ["this is text, not file or filename"]
    # passing raw text where a file/filename is expected must raise
    with pytest.raises(err_type, match=err_msg):
        Estimator(analyzer=lambda x: x.split(), input=input_type).fit_transform(data)
@pytest.mark.parametrize(
    "Estimator",
    [
        CountVectorizer,
        TfidfVectorizer,
        pytest.param(HashingVectorizer, marks=fails_if_pypy),
    ],
)
@pytest.mark.parametrize(
    "analyzer", [lambda doc: open(doc, "r"), lambda doc: doc.read()]
)
@pytest.mark.parametrize("input_type", ["file", "filename"])
def test_callable_analyzer_change_behavior(Estimator, analyzer, input_type):
    """Analyzers that open/read raw text themselves fail on non-file input."""
    data = ["this is text, not file or filename"]
    with pytest.raises((FileNotFoundError, AttributeError)):
        Estimator(analyzer=analyzer, input=input_type).fit_transform(data)
@pytest.mark.parametrize(
    "Estimator", [CountVectorizer, TfidfVectorizer, HashingVectorizer]
)
def test_callable_analyzer_reraise_error(tmpdir, Estimator):
    """Check if a custom exception from the analyzer is shown to the user."""

    def analyzer(doc):
        raise Exception("testing")

    if issubclass(Estimator, HashingVectorizer) and IS_PYPY:
        pytest.xfail("HashingVectorizer is not supported on PyPy")

    f = tmpdir.join("file.txt")
    f.write("sample content\n")

    # the analyzer's own exception must propagate, not be swallowed
    with pytest.raises(Exception, match="testing"):
        Estimator(analyzer=analyzer, input="file").fit_transform([f])
@pytest.mark.parametrize(
    "Vectorizer", [CountVectorizer, HashingVectorizer, TfidfVectorizer]
)
@pytest.mark.parametrize(
    (
        "stop_words, tokenizer, preprocessor, ngram_range, token_pattern,"
        "analyzer, unused_name, ovrd_name, ovrd_msg"
    ),
    [
        (
            ["you've", "you'll"],
            None,
            None,
            (1, 1),
            None,
            "char",
            "'stop_words'",
            "'analyzer'",
            "!= 'word'",
        ),
        (
            None,
            lambda s: s.split(),
            None,
            (1, 1),
            None,
            "char",
            "'tokenizer'",
            "'analyzer'",
            "!= 'word'",
        ),
        (
            None,
            lambda s: s.split(),
            None,
            (1, 1),
            r"\w+",
            "word",
            "'token_pattern'",
            "'tokenizer'",
            "is not None",
        ),
        (
            None,
            None,
            lambda s: s.upper(),
            (1, 1),
            r"\w+",
            lambda s: s.upper(),
            "'preprocessor'",
            "'analyzer'",
            "is callable",
        ),
        (
            None,
            None,
            None,
            (1, 2),
            None,
            lambda s: s.upper(),
            "'ngram_range'",
            "'analyzer'",
            "is callable",
        ),
        (
            None,
            None,
            None,
            (1, 1),
            r"\w+",
            "char",
            "'token_pattern'",
            "'analyzer'",
            "!= 'word'",
        ),
    ],
)
def test_unused_parameters_warn(
    Vectorizer,
    stop_words,
    tokenizer,
    preprocessor,
    ngram_range,
    token_pattern,
    analyzer,
    unused_name,
    ovrd_name,
    ovrd_msg,
):
    """Parameters overridden by another setting trigger an explanatory warning.

    Each parametrized case configures one parameter (``unused_name``) that is
    rendered ineffective by another (``ovrd_name``), and checks the warning
    message names both.
    """
    train_data = JUNK_FOOD_DOCS
    # setting parameter and checking for corresponding warning messages
    vect = Vectorizer()
    vect.set_params(
        stop_words=stop_words,
        tokenizer=tokenizer,
        preprocessor=preprocessor,
        ngram_range=ngram_range,
        token_pattern=token_pattern,
        analyzer=analyzer,
    )
    msg = "The parameter %s will not be used since %s %s" % (
        unused_name,
        ovrd_name,
        ovrd_msg,
    )
    with pytest.warns(UserWarning, match=msg):
        vect.fit(train_data)
@pytest.mark.parametrize(
    "Vectorizer, X",
    (
        (HashingVectorizer, [{"foo": 1, "bar": 2}, {"foo": 3, "baz": 1}]),
        (CountVectorizer, JUNK_FOOD_DOCS),
    ),
)
def test_n_features_in(Vectorizer, X):
    """For vectorizers, n_features_in_ does not make sense."""
    vectorizer = Vectorizer()
    # the attribute must be absent both before and after fitting
    assert not hasattr(vectorizer, "n_features_in_")
    vectorizer.fit(X)
    assert not hasattr(vectorizer, "n_features_in_")
  1327. def test_tie_breaking_sample_order_invariance():
  1328. # Checks the sample order invariance when setting max_features
  1329. # non-regression test for #17939
  1330. vec = CountVectorizer(max_features=1)
  1331. vocab1 = vec.fit(["hello", "world"]).vocabulary_
  1332. vocab2 = vec.fit(["world", "hello"]).vocabulary_
  1333. assert vocab1 == vocab2
  1334. @fails_if_pypy
  1335. def test_nonnegative_hashing_vectorizer_result_indices():
  1336. # add test for pr 19035
  1337. hashing = HashingVectorizer(n_features=1000000, ngram_range=(2, 3))
  1338. indices = hashing.transform(["22pcs efuture"]).indices
  1339. assert indices[0] >= 0
  1340. @pytest.mark.parametrize(
  1341. "Estimator", [CountVectorizer, TfidfVectorizer, TfidfTransformer, HashingVectorizer]
  1342. )
  1343. def test_vectorizers_do_not_have_set_output(Estimator):
  1344. """Check that vectorizers do not define set_output."""
  1345. est = Estimator()
  1346. assert not hasattr(est, "set_output")