# testing_utils.py
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import contextlib
import doctest
import functools
import importlib
import inspect
import logging
import multiprocessing
import os
import re
import shlex
import shutil
import subprocess
import sys
import tempfile
import time
import unittest
from collections import defaultdict
from collections.abc import Mapping
from dataclasses import MISSING, fields
from functools import wraps
from io import StringIO
from pathlib import Path
from typing import Callable, Dict, Iterable, Iterator, List, Optional, Union
from unittest import mock
from unittest.mock import patch

import urllib3

from transformers import logging as transformers_logging

from .integrations import (
    is_clearml_available,
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_tensorboard_available,
    is_wandb_available,
)
from .integrations.deepspeed import is_deepspeed_available
from .utils import (
    ACCELERATE_MIN_VERSION,
    GGUF_MIN_VERSION,
    is_accelerate_available,
    is_apex_available,
    is_aqlm_available,
    is_auto_awq_available,
    is_auto_gptq_available,
    is_av_available,
    is_bitsandbytes_available,
    is_bitsandbytes_multi_backend_available,
    is_bs4_available,
    is_compressed_tensors_available,
    is_cv2_available,
    is_cython_available,
    is_detectron2_available,
    is_eetq_available,
    is_essentia_available,
    is_faiss_available,
    is_fbgemm_gpu_available,
    is_flash_attn_2_available,
    is_flax_available,
    is_fsdp_available,
    is_ftfy_available,
    is_g2p_en_available,
    is_galore_torch_available,
    is_gguf_available,
    is_grokadamw_available,
    is_ipex_available,
    is_jieba_available,
    is_jinja_available,
    is_jumanpp_available,
    is_keras_nlp_available,
    is_levenshtein_available,
    is_librosa_available,
    is_liger_kernel_available,
    is_lomo_available,
    is_natten_available,
    is_nltk_available,
    is_onnx_available,
    is_optimum_available,
    is_optimum_quanto_available,
    is_pandas_available,
    is_peft_available,
    is_phonemizer_available,
    is_pretty_midi_available,
    is_pyctcdecode_available,
    is_pytesseract_available,
    is_pytest_available,
    is_pytorch_quantization_available,
    is_rjieba_available,
    is_sacremoses_available,
    is_safetensors_available,
    is_schedulefree_available,
    is_scipy_available,
    is_sentencepiece_available,
    is_seqio_available,
    is_soundfile_availble,
    is_spacy_available,
    is_sudachi_available,
    is_sudachi_projection_available,
    is_tensorflow_probability_available,
    is_tensorflow_text_available,
    is_tf2onnx_available,
    is_tf_available,
    is_tiktoken_available,
    is_timm_available,
    is_tokenizers_available,
    is_torch_available,
    is_torch_bf16_available_on_device,
    is_torch_bf16_cpu_available,
    is_torch_bf16_gpu_available,
    is_torch_deterministic,
    is_torch_fp16_available_on_device,
    is_torch_neuroncore_available,
    is_torch_npu_available,
    is_torch_sdpa_available,
    is_torch_tensorrt_fx_available,
    is_torch_tf32_available,
    is_torch_xla_available,
    is_torch_xpu_available,
    is_torchao_available,
    is_torchaudio_available,
    is_torchdynamo_available,
    is_torchvision_available,
    is_vision_available,
    strtobool,
)


if is_accelerate_available():
    from accelerate.state import AcceleratorState, PartialState


if is_pytest_available():
    from _pytest.doctest import (
        Module,
        _get_checker,
        _get_continue_on_failure,
        _get_runner,
        _is_mocked,
        _patch_unwrap_mock_aware,
        get_optionflags,
    )
    from _pytest.outcomes import skip
    from _pytest.pathlib import import_path
    from pytest import DoctestItem
else:
    Module = object
    DoctestItem = object


SMALL_MODEL_IDENTIFIER = "julien-c/bert-xsmall-dummy"
DUMMY_UNKNOWN_IDENTIFIER = "julien-c/dummy-unknown"
DUMMY_DIFF_TOKENIZER_IDENTIFIER = "julien-c/dummy-diff-tokenizer"
# Used to test Auto{Config, Model, Tokenizer} model_type detection.

# Used to test the hub
USER = "__DUMMY_TRANSFORMERS_USER__"
ENDPOINT_STAGING = "https://hub-ci.huggingface.co"

# Not critical, only usable on the sandboxed CI instance.
TOKEN = "hf_94wBhPGp6KrrTH3KDchhKpRxZwd6dmHWLL"

if is_torch_available():
    import torch

    IS_ROCM_SYSTEM = torch.version.hip is not None
    IS_CUDA_SYSTEM = torch.version.cuda is not None
else:
    IS_ROCM_SYSTEM = False
    IS_CUDA_SYSTEM = False


def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


def parse_int_from_env(key, default=None):
    try:
        value = os.environ[key]
    except KeyError:
        _value = default
    else:
        try:
            _value = int(value)
        except ValueError:
            raise ValueError(f"If set, {key} must be an int.")
    return _value
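
# Illustrative sketch (comments only, nothing here runs on import): how the two env
# parsers above behave. `strtobool` accepts the usual spellings ("1", "true", "yes",
# "0", "false", "no", ...); the env var names below are hypothetical.
#
#     os.environ["MY_FLAG"] = "yes"
#     os.environ["MY_LIMIT"] = "4096"
#     assert parse_flag_from_env("MY_FLAG", default=False)        # truthy
#     assert parse_int_from_env("MY_LIMIT", default=None) == 4096
#     assert parse_flag_from_env("MY_MISSING_FLAG") is False      # unset -> default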


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
_run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
_run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
_run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
_tf_gpu_memory_limit = parse_int_from_env("TF_GPU_MEMORY_LIMIT", default=None)
_run_pipeline_tests = parse_flag_from_env("RUN_PIPELINE_TESTS", default=True)
_run_agent_tests = parse_flag_from_env("RUN_AGENT_TESTS", default=False)
_run_third_party_device_tests = parse_flag_from_env("RUN_THIRD_PARTY_DEVICE_TESTS", default=False)


def get_device_count():
    import torch

    if is_torch_xpu_available():
        num_devices = torch.xpu.device_count()
    else:
        num_devices = torch.cuda.device_count()

    return num_devices


def is_pt_tf_cross_test(test_case):
    """
    Decorator marking a test that controls interactions between PyTorch and TensorFlow.

    PT+TF tests are skipped by default, and we can run only them by setting the RUN_PT_TF_CROSS_TESTS environment
    variable to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
    """
    if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
        return unittest.skip(reason="test is PT+TF test")(test_case)
    else:
        try:
            import pytest  # We don't need a hard dependency on pytest in the main library
        except ImportError:
            return test_case
        else:
            return pytest.mark.is_pt_tf_cross_test()(test_case)
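
# Usage sketch (hypothetical test, assuming pytest is installed): the decorator
# either skips the test outright or tags it with the `is_pt_tf_cross_test` mark,
# so the PT+TF suite can be selected once the env var is set:
#
#     @is_pt_tf_cross_test
#     def test_pt_tf_output_equivalence(self):
#         ...  # compare PyTorch and TensorFlow outputs here
#
#     # RUN_PT_TF_CROSS_TESTS=1 pytest -m is_pt_tf_cross_test tests/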


def is_pt_flax_cross_test(test_case):
    """
    Decorator marking a test that controls interactions between PyTorch and Flax.

    PT+FLAX tests are skipped by default, and we can run only them by setting the RUN_PT_FLAX_CROSS_TESTS environment
    variable to a truthy value and selecting the is_pt_flax_cross_test pytest mark.
    """
    if not _run_pt_flax_cross_tests or not is_torch_available() or not is_flax_available():
        return unittest.skip(reason="test is PT+FLAX test")(test_case)
    else:
        try:
            import pytest  # We don't need a hard dependency on pytest in the main library
        except ImportError:
            return test_case
        else:
            return pytest.mark.is_pt_flax_cross_test()(test_case)


def is_staging_test(test_case):
    """
    Decorator marking a test as a staging test.

    Those tests will run using the staging environment of huggingface.co instead of the real model hub.
    """
    if not _run_staging:
        return unittest.skip(reason="test is staging test")(test_case)
    else:
        try:
            import pytest  # We don't need a hard dependency on pytest in the main library
        except ImportError:
            return test_case
        else:
            return pytest.mark.is_staging_test()(test_case)


def is_pipeline_test(test_case):
    """
    Decorator marking a test as a pipeline test. If RUN_PIPELINE_TESTS is set to a falsy value, those tests will be
    skipped.
    """
    if not _run_pipeline_tests:
        return unittest.skip(reason="test is pipeline test")(test_case)
    else:
        try:
            import pytest  # We don't need a hard dependency on pytest in the main library
        except ImportError:
            return test_case
        else:
            return pytest.mark.is_pipeline_test()(test_case)


def is_agent_test(test_case):
    """
    Decorator marking a test as an agent test. If RUN_AGENT_TESTS is set to a falsy value, those tests will be skipped.
    """
    if not _run_agent_tests:
        return unittest.skip(reason="test is an agent test")(test_case)
    else:
        try:
            import pytest  # We don't need a hard dependency on pytest in the main library
        except ImportError:
            return test_case
        else:
            return pytest.mark.is_agent_test()(test_case)


def slow(test_case):
    """
    Decorator marking a test as slow.

    Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.
    """
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
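
# Usage sketch (hypothetical test): decorate expensive tests with @slow; they are
# collected but skipped until RUN_SLOW is set to a truthy value.
#
#     class MyModelIntegrationTest(unittest.TestCase):
#         @slow
#         def test_full_generation(self):
#             ...  # heavy end-to-end check, only run with RUN_SLOW=1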


def tooslow(test_case):
    """
    Decorator marking a test as too slow.

    These tests are skipped entirely while they're in the process of being fixed. No test should stay tagged as
    "tooslow", as such tests are never run by the CI.
    """
    return unittest.skip(reason="test is too slow")(test_case)


def skip_if_not_implemented(test_func):
    @functools.wraps(test_func)
    def wrapper(*args, **kwargs):
        try:
            return test_func(*args, **kwargs)
        except NotImplementedError as e:
            raise unittest.SkipTest(f"Test skipped due to NotImplementedError: {e}")

    return wrapper


def apply_skip_if_not_implemented(cls):
    """
    Class decorator to apply @skip_if_not_implemented to all test methods.
    """
    for attr_name in dir(cls):
        if attr_name.startswith("test_"):
            attr = getattr(cls, attr_name)
            if callable(attr):
                setattr(cls, attr_name, skip_if_not_implemented(attr))
    return cls
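
# Usage sketch (hypothetical class): applied to a test class, every `test_*` method
# that raises NotImplementedError is reported as skipped instead of failed.
#
#     @apply_skip_if_not_implemented
#     class MyBackendTest(unittest.TestCase):
#         def test_optional_feature(self):
#             raise NotImplementedError("backend does not support this yet")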


def custom_tokenizers(test_case):
    """
    Decorator marking a test for a custom tokenizer.

    Custom tokenizers require additional dependencies, and are skipped by default. Set the RUN_CUSTOM_TOKENIZERS
    environment variable to a truthy value to run them.
    """
    return unittest.skipUnless(_run_custom_tokenizers, "test of custom tokenizers")(test_case)


def require_bs4(test_case):
    """
    Decorator marking a test that requires BeautifulSoup4. These tests are skipped when BeautifulSoup4 isn't installed.
    """
    return unittest.skipUnless(is_bs4_available(), "test requires BeautifulSoup4")(test_case)


def require_galore_torch(test_case):
    """
    Decorator marking a test that requires GaLore. These tests are skipped when GaLore isn't installed.
    https://github.com/jiaweizzhao/GaLore
    """
    return unittest.skipUnless(is_galore_torch_available(), "test requires GaLore")(test_case)


def require_lomo(test_case):
    """
    Decorator marking a test that requires LOMO. These tests are skipped when LOMO-optim isn't installed.
    https://github.com/OpenLMLab/LOMO
    """
    return unittest.skipUnless(is_lomo_available(), "test requires LOMO")(test_case)


def require_grokadamw(test_case):
    """
    Decorator marking a test that requires GrokAdamW. These tests are skipped when GrokAdamW isn't installed.
    """
    return unittest.skipUnless(is_grokadamw_available(), "test requires GrokAdamW")(test_case)


def require_schedulefree(test_case):
    """
    Decorator marking a test that requires schedulefree. These tests are skipped when schedulefree isn't installed.
    https://github.com/facebookresearch/schedule_free
    """
    return unittest.skipUnless(is_schedulefree_available(), "test requires schedulefree")(test_case)


def require_cv2(test_case):
    """
    Decorator marking a test that requires OpenCV.

    These tests are skipped when OpenCV isn't installed.
    """
    return unittest.skipUnless(is_cv2_available(), "test requires OpenCV")(test_case)


def require_levenshtein(test_case):
    """
    Decorator marking a test that requires Levenshtein.

    These tests are skipped when Levenshtein isn't installed.
    """
    return unittest.skipUnless(is_levenshtein_available(), "test requires Levenshtein")(test_case)


def require_nltk(test_case):
    """
    Decorator marking a test that requires NLTK.

    These tests are skipped when NLTK isn't installed.
    """
    return unittest.skipUnless(is_nltk_available(), "test requires NLTK")(test_case)


def require_accelerate(test_case, min_version: str = ACCELERATE_MIN_VERSION):
    """
    Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
    """
    return unittest.skipUnless(
        is_accelerate_available(min_version), f"test requires accelerate version >= {min_version}"
    )(test_case)


def require_gguf(test_case, min_version: str = GGUF_MIN_VERSION):
    """
    Decorator marking a test that requires gguf. These tests are skipped when gguf isn't installed.
    """
    return unittest.skipUnless(is_gguf_available(min_version), f"test requires gguf version >= {min_version}")(
        test_case
    )


def require_fsdp(test_case, min_version: str = "1.12.0"):
    """
    Decorator marking a test that requires fsdp. These tests are skipped when fsdp isn't installed.
    """
    return unittest.skipUnless(is_fsdp_available(min_version), f"test requires torch version >= {min_version}")(
        test_case
    )


def require_g2p_en(test_case):
    """
    Decorator marking a test that requires g2p_en. These tests are skipped when g2p_en isn't installed.
    """
    return unittest.skipUnless(is_g2p_en_available(), "test requires g2p_en")(test_case)


def require_safetensors(test_case):
    """
    Decorator marking a test that requires safetensors. These tests are skipped when safetensors isn't installed.
    """
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_rjieba(test_case):
    """
    Decorator marking a test that requires rjieba. These tests are skipped when rjieba isn't installed.
    """
    return unittest.skipUnless(is_rjieba_available(), "test requires rjieba")(test_case)


def require_jieba(test_case):
    """
    Decorator marking a test that requires jieba. These tests are skipped when jieba isn't installed.
    """
    return unittest.skipUnless(is_jieba_available(), "test requires jieba")(test_case)


def require_jinja(test_case):
    """
    Decorator marking a test that requires jinja. These tests are skipped when jinja isn't installed.
    """
    return unittest.skipUnless(is_jinja_available(), "test requires jinja")(test_case)


def require_tf2onnx(test_case):
    return unittest.skipUnless(is_tf2onnx_available(), "test requires tf2onnx")(test_case)


def require_onnx(test_case):
    return unittest.skipUnless(is_onnx_available(), "test requires ONNX")(test_case)


def require_timm(test_case):
    """
    Decorator marking a test that requires Timm.

    These tests are skipped when Timm isn't installed.
    """
    return unittest.skipUnless(is_timm_available(), "test requires Timm")(test_case)


def require_natten(test_case):
    """
    Decorator marking a test that requires NATTEN.

    These tests are skipped when NATTEN isn't installed.
    """
    return unittest.skipUnless(is_natten_available(), "test requires natten")(test_case)


def require_torch(test_case):
    """
    Decorator marking a test that requires PyTorch.

    These tests are skipped when PyTorch isn't installed.
    """
    return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)


def require_flash_attn(test_case):
    """
    Decorator marking a test that requires Flash Attention.

    These tests are skipped when Flash Attention isn't installed.
    """
    return unittest.skipUnless(is_flash_attn_2_available(), "test requires Flash Attention")(test_case)


def require_torch_sdpa(test_case):
    """
    Decorator marking a test that requires PyTorch's SDPA.

    These tests are skipped when requirements are not met (torch version).
    """
    return unittest.skipUnless(is_torch_sdpa_available(), "test requires PyTorch SDPA")(test_case)


def require_read_token(fn):
    """
    A decorator that loads the HF token for tests that need to load gated models.
    """
    token = os.getenv("HF_HUB_READ_TOKEN")

    @wraps(fn)
    def _inner(*args, **kwargs):
        if token is not None:
            with patch("huggingface_hub.utils._headers.get_token", return_value=token):
                return fn(*args, **kwargs)
        else:  # Allow running locally with the default token env variable
            return fn(*args, **kwargs)

    return _inner
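
# Usage sketch (hypothetical test and repo name): when HF_HUB_READ_TOKEN is set, it
# is patched in as the return value of `huggingface_hub.utils._headers.get_token`
# for the duration of the test, so gated checkpoints can be fetched on CI without
# touching the local token cache.
#
#     @require_read_token
#     def test_gated_model(self):
#         model = AutoModel.from_pretrained("some-org/some-gated-model")  # hypothetical repo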


def require_peft(test_case):
    """
    Decorator marking a test that requires PEFT.

    These tests are skipped when PEFT isn't installed.
    """
    return unittest.skipUnless(is_peft_available(), "test requires PEFT")(test_case)


def require_torchvision(test_case):
    """
    Decorator marking a test that requires Torchvision.

    These tests are skipped when Torchvision isn't installed.
    """
    return unittest.skipUnless(is_torchvision_available(), "test requires Torchvision")(test_case)


def require_torch_or_tf(test_case):
    """
    Decorator marking a test that requires PyTorch or TensorFlow.

    These tests are skipped when neither PyTorch nor TensorFlow is installed.
    """
    return unittest.skipUnless(is_torch_available() or is_tf_available(), "test requires PyTorch or TensorFlow")(
        test_case
    )


def require_intel_extension_for_pytorch(test_case):
    """
    Decorator marking a test that requires Intel Extension for PyTorch.

    These tests are skipped when Intel Extension for PyTorch isn't installed or it does not match current PyTorch
    version.
    """
    return unittest.skipUnless(
        is_ipex_available(),
        "test requires Intel Extension for PyTorch to be installed and match current PyTorch version, see"
        " https://github.com/intel/intel-extension-for-pytorch",
    )(test_case)


def require_tensorflow_probability(test_case):
    """
    Decorator marking a test that requires TensorFlow probability.

    These tests are skipped when TensorFlow probability isn't installed.
    """
    return unittest.skipUnless(is_tensorflow_probability_available(), "test requires TensorFlow probability")(
        test_case
    )


def require_torchaudio(test_case):
    """
    Decorator marking a test that requires torchaudio. These tests are skipped when torchaudio isn't installed.
    """
    return unittest.skipUnless(is_torchaudio_available(), "test requires torchaudio")(test_case)


def require_tf(test_case):
    """
    Decorator marking a test that requires TensorFlow. These tests are skipped when TensorFlow isn't installed.
    """
    return unittest.skipUnless(is_tf_available(), "test requires TensorFlow")(test_case)


def require_flax(test_case):
    """
    Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed.
    """
    return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)


def require_sentencepiece(test_case):
    """
    Decorator marking a test that requires SentencePiece. These tests are skipped when SentencePiece isn't installed.
    """
    return unittest.skipUnless(is_sentencepiece_available(), "test requires SentencePiece")(test_case)


def require_sacremoses(test_case):
    """
    Decorator marking a test that requires Sacremoses. These tests are skipped when Sacremoses isn't installed.
    """
    return unittest.skipUnless(is_sacremoses_available(), "test requires Sacremoses")(test_case)


def require_seqio(test_case):
    """
    Decorator marking a test that requires Seqio. These tests are skipped when Seqio isn't installed.
    """
    return unittest.skipUnless(is_seqio_available(), "test requires Seqio")(test_case)


def require_scipy(test_case):
    """
    Decorator marking a test that requires SciPy. These tests are skipped when SciPy isn't installed.
    """
    return unittest.skipUnless(is_scipy_available(), "test requires Scipy")(test_case)


def require_tokenizers(test_case):
    """
    Decorator marking a test that requires 🤗 Tokenizers. These tests are skipped when 🤗 Tokenizers isn't installed.
    """
    return unittest.skipUnless(is_tokenizers_available(), "test requires tokenizers")(test_case)


def require_tensorflow_text(test_case):
    """
    Decorator marking a test that requires tensorflow_text. These tests are skipped when tensorflow_text isn't
    installed.
    """
    return unittest.skipUnless(is_tensorflow_text_available(), "test requires tensorflow_text")(test_case)


def require_keras_nlp(test_case):
    """
    Decorator marking a test that requires keras_nlp. These tests are skipped when keras_nlp isn't installed.
    """
    return unittest.skipUnless(is_keras_nlp_available(), "test requires keras_nlp")(test_case)


def require_pandas(test_case):
    """
    Decorator marking a test that requires pandas. These tests are skipped when pandas isn't installed.
    """
    return unittest.skipUnless(is_pandas_available(), "test requires pandas")(test_case)


def require_pytesseract(test_case):
    """
    Decorator marking a test that requires PyTesseract. These tests are skipped when PyTesseract isn't installed.
    """
    return unittest.skipUnless(is_pytesseract_available(), "test requires PyTesseract")(test_case)


def require_pytorch_quantization(test_case):
    """
    Decorator marking a test that requires PyTorch Quantization Toolkit. These tests are skipped when PyTorch
    Quantization Toolkit isn't installed.
    """
    return unittest.skipUnless(is_pytorch_quantization_available(), "test requires PyTorch Quantization Toolkit")(
        test_case
    )


def require_vision(test_case):
    """
    Decorator marking a test that requires the vision dependencies. These tests are skipped when the vision
    dependencies aren't installed.
    """
    return unittest.skipUnless(is_vision_available(), "test requires vision")(test_case)


def require_ftfy(test_case):
    """
    Decorator marking a test that requires ftfy. These tests are skipped when ftfy isn't installed.
    """
    return unittest.skipUnless(is_ftfy_available(), "test requires ftfy")(test_case)


def require_spacy(test_case):
    """
    Decorator marking a test that requires SpaCy. These tests are skipped when SpaCy isn't installed.
    """
    return unittest.skipUnless(is_spacy_available(), "test requires spacy")(test_case)


def require_torch_multi_gpu(test_case):
    """
    Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
    multiple GPUs.

    To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k "multi_gpu"
    """
    if not is_torch_available():
        return unittest.skip(reason="test requires PyTorch")(test_case)

    device_count = get_device_count()

    return unittest.skipUnless(device_count > 1, "test requires multiple GPUs")(test_case)
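
# Usage sketch (hypothetical test): skipped unless more than one GPU/XPU device is
# visible; naming such tests consistently keeps the `-k "multi_gpu"` filter useful.
#
#     @require_torch_multi_gpu
#     def test_model_parallel_multi_gpu(self):
#         ...  # exercise parallelism across devices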


def require_torch_multi_accelerator(test_case):
    """
    Decorator marking a test that requires a multi-accelerator setup (in PyTorch). These tests are skipped on a machine
    without multiple accelerators. To run *only* the multi_accelerator tests, assuming all test names contain
    multi_accelerator: $ pytest -sv ./tests -k "multi_accelerator"
    """
    if not is_torch_available():
        return unittest.skip(reason="test requires PyTorch")(test_case)

    return unittest.skipUnless(backend_device_count(torch_device) > 1, "test requires multiple accelerators")(
        test_case
    )


def require_torch_non_multi_gpu(test_case):
    """
    Decorator marking a test that requires 0 or 1 GPU setup (in PyTorch).
    """
    if not is_torch_available():
        return unittest.skip(reason="test requires PyTorch")(test_case)

    import torch

    return unittest.skipUnless(torch.cuda.device_count() < 2, "test requires 0 or 1 GPU")(test_case)


def require_torch_non_multi_accelerator(test_case):
    """
    Decorator marking a test that requires 0 or 1 accelerator setup (in PyTorch).
    """
    if not is_torch_available():
        return unittest.skip(reason="test requires PyTorch")(test_case)

    return unittest.skipUnless(backend_device_count(torch_device) < 2, "test requires 0 or 1 accelerator")(test_case)


def require_torch_up_to_2_gpus(test_case):
    """
    Decorator marking a test that requires 0 or 1 or 2 GPU setup (in PyTorch).
    """
    if not is_torch_available():
        return unittest.skip(reason="test requires PyTorch")(test_case)

    import torch

    return unittest.skipUnless(torch.cuda.device_count() < 3, "test requires 0 or 1 or 2 GPUs")(test_case)


def require_torch_up_to_2_accelerators(test_case):
    """
    Decorator marking a test that requires 0 or 1 or 2 accelerator setup (in PyTorch).
    """
    if not is_torch_available():
        return unittest.skip(reason="test requires PyTorch")(test_case)

    return unittest.skipUnless(backend_device_count(torch_device) < 3, "test requires 0 or 1 or 2 accelerators")(
        test_case
    )


def require_torch_xla(test_case):
    """
    Decorator marking a test that requires TorchXLA (in PyTorch).
    """
    return unittest.skipUnless(is_torch_xla_available(), "test requires TorchXLA")(test_case)


def require_torch_neuroncore(test_case):
    """
    Decorator marking a test that requires NeuronCore (in PyTorch).
    """
    return unittest.skipUnless(is_torch_neuroncore_available(check_device=False), "test requires PyTorch NeuronCore")(
        test_case
    )


def require_torch_npu(test_case):
    """
    Decorator marking a test that requires NPU (in PyTorch).
    """
    return unittest.skipUnless(is_torch_npu_available(), "test requires PyTorch NPU")(test_case)


def require_torch_multi_npu(test_case):
    """
    Decorator marking a test that requires a multi-NPU setup (in PyTorch). These tests are skipped on a machine without
    multiple NPUs.

    To run *only* the multi_npu tests, assuming all test names contain multi_npu: $ pytest -sv ./tests -k "multi_npu"
    """
    if not is_torch_npu_available():
        return unittest.skip(reason="test requires PyTorch NPU")(test_case)

    return unittest.skipUnless(torch.npu.device_count() > 1, "test requires multiple NPUs")(test_case)


def require_torch_xpu(test_case):
    """
    Decorator marking a test that requires XPU (in PyTorch).

    These tests are skipped when the XPU backend is not available. The XPU backend might be available either via stock
    PyTorch (>=2.4) or via Intel Extension for PyTorch. In the latter case, if IPEX is installed, its version must
    match the current PyTorch version.
    """
    return unittest.skipUnless(is_torch_xpu_available(), "test requires XPU device")(test_case)


def require_non_xpu(test_case):
    """
    Decorator marking a test that should be skipped for XPU.
    """
    return unittest.skipUnless(torch_device != "xpu", "test requires a non-XPU")(test_case)


def require_torch_multi_xpu(test_case):
    """
    Decorator marking a test that requires a multi-XPU setup (in PyTorch). These tests are skipped on a machine without
    multiple XPUs.

    To run *only* the multi_xpu tests, assuming all test names contain multi_xpu: $ pytest -sv ./tests -k "multi_xpu"
    """
    if not is_torch_xpu_available():
        return unittest.skip(reason="test requires PyTorch XPU")(test_case)

    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


if is_torch_available():
    # Set env var CUDA_VISIBLE_DEVICES="" to force cpu-mode
    import torch

    if "TRANSFORMERS_TEST_BACKEND" in os.environ:
        backend = os.environ["TRANSFORMERS_TEST_BACKEND"]
        try:
            _ = importlib.import_module(backend)
        except ModuleNotFoundError as e:
            raise ModuleNotFoundError(
                f"Failed to import `TRANSFORMERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module. The original error (look up to see its"
                f" traceback):\n{e}"
            ) from e

    if "TRANSFORMERS_TEST_DEVICE" in os.environ:
        torch_device = os.environ["TRANSFORMERS_TEST_DEVICE"]
        if torch_device == "cuda" and not torch.cuda.is_available():
            raise ValueError(
                f"TRANSFORMERS_TEST_DEVICE={torch_device}, but CUDA is unavailable. Please double-check your testing environment."
            )
        if torch_device == "xpu" and not is_torch_xpu_available():
            raise ValueError(
                f"TRANSFORMERS_TEST_DEVICE={torch_device}, but XPU is unavailable. Please double-check your testing environment."
            )
        if torch_device == "npu" and not is_torch_npu_available():
            raise ValueError(
                f"TRANSFORMERS_TEST_DEVICE={torch_device}, but NPU is unavailable. Please double-check your testing environment."
            )

        try:
            # try creating device to see if provided device is valid
            _ = torch.device(torch_device)
        except RuntimeError as e:
            raise RuntimeError(
                f"Unknown testing device specified by environment variable `TRANSFORMERS_TEST_DEVICE`: {torch_device}"
            ) from e
    elif torch.cuda.is_available():
        torch_device = "cuda"
    elif _run_third_party_device_tests and is_torch_npu_available():
        torch_device = "npu"
    elif _run_third_party_device_tests and is_torch_xpu_available():
        torch_device = "xpu"
    else:
        torch_device = "cpu"
else:
    torch_device = None
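
# Resolution sketch: `torch_device` is fixed once at import time. Env overrides win,
# then auto-detection (cuda first; npu/xpu only with RUN_THIRD_PARTY_DEVICE_TESTS=1):
#
#     TRANSFORMERS_TEST_DEVICE=cpu pytest tests/          # force CPU
#     TRANSFORMERS_TEST_DEVICE=xpu pytest tests/          # force XPU (validated above)
#     TRANSFORMERS_TEST_BACKEND=torch_npu pytest tests/   # import a third-party backend first
#     pytest tests/                                       # plain auto-detection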

if is_tf_available():
    import tensorflow as tf

if is_flax_available():
    import jax

    jax_device = jax.default_backend()
else:
    jax_device = None


def require_torchdynamo(test_case):
    """Decorator marking a test that requires TorchDynamo"""
    return unittest.skipUnless(is_torchdynamo_available(), "test requires TorchDynamo")(test_case)


def require_torchao(test_case):
    """Decorator marking a test that requires torchao"""
    return unittest.skipUnless(is_torchao_available(), "test requires torchao")(test_case)


def require_torch_tensorrt_fx(test_case):
    """Decorator marking a test that requires Torch-TensorRT FX"""
    return unittest.skipUnless(is_torch_tensorrt_fx_available(), "test requires Torch-TensorRT FX")(test_case)


def require_torch_gpu(test_case):
    """Decorator marking a test that requires CUDA and PyTorch."""
    return unittest.skipUnless(torch_device == "cuda", "test requires CUDA")(test_case)


def require_torch_gpu_if_bnb_not_multi_backend_enabled(test_case):
    """
    Decorator marking a test that requires a GPU if bitsandbytes multi-backend feature is not enabled.
    """
    if is_bitsandbytes_available() and is_bitsandbytes_multi_backend_available():
        return test_case
    return require_torch_gpu(test_case)


def require_torch_accelerator(test_case):
    """Decorator marking a test that requires an accessible accelerator and PyTorch."""
    return unittest.skipUnless(torch_device is not None and torch_device != "cpu", "test requires accelerator")(
        test_case
    )


def require_torch_fp16(test_case):
    """Decorator marking a test that requires a device that supports fp16"""
    return unittest.skipUnless(
        is_torch_fp16_available_on_device(torch_device), "test requires device with fp16 support"
    )(test_case)


def require_torch_bf16(test_case):
    """Decorator marking a test that requires a device that supports bf16"""
    return unittest.skipUnless(
        is_torch_bf16_available_on_device(torch_device), "test requires device with bf16 support"
    )(test_case)


def require_torch_bf16_gpu(test_case):
    """Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0"""
    return unittest.skipUnless(
        is_torch_bf16_gpu_available(),
        "test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0",
    )(test_case)


def require_torch_bf16_cpu(test_case):
    """Decorator marking a test that requires torch>=1.10, using CPU."""
    return unittest.skipUnless(
        is_torch_bf16_cpu_available(),
        "test requires torch>=1.10, using CPU",
    )(test_case)


def require_deterministic_for_xpu(test_case):
    if is_torch_xpu_available():
        return unittest.skipUnless(is_torch_deterministic(), "test requires torch to use deterministic algorithms")(
            test_case
        )
    else:
        return test_case


def require_torch_tf32(test_case):
    """Decorator marking a test that requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7."""
    return unittest.skipUnless(
        is_torch_tf32_available(), "test requires Ampere or a newer GPU arch, cuda>=11 and torch>=1.7"
    )(test_case)


def require_detectron2(test_case):
    """Decorator marking a test that requires detectron2."""
    return unittest.skipUnless(is_detectron2_available(), "test requires `detectron2`")(test_case)


def require_faiss(test_case):
    """Decorator marking a test that requires faiss."""
    return unittest.skipUnless(is_faiss_available(), "test requires `faiss`")(test_case)


def require_optuna(test_case):
    """
    Decorator marking a test that requires optuna.

    These tests are skipped when optuna isn't installed.
    """
    return unittest.skipUnless(is_optuna_available(), "test requires optuna")(test_case)


def require_ray(test_case):
    """
    Decorator marking a test that requires Ray/tune.

    These tests are skipped when Ray/tune isn't installed.
    """
    return unittest.skipUnless(is_ray_available(), "test requires Ray/tune")(test_case)


def require_sigopt(test_case):
    """
    Decorator marking a test that requires SigOpt.

    These tests are skipped when SigOpt isn't installed.
    """
    return unittest.skipUnless(is_sigopt_available(), "test requires SigOpt")(test_case)


def require_wandb(test_case):
    """
    Decorator marking a test that requires wandb.

    These tests are skipped when wandb isn't installed.
    """
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_clearml(test_case):
    """
    Decorator marking a test that requires clearml.

    These tests are skipped when clearml isn't installed.
    """
    return unittest.skipUnless(is_clearml_available(), "test requires clearml")(test_case)


def require_soundfile(test_case):
    """
    Decorator marking a test that requires soundfile.

    These tests are skipped when soundfile isn't installed.
    """
    return unittest.skipUnless(is_soundfile_availble(), "test requires soundfile")(test_case)


def require_deepspeed(test_case):
    """
    Decorator marking a test that requires deepspeed
    """
    return unittest.skipUnless(is_deepspeed_available(), "test requires deepspeed")(test_case)


def require_apex(test_case):
    """
    Decorator marking a test that requires apex
    """
    return unittest.skipUnless(is_apex_available(), "test requires apex")(test_case)


def require_aqlm(test_case):
    """
    Decorator marking a test that requires aqlm
    """
    return unittest.skipUnless(is_aqlm_available(), "test requires aqlm")(test_case)


def require_eetq(test_case):
    """
    Decorator marking a test that requires eetq
    """
    return unittest.skipUnless(is_eetq_available(), "test requires eetq")(test_case)


def require_av(test_case):
    """
    Decorator marking a test that requires av
    """
    return unittest.skipUnless(is_av_available(), "test requires av")(test_case)


def require_bitsandbytes(test_case):
    """
    Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library or its hard
    dependency torch is not installed.
    """
    if is_bitsandbytes_available() and is_torch_available():
        try:
            import pytest

            return pytest.mark.bitsandbytes(test_case)
        except ImportError:
            return test_case
    else:
        return unittest.skip(reason="test requires bitsandbytes and torch")(test_case)


def require_optimum(test_case):
    """
    Decorator for optimum dependency
    """
    return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)


def require_tensorboard(test_case):
    """
    Decorator for `tensorboard` dependency
    """
    return unittest.skipUnless(is_tensorboard_available(), "test requires tensorboard")(test_case)


def require_auto_gptq(test_case):
    """
    Decorator for auto_gptq dependency
    """
    return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)


def require_auto_awq(test_case):
    """
    Decorator for auto_awq dependency
    """
    return unittest.skipUnless(is_auto_awq_available(), "test requires autoawq")(test_case)


def require_optimum_quanto(test_case):
    """
    Decorator for quanto dependency
    """
    return unittest.skipUnless(is_optimum_quanto_available(), "test requires optimum-quanto")(test_case)


def require_compressed_tensors(test_case):
    """
    Decorator for compressed_tensors dependency
    """
    return unittest.skipUnless(is_compressed_tensors_available(), "test requires compressed_tensors")(test_case)


def require_fbgemm_gpu(test_case):
    """
    Decorator for fbgemm_gpu dependency
    """
    return unittest.skipUnless(is_fbgemm_gpu_available(), "test requires fbgemm-gpu")(test_case)


def require_phonemizer(test_case):
    """
    Decorator marking a test that requires phonemizer
    """
    return unittest.skipUnless(is_phonemizer_available(), "test requires phonemizer")(test_case)


def require_pyctcdecode(test_case):
    """
    Decorator marking a test that requires pyctcdecode
    """
    return unittest.skipUnless(is_pyctcdecode_available(), "test requires pyctcdecode")(test_case)


def require_librosa(test_case):
    """
    Decorator marking a test that requires librosa
    """
    return unittest.skipUnless(is_librosa_available(), "test requires librosa")(test_case)


def require_liger_kernel(test_case):
    """
    Decorator marking a test that requires liger_kernel
    """
    return unittest.skipUnless(is_liger_kernel_available(), "test requires liger_kernel")(test_case)


def require_essentia(test_case):
    """
    Decorator marking a test that requires essentia
    """
    return unittest.skipUnless(is_essentia_available(), "test requires essentia")(test_case)


def require_pretty_midi(test_case):
    """
    Decorator marking a test that requires pretty_midi
    """
    return unittest.skipUnless(is_pretty_midi_available(), "test requires pretty_midi")(test_case)


def cmd_exists(cmd):
    return shutil.which(cmd) is not None


def require_usr_bin_time(test_case):
    """
    Decorator marking a test that requires `/usr/bin/time`
    """
    return unittest.skipUnless(cmd_exists("/usr/bin/time"), "test requires /usr/bin/time")(test_case)


def require_sudachi(test_case):
    """
    Decorator marking a test that requires sudachi
    """
    return unittest.skipUnless(is_sudachi_available(), "test requires sudachi")(test_case)


def require_sudachi_projection(test_case):
    """
    Decorator marking a test that requires sudachi_projection
    """
    return unittest.skipUnless(is_sudachi_projection_available(), "test requires sudachi which supports projection")(
        test_case
    )


def require_jumanpp(test_case):
    """
    Decorator marking a test that requires jumanpp
    """
    return unittest.skipUnless(is_jumanpp_available(), "test requires jumanpp")(test_case)


def require_cython(test_case):
    """
    Decorator marking a test that requires Cython
    """
    return unittest.skipUnless(is_cython_available(), "test requires cython")(test_case)


def require_tiktoken(test_case):
    """
    Decorator marking a test that requires TikToken. These tests are skipped when TikToken isn't installed.
    """
    return unittest.skipUnless(is_tiktoken_available(), "test requires TikToken")(test_case)


def get_gpu_count():
    """
    Return the number of available gpus (regardless of whether torch, tf or jax is used)
    """
    if is_torch_available():
        import torch

        return torch.cuda.device_count()
    elif is_tf_available():
        import tensorflow as tf

        return len(tf.config.list_physical_devices("GPU"))
    elif is_flax_available():
        import jax

        return jax.device_count()
    else:
        return 0


def get_tests_dir(append_path=None):
    """
    Args:
        append_path: optional path to append to the tests dir path

    Return:
        The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is
        joined after the `tests` dir if the former is provided.
    """
    # this function caller's __file__
    caller__file__ = inspect.stack()[1][1]
    tests_dir = os.path.abspath(os.path.dirname(caller__file__))

    while not tests_dir.endswith("tests"):
        tests_dir = os.path.dirname(tests_dir)

    if append_path:
        return os.path.join(tests_dir, append_path)
    else:
        return tests_dir
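
# Usage sketch (hypothetical fixture path): called from a test file anywhere under
# the repo's `tests/` tree, the stack walk climbs parent directories until one
# named "tests" is found.
#
#     data = get_tests_dir("fixtures/some_fixture.bin")
#     # -> /abs/path/to/tests/fixtures/some_fixture.bin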


#
# Helper functions for dealing with testing text outputs
# The original code came from:
# https://github.com/fastai/fastai/blob/master/tests/utils/text.py

# When any function contains print() calls that get overwritten, like progress bars,
# special care needs to be applied, since under pytest -s captured output (capsys
# or contextlib.redirect_stdout) contains any temporary printed strings, followed by
# \r's. This helper function ensures that the buffer will contain the same output
# with and without -s in pytest, by turning:
# foo bar\r tar mar\r final message
# into:
# final message
# It can handle a single string or a multiline buffer.
def apply_print_resets(buf):
    return re.sub(r"^.*\r", "", buf, count=0, flags=re.M)


def assert_screenout(out, what):
    out_pr = apply_print_resets(out).lower()
    match_str = out_pr.find(what.lower())
    assert match_str != -1, f"expecting to find {what} in output: {out_pr}"
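
# Behavior sketch: on each line, everything up to and including the last carriage
# return is dropped, so progress-bar noise disappears and only the final text stays.
#
#     assert apply_print_resets("foo bar\r tar mar\r final message") == " final message"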


class CaptureStd:
    """
    Context manager to capture:

        - stdout: replay it, clean it up and make it available via `obj.out`
        - stderr: replay it and make it available via `obj.err`

    Args:
        out (`bool`, *optional*, defaults to `True`): Whether to capture stdout or not.
        err (`bool`, *optional*, defaults to `True`): Whether to capture stderr or not.
        replay (`bool`, *optional*, defaults to `True`): Whether to replay or not.
            By default each captured stream gets replayed back on context's exit, so that one can see what the test
            was doing. If this behavior is not wanted and the captured data shouldn't be replayed, pass `replay=False`
            to disable this feature.

    Examples:

    ```python
    # to capture stdout only with auto-replay
    with CaptureStdout() as cs:
        print("Secret message")
    assert "message" in cs.out

    # to capture stderr only with auto-replay
    import sys

    with CaptureStderr() as cs:
        print("Warning: ", file=sys.stderr)
    assert "Warning" in cs.err

    # to capture both streams with auto-replay
    with CaptureStd() as cs:
        print("Secret message")
        print("Warning: ", file=sys.stderr)
    assert "message" in cs.out
    assert "Warning" in cs.err

    # to capture just one of the streams, and not the other, with auto-replay
    with CaptureStd(err=False) as cs:
        print("Secret message")
    assert "message" in cs.out
    # but best use the stream-specific subclasses

    # to capture without auto-replay
    with CaptureStd(replay=False) as cs:
        print("Secret message")
    assert "message" in cs.out
    ```"""

    def __init__(self, out=True, err=True, replay=True):
        self.replay = replay

        if out:
            self.out_buf = StringIO()
            self.out = "error: CaptureStd context is unfinished yet, called too early"
        else:
            self.out_buf = None
            self.out = "not capturing stdout"

        if err:
            self.err_buf = StringIO()
            self.err = "error: CaptureStd context is unfinished yet, called too early"
        else:
            self.err_buf = None
            self.err = "not capturing stderr"

    def __enter__(self):
        if self.out_buf:
            self.out_old = sys.stdout
            sys.stdout = self.out_buf

        if self.err_buf:
            self.err_old = sys.stderr
            sys.stderr = self.err_buf

        return self

    def __exit__(self, *exc):
        if self.out_buf:
            sys.stdout = self.out_old
            captured = self.out_buf.getvalue()
            if self.replay:
                sys.stdout.write(captured)
            self.out = apply_print_resets(captured)

        if self.err_buf:
            sys.stderr = self.err_old
            captured = self.err_buf.getvalue()
            if self.replay:
                sys.stderr.write(captured)
            self.err = captured

    def __repr__(self):
        msg = ""
        if self.out_buf:
            msg += f"stdout: {self.out}\n"
        if self.err_buf:
            msg += f"stderr: {self.err}\n"
        return msg


# in tests it's best to capture only the stream that's wanted, otherwise
# it's easy to miss things, so unless you need to capture both streams, use the
# subclasses below (less typing). Or alternatively, configure `CaptureStd` to
# disable the stream you don't need to test.


class CaptureStdout(CaptureStd):
    """Same as CaptureStd but captures only stdout"""

    def __init__(self, replay=True):
        super().__init__(err=False, replay=replay)


class CaptureStderr(CaptureStd):
    """Same as CaptureStd but captures only stderr"""

    def __init__(self, replay=True):
        super().__init__(out=False, replay=replay)


class CaptureLogger:
    """
    Context manager to capture `logging` streams

    Args:
        logger: `logging` logger object

    Returns:
        The captured output is available via `self.out`

    Example:

    ```python
    >>> from transformers import logging
    >>> from transformers.testing_utils import CaptureLogger

    >>> msg = "Testing 1, 2, 3"
    >>> logging.set_verbosity_info()
    >>> logger = logging.get_logger("transformers.models.bart.tokenization_bart")
    >>> with CaptureLogger(logger) as cl:
    ...     logger.info(msg)
    >>> assert cl.out == msg + "\n"
    ```
    """

    def __init__(self, logger):
        self.logger = logger
        self.io = StringIO()
        self.sh = logging.StreamHandler(self.io)
        self.out = ""

    def __enter__(self):
        self.logger.addHandler(self.sh)
        return self

    def __exit__(self, *exc):
        self.logger.removeHandler(self.sh)
        self.out = self.io.getvalue()

    def __repr__(self):
        return f"captured: {self.out}\n"


@contextlib.contextmanager
def LoggingLevel(level):
    """
    This is a context manager to temporarily change transformers modules logging level to the desired value and have
    it restored to the original setting at the end of the scope.

    Example:

    ```python
    with LoggingLevel(logging.INFO):
        AutoModel.from_pretrained("openai-community/gpt2")  # calls logger.info() several times
    ```
    """
    orig_level = transformers_logging.get_verbosity()
    try:
        transformers_logging.set_verbosity(level)
        yield
    finally:
        transformers_logging.set_verbosity(orig_level)


# adapted from https://stackoverflow.com/a/64789046/9201239
@contextlib.contextmanager
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]:
    """
    Temporarily add a given path to `sys.path`.

    Usage:

    ```python
    with ExtendSysPath("/path/to/dir"):
        mymodule = importlib.import_module("mymodule")
    ```
    """
    path = os.fspath(path)
    try:
        sys.path.insert(0, path)
        yield
    finally:
        sys.path.remove(path)


class TestCasePlus(unittest.TestCase):
    """
    This class extends *unittest.TestCase* with additional features.

    Feature 1: A set of fully resolved important file and dir path accessors.

    In tests often we need to know where things are relative to the current test file, and it's not trivial since the
    test could be invoked from more than one directory or could reside in sub-directories with different depths. This
    class solves this problem by sorting out all the basic paths and provides easy accessors to them:

    - `pathlib` objects (all fully resolved):

        - `test_file_path` - the current test file path (=`__file__`)
        - `test_file_dir` - the directory containing the current test file
        - `tests_dir` - the directory of the `tests` test suite
        - `examples_dir` - the directory of the `examples` test suite
        - `repo_root_dir` - the directory of the repository
        - `src_dir` - the directory of `src` (i.e. where the `transformers` sub-dir resides)

    - stringified paths---same as above but these return paths as strings, rather than `pathlib` objects:

        - `test_file_path_str`
        - `test_file_dir_str`
        - `tests_dir_str`
        - `examples_dir_str`
        - `repo_root_dir_str`
        - `src_dir_str`

    Feature 2: Flexible auto-removable temporary dirs which are guaranteed to get removed at the end of test.

    1. Create a unique temporary dir:

    ```python
    def test_whatever(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
    ```

    `tmp_dir` will contain the path to the created temporary dir. It will be automatically removed at the end of the
    test.

    2. Create a temporary dir of my choice, ensure it's empty before the test starts and don't
    empty it after the test.

    ```python
    def test_whatever(self):
        tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
    ```

    This is useful for debug when you want to monitor a specific directory and want to make sure the previous tests
    didn't leave any data in there.

    3. You can override the first two options by directly overriding the `before` and `after` args, leading to the
       following behavior:

    `before=True`: the temporary dir will always be cleared at the beginning of the test.

    `before=False`: if the temporary dir already existed, any existing files will remain there.

    `after=True`: the temporary dir will always be deleted at the end of the test.

    `after=False`: the temporary dir will always be left intact at the end of the test.

    Note 1: In order to run the equivalent of `rm -r` safely, only subdirs of the project repository checkout are
    allowed if an explicit `tmp_dir` is used, so that by mistake no `/tmp` or similar important part of the filesystem
    will get nuked. i.e. please always pass paths that start with `./`

    Note 2: Each test can register multiple temporary dirs and they all will get auto-removed, unless requested
    otherwise.

    Feature 3: Get a copy of the `os.environ` object that sets up `PYTHONPATH` specific to the current test suite.
    This is useful for invoking external programs from the test suite - e.g. distributed training.

    ```python
    def test_whatever(self):
        env = self.get_env()
    ```"""

    def setUp(self):
        # get_auto_remove_tmp_dir feature:
        self.teardown_tmp_dirs = []

        # figure out the resolved paths for repo_root, tests, examples, etc.
        self._test_file_path = inspect.getfile(self.__class__)
        path = Path(self._test_file_path).resolve()
        self._test_file_dir = path.parents[0]
        # use a sentinel, so that a repo root that was never found raises below instead of
        # silently falling through to the last parent candidate
        repo_root_dir = None
        for up in [1, 2, 3]:
            tmp_dir = path.parents[up]
            if (tmp_dir / "src").is_dir() and (tmp_dir / "tests").is_dir():
                repo_root_dir = tmp_dir
                break
        if repo_root_dir is not None:
            self._repo_root_dir = repo_root_dir
        else:
            raise ValueError(f"can't figure out the root of the repo from {self._test_file_path}")
        self._tests_dir = self._repo_root_dir / "tests"
        self._examples_dir = self._repo_root_dir / "examples"
        self._src_dir = self._repo_root_dir / "src"

    @property
    def test_file_path(self):
        return self._test_file_path

    @property
    def test_file_path_str(self):
        return str(self._test_file_path)

    @property
    def test_file_dir(self):
        return self._test_file_dir

    @property
    def test_file_dir_str(self):
        return str(self._test_file_dir)

    @property
    def tests_dir(self):
        return self._tests_dir

    @property
    def tests_dir_str(self):
        return str(self._tests_dir)

    @property
    def examples_dir(self):
        return self._examples_dir

    @property
    def examples_dir_str(self):
        return str(self._examples_dir)

    @property
    def repo_root_dir(self):
        return self._repo_root_dir

    @property
    def repo_root_dir_str(self):
        return str(self._repo_root_dir)

    @property
    def src_dir(self):
        return self._src_dir

    @property
    def src_dir_str(self):
        return str(self._src_dir)

    def get_env(self):
        """
        Return a copy of the `os.environ` object that sets up `PYTHONPATH` correctly, depending on the test suite
        it's invoked from. This is useful for invoking external programs from the test suite - e.g. distributed
        training.

        It always inserts `./src` first, then `./tests` or `./examples` depending on the test suite type and finally
        the preset `PYTHONPATH` if any (all fully resolved paths).
        """
        env = os.environ.copy()
        paths = [self.src_dir_str]
        if "/examples" in self.test_file_dir_str:
            paths.append(self.examples_dir_str)
        else:
            paths.append(self.tests_dir_str)
        paths.append(env.get("PYTHONPATH", ""))

        env["PYTHONPATH"] = ":".join(paths)
        return env

    def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None):
        """
        Args:
            tmp_dir (`string`, *optional*):
                if `None`:

                   - a unique temporary path will be created
                   - sets `before=True` if `before` is `None`
                   - sets `after=True` if `after` is `None`
                else:

                   - `tmp_dir` will be created
                   - sets `before=True` if `before` is `None`
                   - sets `after=False` if `after` is `None`
            before (`bool`, *optional*):
                If `True` and the `tmp_dir` already exists, make sure to empty it right away; if `False` and the
                `tmp_dir` already exists, any existing files will remain there.
            after (`bool`, *optional*):
                If `True`, delete the `tmp_dir` at the end of the test; if `False`, leave the `tmp_dir` and its
                contents intact at the end of the test.

        Returns:
            tmp_dir (`string`): either the same value as passed via *tmp_dir* or the path to the auto-selected tmp dir
        """
        if tmp_dir is not None:
            # defining the most likely desired behavior for when a custom path is provided.
            # this most likely indicates the debug mode where we want an easily locatable dir that:
            # 1. gets cleared out before the test (if it already exists)
            # 2. is left intact after the test
            if before is None:
                before = True
            if after is None:
                after = False

            # using provided path
            path = Path(tmp_dir).resolve()

            # to avoid nuking parts of the filesystem, only relative paths are allowed
            if not tmp_dir.startswith("./"):
                raise ValueError(
                    f"`tmp_dir` can only be a relative path, i.e. `./some/path`, but received `{tmp_dir}`"
                )

            # ensure the dir is empty to start with
            if before is True and path.exists():
                shutil.rmtree(tmp_dir, ignore_errors=True)

            path.mkdir(parents=True, exist_ok=True)
        else:
            # defining the most likely desired behavior for when a unique tmp path is auto generated
            # (not a debug mode), here we require a unique tmp dir that:
            # 1. is empty before the test (it will be empty in this situation anyway)
            # 2. gets fully removed after the test
            if before is None:
                before = True
            if after is None:
                after = True

            # using unique tmp dir (always empty, regardless of `before`)
            tmp_dir = tempfile.mkdtemp()

        if after is True:
            # register for deletion
            self.teardown_tmp_dirs.append(tmp_dir)

        return tmp_dir

    def python_one_liner_max_rss(self, one_liner_str):
        """
        Runs the passed python one liner (just the code) and returns how much max cpu memory was used to run the
        program.

        Args:
            one_liner_str (`string`):
                a python one liner code that gets passed to `python -c`

        Returns:
            max cpu memory bytes used to run the program. This value is likely to vary slightly from run to run.

        Requirements:
            this helper needs `/usr/bin/time` to be installed (`apt install time`)

        Example:

        ```
        one_liner_str = 'from transformers import AutoModel; AutoModel.from_pretrained("google-t5/t5-large")'
        max_rss = self.python_one_liner_max_rss(one_liner_str)
        ```
        """
        if not cmd_exists("/usr/bin/time"):
            raise ValueError("/usr/bin/time is required, install with `apt install time`")

        cmd = shlex.split(f"/usr/bin/time -f %M python -c '{one_liner_str}'")
        with CaptureStd() as cs:
            execute_subprocess_async(cmd, env=self.get_env())
        # returned data is in KB so convert to bytes
        max_rss = int(cs.err.split("\n")[-2].replace("stderr: ", "")) * 1024
        return max_rss

    def tearDown(self):
        # get_auto_remove_tmp_dir feature: remove registered temp dirs
        for path in self.teardown_tmp_dirs:
            shutil.rmtree(path, ignore_errors=True)
        self.teardown_tmp_dirs = []

        if is_accelerate_available():
            AcceleratorState._reset_state()
            PartialState._reset_state()

            # delete all the env variables having `ACCELERATE` in them
            for k in list(os.environ.keys()):
                if "ACCELERATE" in k:
                    del os.environ[k]


def mockenv(**kwargs):
    """
    this is a convenience wrapper that allows this::

    @mockenv(RUN_SLOW=True, USE_TF=False)
    def test_something():
        run_slow = os.getenv("RUN_SLOW", False)
        use_tf = os.getenv("USE_TF", False)
    """
    return mock.patch.dict(os.environ, kwargs)


# from https://stackoverflow.com/a/34333710/9201239
@contextlib.contextmanager
def mockenv_context(*remove, **update):
    """
    Temporarily updates the `os.environ` dictionary in-place. Similar to mockenv.

    The `os.environ` dictionary is updated in-place so that the modification is sure to work in all situations.

    Args:
        remove: Environment variables to remove.
        update: Dictionary of environment variables and values to add/update.
    """
    env = os.environ
    update = update or {}
    remove = remove or []

    # List of environment variables being updated or removed.
    stomped = (set(update.keys()) | set(remove)) & set(env.keys())
    # Environment variables and values to restore on exit.
    update_after = {k: env[k] for k in stomped}
    # Environment variables and values to remove on exit.
    remove_after = frozenset(k for k in update if k not in env)

    try:
        env.update(update)
        [env.pop(k, None) for k in remove]
        yield
    finally:
        env.update(update_after)
        [env.pop(k) for k in remove_after]
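

# Example (a hypothetical sketch; the variable names are illustrative):
#
#     with mockenv_context("PYTEST_TIMEOUT", TRANSFORMERS_VERBOSITY="info"):
#         # inside the block `PYTEST_TIMEOUT` is unset and `TRANSFORMERS_VERBOSITY`
#         # is "info"; both are restored to their original state on exit
#         ...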


# --- pytest conf functions --- #

# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}


def pytest_addoption_shared(parser):
    """
    This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.

    It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
    option.
    """
    option = "--make-reports"
    if option not in pytest_opt_registered:
        parser.addoption(
            option,
            action="store",
            default=False,
            help="generate report files. The value of this option is used as a prefix to report names",
        )
        pytest_opt_registered[option] = 1


def pytest_terminal_summary_main(tr, id):
    """
    Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
    directory. The report files are prefixed with the test suite name.

    This function emulates --duration and -rA pytest arguments.

    This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
    there.

    Args:
    - tr: `terminalreporter` passed from `conftest.py`
    - id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
      needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.

    NB: this function taps into a private _pytest API and while unlikely, it could break should pytest do internal
    changes - also it calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
    plugins and interfere.
    """
    from _pytest.config import create_terminal_writer

    if not len(id):
        id = "tests"

    config = tr.config
    orig_writer = config.get_terminal_writer()
    orig_tbstyle = config.option.tbstyle
    orig_reportchars = tr.reportchars

    dir = f"reports/{id}"
    Path(dir).mkdir(parents=True, exist_ok=True)
    report_files = {
        k: f"{dir}/{k}.txt"
        for k in [
            "durations",
            "errors",
            "failures_long",
            "failures_short",
            "failures_line",
            "passes",
            "stats",
            "summary_short",
            "warnings",
        ]
    }

    # custom durations report
    # note: there is no need to call pytest --durations=XX to get this separate report
    # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
    dlist = []
    for replist in tr.stats.values():
        for rep in replist:
            if hasattr(rep, "duration"):
                dlist.append(rep)
    if dlist:
        dlist.sort(key=lambda x: x.duration, reverse=True)
        with open(report_files["durations"], "w") as f:
            durations_min = 0.05  # sec
            f.write("slowest durations\n")
            for i, rep in enumerate(dlist):
                if rep.duration < durations_min:
                    f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
                    break
                f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")

    def summary_failures_short(tr):
        # expecting that the reports were --tb=long (default) so we chop them off here to the last frame
        reports = tr.getreports("failed")
        if not reports:
            return
        tr.write_sep("=", "FAILURES SHORT STACK")
        for rep in reports:
            msg = tr._getfailureheadline(rep)
            tr.write_sep("_", msg, red=True, bold=True)
            # chop off the optional leading extra frames, leaving only the last one
            longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
            tr._tw.line(longrepr)
            # note: not printing out any rep.sections to keep the report short

    # use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
    # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
    # note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
    # pytest-instafail does that)

    # report failures with line/short/long styles
    config.option.tbstyle = "auto"  # full tb
    with open(report_files["failures_long"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_failures()

    # config.option.tbstyle = "short" # short tb
    with open(report_files["failures_short"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        summary_failures_short(tr)

    config.option.tbstyle = "line"  # one line per error
    with open(report_files["failures_line"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_failures()

    with open(report_files["errors"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_errors()

    with open(report_files["warnings"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_warnings()  # normal warnings
        tr.summary_warnings()  # final warnings

    tr.reportchars = "wPpsxXEf"  # emulate -rA (used in summary_passes() and short_test_summary())

    # Skip the `passes` report, as it starts to take more than 5 minutes, and sometimes it times out on CircleCI if it
    # takes > 10 minutes (as this part doesn't generate any output on the terminal).
    # (also, it seems there is no useful information in this report, and we rarely need to read it)
    # with open(report_files["passes"], "w") as f:
    #     tr._tw = create_terminal_writer(config, f)
    #     tr.summary_passes()

    with open(report_files["summary_short"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.short_test_summary()

    with open(report_files["stats"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_stats()

    # restore:
    tr._tw = orig_writer
    tr.reportchars = orig_reportchars
    config.option.tbstyle = orig_tbstyle


# --- distributed testing functions --- #

# adapted from https://stackoverflow.com/a/59041913/9201239
import asyncio  # noqa


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    # note: the coroutines are wrapped in tasks since passing bare coroutines to
    # `asyncio.wait` is deprecated (and a TypeError on python 3.11+)
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    """
    Returns an int value of worker's numerical id under `pytest-xdist`'s concurrent workers `pytest -n N` regime, or 0
    if `-n 1` or `pytest-xdist` isn't being used.
    """
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """
    Returns a port number that can be fed to `torch.distributed.launch`'s `--master_port` argument.

    Under `pytest-xdist` it adds a delta number based on a worker id so that concurrent tests don't try to use the
    same port at once.
    """
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
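

# Example (a hypothetical sketch; `run_ddp.py` is an illustrative distributed test script):
#
#     def test_ddp(self):
#         port = get_torch_dist_unique_port()
#         cmd = f"python -m torch.distributed.launch --nproc_per_node=2 --master_port={port} run_ddp.py".split()
#         execute_subprocess_async(cmd, env=self.get_env())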


def nested_simplify(obj, decimals=3):
    """
    Simplifies an object by rounding float numbers, and downcasting tensors/numpy arrays to get simple equality test
    within tests.
    """
    import numpy as np

    if isinstance(obj, list):
        return [nested_simplify(item, decimals) for item in obj]
    if isinstance(obj, tuple):
        return tuple([nested_simplify(item, decimals) for item in obj])
    elif isinstance(obj, np.ndarray):
        return nested_simplify(obj.tolist(), decimals)
    elif isinstance(obj, Mapping):
        return {nested_simplify(k, decimals): nested_simplify(v, decimals) for k, v in obj.items()}
    elif isinstance(obj, (str, int, np.int64)):
        return obj
    elif obj is None:
        return obj
    elif is_torch_available() and isinstance(obj, torch.Tensor):
        return nested_simplify(obj.tolist(), decimals)
    elif is_tf_available() and tf.is_tensor(obj):
        return nested_simplify(obj.numpy().tolist(), decimals)
    elif isinstance(obj, float):
        return round(obj, decimals)
    elif isinstance(obj, (np.int32, np.float32, np.float16)):
        return nested_simplify(obj.item(), decimals)
    else:
        raise Exception(f"Not supported: {type(obj)}")
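

# Example (a hypothetical sketch):
#
#     nested_simplify({"scores": [0.123456, 0.98765]}, decimals=3)
#     # -> {"scores": [0.123, 0.988]}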


def check_json_file_has_correct_format(file_path):
    with open(file_path, "r") as f:
        lines = f.readlines()
        if len(lines) == 1:
            # length can only be 1 if dict is empty
            assert lines[0] == "{}"
        else:
            # otherwise make sure json has correct format (at least 3 lines)
            assert len(lines) >= 3
            # each key on its own line, indent should be 2, min length is 3
            assert lines[0].strip() == "{"
            for line in lines[1:-1]:
                left_indent = len(line) - len(line.lstrip())
                assert left_indent == 2
            assert lines[-1].strip() == "}"


def to_2tuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
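

# Example: to_2tuple(224) -> (224, 224), while an iterable such as (224, 336) is returned unchanged.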


# These utils relate to ensuring the right error message is received when running scripts
class SubprocessCallException(Exception):
    pass


def run_command(command: List[str], return_stdout=False):
    """
    Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly capture
    if an error occurred while running `command`.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
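

# Example (a hypothetical sketch; the script path is illustrative):
#
#     stdout = run_command(["python", "examples/run_something.py", "--help"], return_stdout=True)
#     assert "usage:" in stdout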


class RequestCounter:
    """
    Helper class that will count all requests made online.

    Might not be robust if urllib3 changes its logging format but should be good enough for us.

    Usage:
    ```py
    with RequestCounter() as counter:
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
    assert counter["GET"] == 0
    assert counter["HEAD"] == 1
    assert counter.total_calls == 1
    ```
    """

    def __enter__(self):
        self._counter = defaultdict(int)
        self.patcher = patch.object(urllib3.connectionpool.log, "debug", wraps=urllib3.connectionpool.log.debug)
        self.mock = self.patcher.start()
        return self

    def __exit__(self, *args, **kwargs) -> None:
        for call in self.mock.call_args_list:
            log = call.args[0] % call.args[1:]
            for method in ("HEAD", "GET", "POST", "PUT", "DELETE", "CONNECT", "OPTIONS", "TRACE", "PATCH"):
                if method in log:
                    self._counter[method] += 1
                    break
        self.patcher.stop()

    def __getitem__(self, key: str) -> int:
        return self._counter[key]

    @property
    def total_calls(self) -> int:
        return sum(self._counter.values())


def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
    """
    To decorate flaky tests. They will be retried on failures.

    Args:
        max_attempts (`int`, *optional*, defaults to 5):
            The maximum number of attempts to retry the flaky test.
        wait_before_retry (`float`, *optional*):
            If provided, will wait that number of seconds before retrying the test.
        description (`str`, *optional*):
            A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
            etc.)
    """

    def decorator(test_func_ref):
        @functools.wraps(test_func_ref)
        def wrapper(*args, **kwargs):
            retry_count = 1

            while retry_count < max_attempts:
                try:
                    return test_func_ref(*args, **kwargs)
                except Exception as err:
                    print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
                    if wait_before_retry is not None:
                        time.sleep(wait_before_retry)
                    retry_count += 1

            # last attempt: let any exception propagate to actually fail the test
            return test_func_ref(*args, **kwargs)

        return wrapper

    return decorator
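

# Example (a hypothetical sketch):
#
#     @is_flaky(max_attempts=3, wait_before_retry=1.0, description="flaky on CI, see linked issue")
#     def test_sometimes_fails(self):
#         ...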


def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
    """
    To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.

    Args:
        test_case (`unittest.TestCase`):
            The test that will run `target_func`.
        target_func (`Callable`):
            The function implementing the actual testing logic.
        inputs (`dict`, *optional*, defaults to `None`):
            The inputs that will be passed to `target_func` through an (input) queue.
        timeout (`int`, *optional*, defaults to `None`):
            The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
            variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
    """
    if timeout is None:
        timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))

    start_method = "spawn"
    ctx = multiprocessing.get_context(start_method)

    input_queue = ctx.Queue(1)
    output_queue = ctx.JoinableQueue(1)

    # We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
    input_queue.put(inputs, timeout=timeout)

    process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
    process.start()
    # Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
    # the test from exiting properly.
    try:
        results = output_queue.get(timeout=timeout)
        output_queue.task_done()
    except Exception as e:
        process.terminate()
        test_case.fail(e)
    process.join(timeout=timeout)

    if results["error"] is not None:
        test_case.fail(f'{results["error"]}')
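

# Example (a hypothetical sketch of the `target_func` protocol implied above: it reads its
# inputs from the input queue and must put a dict with an "error" key on the output queue;
# `_test_fn` and the inputs are illustrative, and `traceback` would need to be imported):
#
#     def _test_fn(in_queue, out_queue, timeout):
#         error = None
#         try:
#             inputs = in_queue.get(timeout=timeout)
#             ...  # actual testing logic using `inputs`
#         except Exception:
#             error = traceback.format_exc()
#         out_queue.put({"error": error}, timeout=timeout)
#         out_queue.join()
#
#     def test_something(self):
#         run_test_in_subprocess(test_case=self, target_func=_test_fn, inputs={"x": 1})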


"""
The following contains utils to run the documentation tests without having to overwrite any files.

The `preprocess_string` function adds `# doctest: +IGNORE_RESULT` markers on the fly anywhere a `load_dataset` call is
made as a print would otherwise fail the corresponding line.

To skip cuda tests, make sure to call `SKIP_CUDA_DOCTEST=1 pytest --doctest-modules <path_to_files_to_test>`
"""


def preprocess_string(string, skip_cuda_tests):
    """Prepare a docstring or a `.md` file to be run by doctest.

    The argument `string` would be the whole file content if it is a `.md` file. For a python file, it would be one of
    its docstrings. In each case, it may contain multiple python code examples. If `skip_cuda_tests` is `True` and
    CUDA-specific code is detected (with a heuristic), this method will return an empty string so no doctest will be
    run for `string`.
    """
    codeblock_pattern = r"(```(?:python|py)\s*\n\s*>>> )((?:.*?\n)*?.*?```)"
    codeblocks = re.split(re.compile(codeblock_pattern, flags=re.MULTILINE | re.DOTALL), string)
    is_cuda_found = False
    for i, codeblock in enumerate(codeblocks):
        if "load_dataset(" in codeblock and "# doctest: +IGNORE_RESULT" not in codeblock:
            codeblocks[i] = re.sub(r"(>>> .*load_dataset\(.*)", r"\1 # doctest: +IGNORE_RESULT", codeblock)
        if (
            (">>>" in codeblock or "..." in codeblock)
            and re.search(r"cuda|to\(0\)|device=0", codeblock)
            and skip_cuda_tests
        ):
            is_cuda_found = True
            break

    modified_string = ""
    if not is_cuda_found:
        modified_string = "".join(codeblocks)

    return modified_string
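

# Example of the transformation (a hypothetical sketch; the dataset name is illustrative):
# a doctest line such as
#
#     >>> ds = load_dataset("some/dataset")
#
# becomes
#
#     >>> ds = load_dataset("some/dataset") # doctest: +IGNORE_RESULT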


class HfDocTestParser(doctest.DocTestParser):
    """
    Overwrites the DocTestParser from doctest to properly parse the codeblocks that are formatted with black. This
    means that there are no extra lines at the end of our snippets. The `# doctest: +IGNORE_RESULT` marker is also
    added anywhere a `load_dataset` call is made as a print would otherwise fail the corresponding line.

    Tests involving cuda are skipped based on a naive pattern that should be updated if it is not enough.
    """

    # This regular expression is used to find doctest examples in a
    # string. It defines three groups: `source` is the source code
    # (including leading indentation and prompts); `indent` is the
    # indentation of the first (PS1) line of the source code; and
    # `want` is the expected output (including leading indentation).
    # fmt: off
    _EXAMPLE_RE = re.compile(r'''
        # Source consists of a PS1 line followed by zero or more PS2 lines.
        (?P<source>
            (?:^(?P<indent> [ ]*) >>>    .*)      # PS1 line
            (?:\n           [ ]*  \.\.\. .*)*)    # PS2 lines
        \n?
        # Want consists of any non-blank lines that do not start with PS1.
        (?P<want> (?:(?![ ]*$)      # Not a blank line
                     (?![ ]*>>>)    # Not a line starting with PS1
                     # !!!!!!!!!!! HF Specific !!!!!!!!!!!
                     (?:(?!```).)*  # Match any character except '`' until a '```' is found (this is specific to HF because black removes the last line)
                     # !!!!!!!!!!! HF Specific !!!!!!!!!!!
                     (?:\n|$)       # Match a new line or end of string
                  )*)
        ''', re.MULTILINE | re.VERBOSE
    )
    # fmt: on

    # !!!!!!!!!!! HF Specific !!!!!!!!!!!
    skip_cuda_tests: bool = bool(os.environ.get("SKIP_CUDA_DOCTEST", False))
    # !!!!!!!!!!! HF Specific !!!!!!!!!!!

    def parse(self, string, name="<string>"):
        """
        Overwrites the `parse` method to incorporate a skip for CUDA tests, and remove logs and dataset prints before
        calling `super().parse`
        """
        string = preprocess_string(string, self.skip_cuda_tests)
        return super().parse(string, name)


class HfDoctestModule(Module):
    """
    Overwrites the `DoctestModule` of the pytest package to make sure the HFDocTestParser is used when discovering
    tests.
    """

    def collect(self) -> Iterable[DoctestItem]:
        class MockAwareDocTestFinder(doctest.DocTestFinder):
            """A hackish doctest finder that overrides stdlib internals to fix a stdlib bug.

            https://github.com/pytest-dev/pytest/issues/3456 https://bugs.python.org/issue25532
            """

            def _find_lineno(self, obj, source_lines):
                """Doctest code does not take into account `@property`, this
                is a hackish way to fix it. https://bugs.python.org/issue17446

                Wrapped Doctests will need to be unwrapped so the correct line number is returned. This will be
                reported upstream. #8796
                """
                if isinstance(obj, property):
                    obj = getattr(obj, "fget", obj)

                if hasattr(obj, "__wrapped__"):
                    # Get the main obj in case of it being wrapped
                    obj = inspect.unwrap(obj)

                # Type ignored because this is a private function.
                return super()._find_lineno(  # type:ignore[misc]
                    obj,
                    source_lines,
                )

            def _find(self, tests, obj, name, module, source_lines, globs, seen) -> None:
                if _is_mocked(obj):
                    return
                with _patch_unwrap_mock_aware():
                    # Type ignored because this is a private function.
                    super()._find(  # type:ignore[misc]
                        tests, obj, name, module, source_lines, globs, seen
                    )

        if self.path.name == "conftest.py":
            module = self.config.pluginmanager._importconftest(
                self.path,
                self.config.getoption("importmode"),
                rootpath=self.config.rootpath,
            )
        else:
            try:
                module = import_path(
                    self.path,
                    root=self.config.rootpath,
                    mode=self.config.getoption("importmode"),
                )
            except ImportError:
                if self.config.getvalue("doctest_ignore_import_errors"):
                    skip("unable to import module %r" % self.path)
                else:
                    raise

        # !!!!!!!!!!! HF Specific !!!!!!!!!!!
        finder = MockAwareDocTestFinder(parser=HfDocTestParser())
        # !!!!!!!!!!! HF Specific !!!!!!!!!!!
        optionflags = get_optionflags(self)
        runner = _get_runner(
            verbose=False,
            optionflags=optionflags,
            checker=_get_checker(),
            continue_on_failure=_get_continue_on_failure(self.config),
        )
        for test in finder.find(module, module.__name__):
            if test.examples:  # skip empty doctests and cuda
                yield DoctestItem.from_parent(self, name=test.name, runner=runner, dtest=test)


def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
    if device not in dispatch_table:
        return dispatch_table["default"](*args, **kwargs)

    fn = dispatch_table[device]

    # Some device agnostic functions have no-op (`None`) entries for certain devices.
    # Guard against calling `None` here and return `None` to be handled at user level.
    if fn is None:
        return None
    return fn(*args, **kwargs)


if is_torch_available():
    # Mappings from device names to callable functions to support device agnostic
    # testing.
    BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}
    BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "default": None}
    BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "default": lambda: 1}
else:
    BACKEND_MANUAL_SEED = {"default": None}
    BACKEND_EMPTY_CACHE = {"default": None}
    BACKEND_DEVICE_COUNT = {"default": lambda: 0}


def backend_manual_seed(device: str, seed: int):
    return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)


def backend_empty_cache(device: str):
    return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)


def backend_device_count(device: str):
    return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)
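

# Example (a hypothetical sketch; `torch_device` is the device string used by the test suite):
#
#     backend_manual_seed(torch_device, 0)   # seeds cuda or cpu as appropriate
#     backend_empty_cache(torch_device)      # no-op (`None` entry) on cpu
#     n = backend_device_count(torch_device)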


if is_torch_available():
    # If `TRANSFORMERS_TEST_DEVICE_SPEC` is enabled we need to import extra entries
    # into device to function mappings.
    if "TRANSFORMERS_TEST_DEVICE_SPEC" in os.environ:
        device_spec_path = os.environ["TRANSFORMERS_TEST_DEVICE_SPEC"]
        if not Path(device_spec_path).is_file():
            raise ValueError(
                f"Specified path to device spec file is not a file or not found. Received '{device_spec_path}'"
            )

        # Try to strip extension for later import - also verifies we are importing a
        # python file.
        try:
            import_name = device_spec_path[: device_spec_path.index(".py")]
        except ValueError as e:
            raise ValueError(f"Provided device spec file was not a Python file! Received '{device_spec_path}'") from e

        device_spec_module = importlib.import_module(import_name)

        # Imported file must contain `DEVICE_NAME`. If it doesn't, terminate early.
        try:
            device_name = device_spec_module.DEVICE_NAME
        except AttributeError as e:
            raise AttributeError("Device spec file did not contain `DEVICE_NAME`") from e

        if "TRANSFORMERS_TEST_DEVICE" in os.environ and torch_device != device_name:
            msg = f"Mismatch between environment variable `TRANSFORMERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
            msg += "Either unset `TRANSFORMERS_TEST_DEVICE` or ensure it matches device spec name."
            raise ValueError(msg)

        torch_device = device_name

        def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
            try:
                # Try to import the function directly
                spec_fn = getattr(device_spec_module, attribute_name)
                device_fn_dict[torch_device] = spec_fn
            except AttributeError as e:
                # If the function doesn't exist, and there is no default, throw an error
                if "default" not in device_fn_dict:
                    raise AttributeError(
                        f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
                    ) from e

        # Add one entry here for each `BACKEND_*` dictionary.
        update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
        update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
        update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
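

# Example (a hypothetical sketch of a device spec file that `TRANSFORMERS_TEST_DEVICE_SPEC`
# could point at, e.g. `spec.py`; the "xpu" device and its functions are purely illustrative,
# only the `DEVICE_NAME` / `*_FN` attribute names are dictated by the code above):
#
#     import torch
#
#     DEVICE_NAME = "xpu"
#     MANUAL_SEED_FN = torch.xpu.manual_seed
#     EMPTY_CACHE_FN = torch.xpu.empty_cache
#     DEVICE_COUNT_FN = torch.xpu.device_count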


def compare_pipeline_output_to_hub_spec(output, hub_spec):
    missing_keys = []
    unexpected_keys = []
    all_field_names = {field.name for field in fields(hub_spec)}
    matching_keys = sorted([key for key in output.keys() if key in all_field_names])

    # Fields with a MISSING default are required and must be in the output
    for field in fields(hub_spec):
        if field.default is MISSING and field.name not in output:
            missing_keys.append(field.name)

    # All output keys must match either a required or optional field in the Hub spec
    for output_key in output:
        if output_key not in all_field_names:
            unexpected_keys.append(output_key)

    if missing_keys or unexpected_keys:
        error = ["Pipeline output does not match Hub spec!"]
        if matching_keys:
            error.append(f"Matching keys: {matching_keys}")
        if missing_keys:
            error.append(f"Missing required keys in pipeline output: {missing_keys}")
        if unexpected_keys:
            error.append(f"Keys in pipeline output that are not in Hub spec: {unexpected_keys}")
        raise KeyError("\n".join(error))
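

# Example (a hypothetical sketch; `ClassificationOutputElement` stands in for a
# dataclass-based Hub task spec with `label` and `score` fields):
#
#     output = {"label": "POSITIVE", "score": 0.99}
#     compare_pipeline_output_to_hub_spec(output, ClassificationOutputElement)
#     # raises KeyError if `output` misses a required field or carries extra keys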