# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import warnings
from pathlib import Path
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

from huggingface_hub import model_info

from ..configuration_utils import PretrainedConfig
from ..dynamic_module_utils import get_class_from_dynamic_module
from ..feature_extraction_utils import PreTrainedFeatureExtractor
from ..image_processing_utils import BaseImageProcessor
from ..models.auto.configuration_auto import AutoConfig
from ..models.auto.feature_extraction_auto import FEATURE_EXTRACTOR_MAPPING, AutoFeatureExtractor
from ..models.auto.image_processing_auto import IMAGE_PROCESSOR_MAPPING, AutoImageProcessor
from ..models.auto.modeling_auto import AutoModelForDepthEstimation, AutoModelForImageToImage
from ..models.auto.processing_auto import PROCESSOR_MAPPING, AutoProcessor
from ..models.auto.tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer
from ..processing_utils import ProcessorMixin
from ..tokenization_utils import PreTrainedTokenizer
from ..utils import (
    CONFIG_NAME,
    HUGGINGFACE_CO_RESOLVE_ENDPOINT,
    cached_file,
    extract_commit_hash,
    find_adapter_config_file,
    is_kenlm_available,
    is_offline_mode,
    is_peft_available,
    is_pyctcdecode_available,
    is_tf_available,
    is_torch_available,
    logging,
)
from .audio_classification import AudioClassificationPipeline
from .automatic_speech_recognition import AutomaticSpeechRecognitionPipeline
from .base import (
    ArgumentHandler,
    CsvPipelineDataFormat,
    JsonPipelineDataFormat,
    PipedPipelineDataFormat,
    Pipeline,
    PipelineDataFormat,
    PipelineException,
    PipelineRegistry,
    get_default_model_and_revision,
    infer_framework_load_model,
)
from .depth_estimation import DepthEstimationPipeline
from .document_question_answering import DocumentQuestionAnsweringPipeline
from .feature_extraction import FeatureExtractionPipeline
from .fill_mask import FillMaskPipeline
from .image_classification import ImageClassificationPipeline
from .image_feature_extraction import ImageFeatureExtractionPipeline
from .image_segmentation import ImageSegmentationPipeline
from .image_to_image import ImageToImagePipeline
from .image_to_text import ImageToTextPipeline
from .mask_generation import MaskGenerationPipeline
from .object_detection import ObjectDetectionPipeline
from .question_answering import QuestionAnsweringArgumentHandler, QuestionAnsweringPipeline
from .table_question_answering import TableQuestionAnsweringArgumentHandler, TableQuestionAnsweringPipeline
from .text2text_generation import SummarizationPipeline, Text2TextGenerationPipeline, TranslationPipeline
from .text_classification import TextClassificationPipeline
from .text_generation import TextGenerationPipeline
from .text_to_audio import TextToAudioPipeline
from .token_classification import (
    AggregationStrategy,
    NerPipeline,
    TokenClassificationArgumentHandler,
    TokenClassificationPipeline,
)
from .video_classification import VideoClassificationPipeline
from .visual_question_answering import VisualQuestionAnsweringPipeline
from .zero_shot_audio_classification import ZeroShotAudioClassificationPipeline
from .zero_shot_classification import ZeroShotClassificationArgumentHandler, ZeroShotClassificationPipeline
from .zero_shot_image_classification import ZeroShotImageClassificationPipeline
from .zero_shot_object_detection import ZeroShotObjectDetectionPipeline

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import (
        TFAutoModel,
        TFAutoModelForCausalLM,
        TFAutoModelForImageClassification,
        TFAutoModelForMaskedLM,
        TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSequenceClassification,
        TFAutoModelForTableQuestionAnswering,
        TFAutoModelForTokenClassification,
        TFAutoModelForVision2Seq,
        TFAutoModelForZeroShotImageClassification,
    )

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import (
        AutoModel,
        AutoModelForAudioClassification,
        AutoModelForCausalLM,
        AutoModelForCTC,
        AutoModelForDocumentQuestionAnswering,
        AutoModelForImageClassification,
        AutoModelForImageSegmentation,
        AutoModelForMaskedLM,
        AutoModelForMaskGeneration,
        AutoModelForObjectDetection,
        AutoModelForQuestionAnswering,
        AutoModelForSemanticSegmentation,
        AutoModelForSeq2SeqLM,
        AutoModelForSequenceClassification,
        AutoModelForSpeechSeq2Seq,
        AutoModelForTableQuestionAnswering,
        AutoModelForTextToSpectrogram,
        AutoModelForTextToWaveform,
        AutoModelForTokenClassification,
        AutoModelForVideoClassification,
        AutoModelForVision2Seq,
        AutoModelForVisualQuestionAnswering,
        AutoModelForZeroShotImageClassification,
        AutoModelForZeroShotObjectDetection,
    )

if TYPE_CHECKING:
    from ..modeling_tf_utils import TFPreTrainedModel
    from ..modeling_utils import PreTrainedModel
    from ..tokenization_utils_fast import PreTrainedTokenizerFast


logger = logging.get_logger(__name__)


# Register all the supported tasks here
TASK_ALIASES = {
    "sentiment-analysis": "text-classification",
    "ner": "token-classification",
    "vqa": "visual-question-answering",
    "text-to-speech": "text-to-audio",
}
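# For illustration (a hedged sketch, not part of the original module): aliases are
# resolved to their canonical task name before registry lookup, so these two calls
# build the same pipeline.
#
#     from transformers import pipeline
#
#     pipe = pipeline("sentiment-analysis")   # alias
#     pipe = pipeline("text-classification")  # canonical task name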
SUPPORTED_TASKS = {
    "audio-classification": {
        "impl": AudioClassificationPipeline,
        "tf": (),
        "pt": (AutoModelForAudioClassification,) if is_torch_available() else (),
        "default": {"model": {"pt": ("superb/wav2vec2-base-superb-ks", "372e048")}},
        "type": "audio",
    },
    "automatic-speech-recognition": {
        "impl": AutomaticSpeechRecognitionPipeline,
        "tf": (),
        "pt": (AutoModelForCTC, AutoModelForSpeechSeq2Seq) if is_torch_available() else (),
        "default": {"model": {"pt": ("facebook/wav2vec2-base-960h", "22aad52")}},
        "type": "multimodal",
    },
    "text-to-audio": {
        "impl": TextToAudioPipeline,
        "tf": (),
        "pt": (AutoModelForTextToWaveform, AutoModelForTextToSpectrogram) if is_torch_available() else (),
        "default": {"model": {"pt": ("suno/bark-small", "1dbd7a1")}},
        "type": "text",
    },
    "feature-extraction": {
        "impl": FeatureExtractionPipeline,
        "tf": (TFAutoModel,) if is_tf_available() else (),
        "pt": (AutoModel,) if is_torch_available() else (),
        "default": {
            "model": {
                "pt": ("distilbert/distilbert-base-cased", "6ea8117"),
                "tf": ("distilbert/distilbert-base-cased", "6ea8117"),
            }
        },
        "type": "multimodal",
    },
    "text-classification": {
        "impl": TextClassificationPipeline,
        "tf": (TFAutoModelForSequenceClassification,) if is_tf_available() else (),
        "pt": (AutoModelForSequenceClassification,) if is_torch_available() else (),
        "default": {
            "model": {
                "pt": ("distilbert/distilbert-base-uncased-finetuned-sst-2-english", "714eb0f"),
                "tf": ("distilbert/distilbert-base-uncased-finetuned-sst-2-english", "714eb0f"),
            },
        },
        "type": "text",
    },
    "token-classification": {
        "impl": TokenClassificationPipeline,
        "tf": (TFAutoModelForTokenClassification,) if is_tf_available() else (),
        "pt": (AutoModelForTokenClassification,) if is_torch_available() else (),
        "default": {
            "model": {
                "pt": ("dbmdz/bert-large-cased-finetuned-conll03-english", "4c53496"),
                "tf": ("dbmdz/bert-large-cased-finetuned-conll03-english", "4c53496"),
            },
        },
        "type": "text",
    },
    "question-answering": {
        "impl": QuestionAnsweringPipeline,
        "tf": (TFAutoModelForQuestionAnswering,) if is_tf_available() else (),
        "pt": (AutoModelForQuestionAnswering,) if is_torch_available() else (),
        "default": {
            "model": {
                "pt": ("distilbert/distilbert-base-cased-distilled-squad", "564e9b5"),
                "tf": ("distilbert/distilbert-base-cased-distilled-squad", "564e9b5"),
            },
        },
        "type": "text",
    },
    "table-question-answering": {
        "impl": TableQuestionAnsweringPipeline,
        "pt": (AutoModelForTableQuestionAnswering,) if is_torch_available() else (),
        "tf": (TFAutoModelForTableQuestionAnswering,) if is_tf_available() else (),
        "default": {
            "model": {
                "pt": ("google/tapas-base-finetuned-wtq", "e3dde19"),
                "tf": ("google/tapas-base-finetuned-wtq", "e3dde19"),
            },
        },
        "type": "text",
    },
    "visual-question-answering": {
        "impl": VisualQuestionAnsweringPipeline,
        "pt": (AutoModelForVisualQuestionAnswering,) if is_torch_available() else (),
        "tf": (),
        "default": {
            "model": {"pt": ("dandelin/vilt-b32-finetuned-vqa", "d0a1f6a")},
        },
        "type": "multimodal",
    },
    "document-question-answering": {
        "impl": DocumentQuestionAnsweringPipeline,
        "pt": (AutoModelForDocumentQuestionAnswering,) if is_torch_available() else (),
        "tf": (),
        "default": {
            "model": {"pt": ("impira/layoutlm-document-qa", "beed3c4")},
        },
        "type": "multimodal",
    },
    "fill-mask": {
        "impl": FillMaskPipeline,
        "tf": (TFAutoModelForMaskedLM,) if is_tf_available() else (),
        "pt": (AutoModelForMaskedLM,) if is_torch_available() else (),
        "default": {
            "model": {
                "pt": ("distilbert/distilroberta-base", "fb53ab8"),
                "tf": ("distilbert/distilroberta-base", "fb53ab8"),
            }
        },
        "type": "text",
    },
    "summarization": {
        "impl": SummarizationPipeline,
        "tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (),
        "pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (),
        "default": {
            "model": {"pt": ("sshleifer/distilbart-cnn-12-6", "a4f8f3e"), "tf": ("google-t5/t5-small", "df1b051")}
        },
        "type": "text",
    },
    # This task is a special case as it's parametrized by SRC, TGT languages.
    "translation": {
        "impl": TranslationPipeline,
        "tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (),
        "pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (),
        "default": {
            ("en", "fr"): {"model": {"pt": ("google-t5/t5-base", "a9723ea"), "tf": ("google-t5/t5-base", "a9723ea")}},
            ("en", "de"): {"model": {"pt": ("google-t5/t5-base", "a9723ea"), "tf": ("google-t5/t5-base", "a9723ea")}},
            ("en", "ro"): {"model": {"pt": ("google-t5/t5-base", "a9723ea"), "tf": ("google-t5/t5-base", "a9723ea")}},
        },
        "type": "text",
    },
    "text2text-generation": {
        "impl": Text2TextGenerationPipeline,
        "tf": (TFAutoModelForSeq2SeqLM,) if is_tf_available() else (),
        "pt": (AutoModelForSeq2SeqLM,) if is_torch_available() else (),
        "default": {"model": {"pt": ("google-t5/t5-base", "a9723ea"), "tf": ("google-t5/t5-base", "a9723ea")}},
        "type": "text",
    },
    "text-generation": {
        "impl": TextGenerationPipeline,
        "tf": (TFAutoModelForCausalLM,) if is_tf_available() else (),
        "pt": (AutoModelForCausalLM,) if is_torch_available() else (),
        "default": {"model": {"pt": ("openai-community/gpt2", "607a30d"), "tf": ("openai-community/gpt2", "607a30d")}},
        "type": "text",
    },
    "zero-shot-classification": {
        "impl": ZeroShotClassificationPipeline,
        "tf": (TFAutoModelForSequenceClassification,) if is_tf_available() else (),
        "pt": (AutoModelForSequenceClassification,) if is_torch_available() else (),
        "default": {
            "model": {
                "pt": ("facebook/bart-large-mnli", "d7645e1"),
                "tf": ("FacebookAI/roberta-large-mnli", "2a8f12d"),
            },
            "config": {
                "pt": ("facebook/bart-large-mnli", "d7645e1"),
                "tf": ("FacebookAI/roberta-large-mnli", "2a8f12d"),
            },
        },
        "type": "text",
    },
    "zero-shot-image-classification": {
        "impl": ZeroShotImageClassificationPipeline,
        "tf": (TFAutoModelForZeroShotImageClassification,) if is_tf_available() else (),
        "pt": (AutoModelForZeroShotImageClassification,) if is_torch_available() else (),
        "default": {
            "model": {
                "pt": ("openai/clip-vit-base-patch32", "3d74acf"),
                "tf": ("openai/clip-vit-base-patch32", "3d74acf"),
            }
        },
        "type": "multimodal",
    },
    "zero-shot-audio-classification": {
        "impl": ZeroShotAudioClassificationPipeline,
        "tf": (),
        "pt": (AutoModel,) if is_torch_available() else (),
        "default": {
            "model": {
                "pt": ("laion/clap-htsat-fused", "cca9e28"),
            }
        },
        "type": "multimodal",
    },
    "image-classification": {
        "impl": ImageClassificationPipeline,
        "tf": (TFAutoModelForImageClassification,) if is_tf_available() else (),
        "pt": (AutoModelForImageClassification,) if is_torch_available() else (),
        "default": {
            "model": {
                "pt": ("google/vit-base-patch16-224", "3f49326"),
                "tf": ("google/vit-base-patch16-224", "3f49326"),
            }
        },
        "type": "image",
    },
    "image-feature-extraction": {
        "impl": ImageFeatureExtractionPipeline,
        "tf": (TFAutoModel,) if is_tf_available() else (),
        "pt": (AutoModel,) if is_torch_available() else (),
        "default": {
            "model": {
                "pt": ("google/vit-base-patch16-224", "3f49326"),
                "tf": ("google/vit-base-patch16-224", "3f49326"),
            }
        },
        "type": "image",
    },
    "image-segmentation": {
        "impl": ImageSegmentationPipeline,
        "tf": (),
        "pt": (AutoModelForImageSegmentation, AutoModelForSemanticSegmentation) if is_torch_available() else (),
        "default": {"model": {"pt": ("facebook/detr-resnet-50-panoptic", "d53b52a")}},
        "type": "multimodal",
    },
    "image-to-text": {
        "impl": ImageToTextPipeline,
        "tf": (TFAutoModelForVision2Seq,) if is_tf_available() else (),
        "pt": (AutoModelForVision2Seq,) if is_torch_available() else (),
        "default": {
            "model": {
                "pt": ("ydshieh/vit-gpt2-coco-en", "5bebf1e"),
                "tf": ("ydshieh/vit-gpt2-coco-en", "5bebf1e"),
            }
        },
        "type": "multimodal",
    },
    "object-detection": {
        "impl": ObjectDetectionPipeline,
        "tf": (),
        "pt": (AutoModelForObjectDetection,) if is_torch_available() else (),
        "default": {"model": {"pt": ("facebook/detr-resnet-50", "1d5f47b")}},
        "type": "multimodal",
    },
    "zero-shot-object-detection": {
        "impl": ZeroShotObjectDetectionPipeline,
        "tf": (),
        "pt": (AutoModelForZeroShotObjectDetection,) if is_torch_available() else (),
        "default": {"model": {"pt": ("google/owlvit-base-patch32", "cbc355f")}},
        "type": "multimodal",
    },
    "depth-estimation": {
        "impl": DepthEstimationPipeline,
        "tf": (),
        "pt": (AutoModelForDepthEstimation,) if is_torch_available() else (),
        "default": {"model": {"pt": ("Intel/dpt-large", "bc15f29")}},
        "type": "image",
    },
    "video-classification": {
        "impl": VideoClassificationPipeline,
        "tf": (),
        "pt": (AutoModelForVideoClassification,) if is_torch_available() else (),
        "default": {"model": {"pt": ("MCG-NJU/videomae-base-finetuned-kinetics", "488eb9a")}},
        "type": "video",
    },
    "mask-generation": {
        "impl": MaskGenerationPipeline,
        "tf": (),
        "pt": (AutoModelForMaskGeneration,) if is_torch_available() else (),
        "default": {"model": {"pt": ("facebook/sam-vit-huge", "87aecf0")}},
        "type": "multimodal",
    },
    "image-to-image": {
        "impl": ImageToImagePipeline,
        "tf": (),
        "pt": (AutoModelForImageToImage,) if is_torch_available() else (),
        "default": {"model": {"pt": ("caidas/swin2SR-classical-sr-x2-64", "cee1c92")}},
        "type": "image",
    },
}
NO_FEATURE_EXTRACTOR_TASKS = set()
NO_IMAGE_PROCESSOR_TASKS = set()
NO_TOKENIZER_TASKS = set()

# Those model configs are special: they are generic over their task, meaning
# any tokenizer/feature_extractor might be used for a given model, so we cannot
# use the statically defined TOKENIZER_MAPPING and FEATURE_EXTRACTOR_MAPPING to
# see whether the model defines such objects or not.
MULTI_MODEL_AUDIO_CONFIGS = {"SpeechEncoderDecoderConfig"}
MULTI_MODEL_VISION_CONFIGS = {"VisionEncoderDecoderConfig", "VisionTextDualEncoderConfig"}

for task, values in SUPPORTED_TASKS.items():
    if values["type"] == "text":
        NO_FEATURE_EXTRACTOR_TASKS.add(task)
        NO_IMAGE_PROCESSOR_TASKS.add(task)
    elif values["type"] in {"image", "video"}:
        NO_TOKENIZER_TASKS.add(task)
    elif values["type"] in {"audio"}:
        NO_TOKENIZER_TASKS.add(task)
        NO_IMAGE_PROCESSOR_TASKS.add(task)
    elif values["type"] != "multimodal":
        raise ValueError(f"SUPPORTED_TASK {task} contains invalid type {values['type']}")

PIPELINE_REGISTRY = PipelineRegistry(supported_tasks=SUPPORTED_TASKS, task_aliases=TASK_ALIASES)
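# A hedged sketch of how downstream code can extend this registry; the
# `register_pipeline` call mirrors the documented custom-pipeline workflow,
# and `MyPipeline` is a hypothetical `Pipeline` subclass.
#
#     from transformers.pipelines import PIPELINE_REGISTRY
#
#     PIPELINE_REGISTRY.register_pipeline(
#         "my-new-task",
#         pipeline_class=MyPipeline,
#         pt_model=AutoModelForSequenceClassification,
#     )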


def get_supported_tasks() -> List[str]:
    """
    Returns a list of supported task strings.
    """
    return PIPELINE_REGISTRY.get_supported_tasks()
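
# Example usage (a minimal sketch): listing every registered task string.
#
#     from transformers.pipelines import get_supported_tasks
#
#     print(get_supported_tasks())  # ['audio-classification', 'automatic-speech-recognition', ...]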


def get_task(model: str, token: Optional[str] = None, **deprecated_kwargs) -> str:
    use_auth_token = deprecated_kwargs.pop("use_auth_token", None)
    if use_auth_token is not None:
        warnings.warn(
            "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
            FutureWarning,
        )
        if token is not None:
            raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
        token = use_auth_token

    if is_offline_mode():
        raise RuntimeError("You cannot infer task automatically within `pipeline` when using offline mode")
    try:
        info = model_info(model, token=token)
    except Exception as e:
        raise RuntimeError(f"Instantiating a pipeline without a task set raised an error: {e}")
    if not info.pipeline_tag:
        raise RuntimeError(
            f"The model {model} does not seem to have a correct `pipeline_tag` set to infer the task automatically"
        )
    if getattr(info, "library_name", "transformers") != "transformers":
        raise RuntimeError(f"This model is meant to be used with {info.library_name} not with transformers")
    task = info.pipeline_tag
    return task
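
# For illustration (hedged; this queries the Hub, so it needs network access): the
# task is read from the repo's `pipeline_tag` metadata.
#
#     task = get_task("distilbert/distilbert-base-uncased-finetuned-sst-2-english")
#     # -> "text-classification"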


def check_task(task: str) -> Tuple[str, Dict, Any]:
    """
    Checks an incoming task string to validate it's correct, and returns the default Pipeline and Model classes, and
    default models if they exist.

    Args:
        task (`str`):
            The task defining which pipeline will be returned. Currently accepted tasks are:

            - `"audio-classification"`
            - `"automatic-speech-recognition"`
            - `"depth-estimation"`
            - `"document-question-answering"`
            - `"feature-extraction"`
            - `"fill-mask"`
            - `"image-classification"`
            - `"image-feature-extraction"`
            - `"image-segmentation"`
            - `"image-to-text"`
            - `"image-to-image"`
            - `"object-detection"`
            - `"question-answering"`
            - `"summarization"`
            - `"table-question-answering"`
            - `"text2text-generation"`
            - `"text-classification"` (alias `"sentiment-analysis"` available)
            - `"text-generation"`
            - `"text-to-audio"` (alias `"text-to-speech"` available)
            - `"token-classification"` (alias `"ner"` available)
            - `"translation"`
            - `"translation_xx_to_yy"`
            - `"video-classification"`
            - `"visual-question-answering"` (alias `"vqa"` available)
            - `"zero-shot-classification"`
            - `"zero-shot-image-classification"`
            - `"zero-shot-object-detection"`

    Returns:
        (normalized_task: `str`, task_defaults: `dict`, task_options: (`tuple`, None)) The normalized task name
        (with alias and options removed), the actual dictionary required to initialize the pipeline, and some extra
        task options for parametrized tasks like "translation_XX_to_YY".
    """
    return PIPELINE_REGISTRY.check_task(task)
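
# A hedged example of the parametrized-task handling: "translation_en_to_fr"
# normalizes to "translation" and yields the language pair as task options.
#
#     normalized_task, targeted_task, task_options = check_task("translation_en_to_fr")
#     # normalized_task == "translation", task_options == ("en", "fr")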


def clean_custom_task(task_info):
    import transformers

    if "impl" not in task_info:
        raise RuntimeError("This model introduces a custom pipeline without specifying its implementation.")
    pt_class_names = task_info.get("pt", ())
    if isinstance(pt_class_names, str):
        pt_class_names = [pt_class_names]
    task_info["pt"] = tuple(getattr(transformers, c) for c in pt_class_names)
    tf_class_names = task_info.get("tf", ())
    if isinstance(tf_class_names, str):
        tf_class_names = [tf_class_names]
    task_info["tf"] = tuple(getattr(transformers, c) for c in tf_class_names)
    return task_info, None
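
# For illustration (a hedged sketch): class names from a Hub repo's
# `custom_pipelines` config entry are resolved to real `transformers` classes.
#
#     task_info = {"impl": "MyPipeline", "pt": "AutoModelForSequenceClassification", "tf": ()}
#     task_info, _ = clean_custom_task(task_info)
#     # task_info["pt"] is now (transformers.AutoModelForSequenceClassification,)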


def pipeline(
    task: Optional[str] = None,
    model: Optional[Union[str, "PreTrainedModel", "TFPreTrainedModel"]] = None,
    config: Optional[Union[str, PretrainedConfig]] = None,
    tokenizer: Optional[Union[str, PreTrainedTokenizer, "PreTrainedTokenizerFast"]] = None,
    feature_extractor: Optional[Union[str, PreTrainedFeatureExtractor]] = None,
    image_processor: Optional[Union[str, BaseImageProcessor]] = None,
    processor: Optional[Union[str, ProcessorMixin]] = None,
    framework: Optional[str] = None,
    revision: Optional[str] = None,
    use_fast: bool = True,
    token: Optional[Union[str, bool]] = None,
    device: Optional[Union[int, str, "torch.device"]] = None,
    device_map=None,
    torch_dtype=None,
    trust_remote_code: Optional[bool] = None,
    model_kwargs: Optional[Dict[str, Any]] = None,
    pipeline_class: Optional[Any] = None,
    **kwargs,
) -> Pipeline:
  541. """
  542. Utility factory method to build a [`Pipeline`].
  543. A pipeline consists of:
  544. - One or more components for pre-processing model inputs, such as a [tokenizer](tokenizer),
  545. [image_processor](image_processor), [feature_extractor](feature_extractor), or [processor](processors).
  546. - A [model](model) that generates predictions from the inputs.
  547. - Optional post-processing steps to refine the model's output, which can also be handled by processors.
  548. <Tip>
  549. While there are such optional arguments as `tokenizer`, `feature_extractor`, `image_processor`, and `processor`,
  550. they shouldn't be specified all at once. If these components are not provided, `pipeline` will try to load
  551. required ones automatically. In case you want to provide these components explicitly, please refer to a
  552. specific pipeline in order to get more details regarding what components are required.
  553. </Tip>
  554. Args:
  555. task (`str`):
  556. The task defining which pipeline will be returned. Currently accepted tasks are:
  557. - `"audio-classification"`: will return a [`AudioClassificationPipeline`].
  558. - `"automatic-speech-recognition"`: will return a [`AutomaticSpeechRecognitionPipeline`].
  559. - `"depth-estimation"`: will return a [`DepthEstimationPipeline`].
  560. - `"document-question-answering"`: will return a [`DocumentQuestionAnsweringPipeline`].
  561. - `"feature-extraction"`: will return a [`FeatureExtractionPipeline`].
  562. - `"fill-mask"`: will return a [`FillMaskPipeline`]:.
  563. - `"image-classification"`: will return a [`ImageClassificationPipeline`].
  564. - `"image-feature-extraction"`: will return an [`ImageFeatureExtractionPipeline`].
  565. - `"image-segmentation"`: will return a [`ImageSegmentationPipeline`].
  566. - `"image-to-image"`: will return a [`ImageToImagePipeline`].
  567. - `"image-to-text"`: will return a [`ImageToTextPipeline`].
  568. - `"mask-generation"`: will return a [`MaskGenerationPipeline`].
  569. - `"object-detection"`: will return a [`ObjectDetectionPipeline`].
  570. - `"question-answering"`: will return a [`QuestionAnsweringPipeline`].
  571. - `"summarization"`: will return a [`SummarizationPipeline`].
  572. - `"table-question-answering"`: will return a [`TableQuestionAnsweringPipeline`].
  573. - `"text2text-generation"`: will return a [`Text2TextGenerationPipeline`].
  574. - `"text-classification"` (alias `"sentiment-analysis"` available): will return a
  575. [`TextClassificationPipeline`].
  576. - `"text-generation"`: will return a [`TextGenerationPipeline`]:.
  577. - `"text-to-audio"` (alias `"text-to-speech"` available): will return a [`TextToAudioPipeline`]:.
  578. - `"token-classification"` (alias `"ner"` available): will return a [`TokenClassificationPipeline`].
  579. - `"translation"`: will return a [`TranslationPipeline`].
  580. - `"translation_xx_to_yy"`: will return a [`TranslationPipeline`].
  581. - `"video-classification"`: will return a [`VideoClassificationPipeline`].
  582. - `"visual-question-answering"`: will return a [`VisualQuestionAnsweringPipeline`].
  583. - `"zero-shot-classification"`: will return a [`ZeroShotClassificationPipeline`].
  584. - `"zero-shot-image-classification"`: will return a [`ZeroShotImageClassificationPipeline`].
  585. - `"zero-shot-audio-classification"`: will return a [`ZeroShotAudioClassificationPipeline`].
  586. - `"zero-shot-object-detection"`: will return a [`ZeroShotObjectDetectionPipeline`].
  587. model (`str` or [`PreTrainedModel`] or [`TFPreTrainedModel`], *optional*):
  588. The model that will be used by the pipeline to make predictions. This can be a model identifier or an
  589. actual instance of a pretrained model inheriting from [`PreTrainedModel`] (for PyTorch) or
  590. [`TFPreTrainedModel`] (for TensorFlow).
  591. If not provided, the default for the `task` will be loaded.
  592. config (`str` or [`PretrainedConfig`], *optional*):
  593. The configuration that will be used by the pipeline to instantiate the model. This can be a model
  594. identifier or an actual pretrained model configuration inheriting from [`PretrainedConfig`].
  595. If not provided, the default configuration file for the requested model will be used. That means that if
  596. `model` is given, its default configuration will be used. However, if `model` is not supplied, this
  597. `task`'s default model's config is used instead.
  598. tokenizer (`str` or [`PreTrainedTokenizer`], *optional*):
  599. The tokenizer that will be used by the pipeline to encode data for the model. This can be a model
  600. identifier or an actual pretrained tokenizer inheriting from [`PreTrainedTokenizer`].
  601. If not provided, the default tokenizer for the given `model` will be loaded (if it is a string). If `model`
  602. is not specified or not a string, then the default tokenizer for `config` is loaded (if it is a string).
  603. However, if `config` is also not given or not a string, then the default tokenizer for the given `task`
  604. will be loaded.
  605. feature_extractor (`str` or [`PreTrainedFeatureExtractor`], *optional*):
  606. The feature extractor that will be used by the pipeline to encode data for the model. This can be a model
  607. identifier or an actual pretrained feature extractor inheriting from [`PreTrainedFeatureExtractor`].
  608. Feature extractors are used for non-NLP models, such as Speech or Vision models as well as multi-modal
  609. models. Multi-modal models will also require a tokenizer to be passed.
  610. If not provided, the default feature extractor for the given `model` will be loaded (if it is a string). If
  611. `model` is not specified or not a string, then the default feature extractor for `config` is loaded (if it
  612. is a string). However, if `config` is also not given or not a string, then the default feature extractor
  613. for the given `task` will be loaded.
  614. image_processor (`str` or [`BaseImageProcessor`], *optional*):
  615. The image processor that will be used by the pipeline to preprocess images for the model. This can be a
  616. model identifier or an actual image processor inheriting from [`BaseImageProcessor`].
  617. Image processors are used for Vision models and multi-modal models that require image inputs. Multi-modal
  618. models will also require a tokenizer to be passed.
  619. If not provided, the default image processor for the given `model` will be loaded (if it is a string). If
  620. `model` is not specified or not a string, then the default image processor for `config` is loaded (if it is
  621. a string).
  622. processor (`str` or [`ProcessorMixin`], *optional*):
  623. The processor that will be used by the pipeline to preprocess data for the model. This can be a model
  624. identifier or an actual processor inheriting from [`ProcessorMixin`].
  625. Processors are used for multi-modal models that require multi-modal inputs, for example, a model that
  626. requires both text and image inputs.
  627. If not provided, the default processor for the given `model` will be loaded (if it is a string). If `model`
  628. is not specified or not a string, then the default processor for `config` is loaded (if it is a string).
  629. framework (`str`, *optional*):
  630. The framework to use, either `"pt"` for PyTorch or `"tf"` for TensorFlow. The specified framework must be
  631. installed.
  632. If no framework is specified, will default to the one currently installed. If no framework is specified and
  633. both frameworks are installed, will default to the framework of the `model`, or to PyTorch if no model is
  634. provided.
  635. revision (`str`, *optional*, defaults to `"main"`):
  636. When passing a task name or a string model identifier: The specific model version to use. It can be a
  637. branch name, a tag name, or a commit id, since we use a git-based system for storing models and other
  638. artifacts on huggingface.co, so `revision` can be any identifier allowed by git.
  639. use_fast (`bool`, *optional*, defaults to `True`):
  640. Whether or not to use a Fast tokenizer if possible (a [`PreTrainedTokenizerFast`]).
  641. use_auth_token (`str` or *bool*, *optional*):
  642. The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
  643. when running `huggingface-cli login` (stored in `~/.huggingface`).
  644. device (`int` or `str` or `torch.device`):
  645. Defines the device (*e.g.*, `"cpu"`, `"cuda:1"`, `"mps"`, or a GPU ordinal rank like `1`) on which this
  646. pipeline will be allocated.
  647. device_map (`str` or `Dict[str, Union[int, str, torch.device]`, *optional*):
  648. Sent directly as `model_kwargs` (just a simpler shortcut). When `accelerate` library is present, set
  649. `device_map="auto"` to compute the most optimized `device_map` automatically (see
  650. [here](https://huggingface.co/docs/accelerate/main/en/package_reference/big_modeling#accelerate.cpu_offload)
  651. for more information).
  652. <Tip warning={true}>
  653. Do not use `device_map` AND `device` at the same time as they will conflict
  654. </Tip>
  655. torch_dtype (`str` or `torch.dtype`, *optional*):
  656. Sent directly as `model_kwargs` (just a simpler shortcut) to use the available precision for this model
  657. (`torch.float16`, `torch.bfloat16`, ... or `"auto"`).
  658. trust_remote_code (`bool`, *optional*, defaults to `False`):
  659. Whether or not to allow for custom code defined on the Hub in their own modeling, configuration,
  660. tokenization or even pipeline files. This option should only be set to `True` for repositories you trust
  661. and in which you have read the code, as it will execute code present on the Hub on your local machine.
  662. model_kwargs (`Dict[str, Any]`, *optional*):
  663. Additional dictionary of keyword arguments passed along to the model's `from_pretrained(...,
  664. **model_kwargs)` function.
  665. kwargs (`Dict[str, Any]`, *optional*):
  666. Additional keyword arguments passed along to the specific pipeline init (see the documentation for the
  667. corresponding pipeline class for possible values).
  668. Returns:
  669. [`Pipeline`]: A suitable pipeline for the task.
  670. Examples:
  671. ```python
  672. >>> from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer
  673. >>> # Sentiment analysis pipeline
  674. >>> analyzer = pipeline("sentiment-analysis")
  675. >>> # Question answering pipeline, specifying the checkpoint identifier
  676. >>> oracle = pipeline(
  677. ... "question-answering", model="distilbert/distilbert-base-cased-distilled-squad", tokenizer="google-bert/bert-base-cased"
  678. ... )
  679. >>> # Named entity recognition pipeline, passing in a specific model and tokenizer
  680. >>> model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english")
  681. >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
  682. >>> recognizer = pipeline("ner", model=model, tokenizer=tokenizer)
  683. ```"""
    if model_kwargs is None:
        model_kwargs = {}
    # Make sure we only pass use_auth_token once as a kwarg (it used to be possible to pass it in model_kwargs,
    # this is to keep BC).
    use_auth_token = model_kwargs.pop("use_auth_token", None)
    if use_auth_token is not None:
        warnings.warn(
            "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
            FutureWarning,
        )
        if token is not None:
            raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
        token = use_auth_token

    code_revision = kwargs.pop("code_revision", None)
    commit_hash = kwargs.pop("_commit_hash", None)

    hub_kwargs = {
        "revision": revision,
        "token": token,
        "trust_remote_code": trust_remote_code,
        "_commit_hash": commit_hash,
    }

    if task is None and model is None:
        raise RuntimeError(
            "Impossible to instantiate a pipeline without either a task or a model being specified. "
            "Please provide a task class or a model"
        )

    if model is None and tokenizer is not None:
        raise RuntimeError(
            "Impossible to instantiate a pipeline with tokenizer specified but not the model as the provided tokenizer"
            " may not be compatible with the default model. Please provide a PreTrainedModel class or a"
            " path/identifier to a pretrained model when providing tokenizer."
        )
    if model is None and feature_extractor is not None:
        raise RuntimeError(
            "Impossible to instantiate a pipeline with feature_extractor specified but not the model as the provided"
            " feature_extractor may not be compatible with the default model. Please provide a PreTrainedModel class"
            " or a path/identifier to a pretrained model when providing feature_extractor."
        )

    if isinstance(model, Path):
        model = str(model)

    if commit_hash is None:
        pretrained_model_name_or_path = None
        if isinstance(config, str):
            pretrained_model_name_or_path = config
        elif config is None and isinstance(model, str):
            pretrained_model_name_or_path = model

        if not isinstance(config, PretrainedConfig) and pretrained_model_name_or_path is not None:
            # We make a call to the config file first (which may be absent) to get the commit hash as soon as possible
            resolved_config_file = cached_file(
                pretrained_model_name_or_path,
                CONFIG_NAME,
                _raise_exceptions_for_gated_repo=False,
                _raise_exceptions_for_missing_entries=False,
                _raise_exceptions_for_connection_errors=False,
                cache_dir=model_kwargs.get("cache_dir"),
                **hub_kwargs,
            )
            hub_kwargs["_commit_hash"] = extract_commit_hash(resolved_config_file, commit_hash)
        else:
            hub_kwargs["_commit_hash"] = getattr(config, "_commit_hash", None)

    # Config is the primordial information item.
    # Instantiate config if needed
    if isinstance(config, str):
        config = AutoConfig.from_pretrained(
            config, _from_pipeline=task, code_revision=code_revision, **hub_kwargs, **model_kwargs
        )
        hub_kwargs["_commit_hash"] = config._commit_hash
    elif config is None and isinstance(model, str):
        # Check for an adapter file in the model path if PEFT is available
        if is_peft_available():
            # `find_adapter_config_file` doesn't accept `trust_remote_code`
            _hub_kwargs = {k: v for k, v in hub_kwargs.items() if k != "trust_remote_code"}
            maybe_adapter_path = find_adapter_config_file(
                model,
                token=hub_kwargs["token"],
                revision=hub_kwargs["revision"],
                _commit_hash=hub_kwargs["_commit_hash"],
            )

            if maybe_adapter_path is not None:
                with open(maybe_adapter_path, "r", encoding="utf-8") as f:
                    adapter_config = json.load(f)
                    model = adapter_config["base_model_name_or_path"]

        config = AutoConfig.from_pretrained(
            model, _from_pipeline=task, code_revision=code_revision, **hub_kwargs, **model_kwargs
        )
        hub_kwargs["_commit_hash"] = config._commit_hash

    custom_tasks = {}
    if config is not None and len(getattr(config, "custom_pipelines", {})) > 0:
        custom_tasks = config.custom_pipelines
        if task is None and trust_remote_code is not False:
            if len(custom_tasks) == 1:
                task = list(custom_tasks.keys())[0]
            else:
                raise RuntimeError(
                    "We can't infer the task automatically for this model as there are multiple tasks available. Pick "
                    f"one in {', '.join(custom_tasks.keys())}"
                )

    if task is None and model is not None:
        if not isinstance(model, str):
            raise RuntimeError(
                "Inferring the task automatically requires checking the hub with a model_id defined as a `str`. "
                f"{model} is not a valid model_id."
            )
        task = get_task(model, token)

    # Retrieve the task
    if task in custom_tasks:
        normalized_task = task
        targeted_task, task_options = clean_custom_task(custom_tasks[task])
        if pipeline_class is None:
            if not trust_remote_code:
                raise ValueError(
                    "Loading this pipeline requires you to execute the code in the pipeline file in that"
                    " repo on your local machine. Make sure you have read the code there to avoid malicious use, then"
                    " set the option `trust_remote_code=True` to remove this error."
                )
            class_ref = targeted_task["impl"]
            pipeline_class = get_class_from_dynamic_module(
                class_ref,
                model,
                code_revision=code_revision,
                **hub_kwargs,
            )
    else:
        normalized_task, targeted_task, task_options = check_task(task)
        if pipeline_class is None:
            pipeline_class = targeted_task["impl"]

    # Use default model/config/tokenizer for the task if no model is provided
    if model is None:
        # At that point framework might still be undetermined
        model, default_revision = get_default_model_and_revision(targeted_task, framework, task_options)
        revision = revision if revision is not None else default_revision
        logger.warning(
            f"No model was supplied, defaulted to {model} and revision"
            f" {revision} ({HUGGINGFACE_CO_RESOLVE_ENDPOINT}/{model}).\n"
            "Using a pipeline without specifying a model name and revision in production is not recommended."
        )
        hub_kwargs["revision"] = revision
        if config is None and isinstance(model, str):
            config = AutoConfig.from_pretrained(model, _from_pipeline=task, **hub_kwargs, **model_kwargs)
            hub_kwargs["_commit_hash"] = config._commit_hash

    if device_map is not None:
        if "device_map" in model_kwargs:
            raise ValueError(
                'You cannot use both `pipeline(... device_map=..., model_kwargs={"device_map":...})` as those'
                " arguments might conflict, use only one."
            )
        if device is not None:
            logger.warning(
                "Both `device` and `device_map` are specified. `device` will override `device_map`. You"
                " will most likely encounter unexpected behavior. Please remove `device` and keep `device_map`."
            )
        model_kwargs["device_map"] = device_map
    if torch_dtype is not None:
        if "torch_dtype" in model_kwargs:
            raise ValueError(
                'You cannot use both `pipeline(... torch_dtype=..., model_kwargs={"torch_dtype":...})` as those'
                " arguments might conflict, use only one."
            )
        if isinstance(torch_dtype, str) and hasattr(torch, torch_dtype):
            torch_dtype = getattr(torch, torch_dtype)
        model_kwargs["torch_dtype"] = torch_dtype
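
    # For illustration (a hedged sketch; `device_map="auto"` additionally requires
    # the `accelerate` library): both shortcuts above simply land in `model_kwargs`
    # and are forwarded to the model's `from_pretrained`, e.g.
    #
    #     pipe = pipeline(
    #         "text-generation",
    #         model="openai-community/gpt2",
    #         torch_dtype="bfloat16",  # resolved to torch.bfloat16 by the block above
    #         device_map="auto",
    #     )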

    model_name = model if isinstance(model, str) else None

    # Load the correct model if possible
    # Infer the framework from the model if not already defined
    if isinstance(model, str) or framework is None:
        model_classes = {"tf": targeted_task["tf"], "pt": targeted_task["pt"]}
        framework, model = infer_framework_load_model(
            model,
            model_classes=model_classes,
            config=config,
            framework=framework,
            task=task,
            **hub_kwargs,
            **model_kwargs,
        )

    model_config = model.config
    hub_kwargs["_commit_hash"] = model.config._commit_hash
    load_tokenizer = type(model_config) in TOKENIZER_MAPPING or model_config.tokenizer_class is not None
    load_feature_extractor = type(model_config) in FEATURE_EXTRACTOR_MAPPING or feature_extractor is not None
    load_image_processor = type(model_config) in IMAGE_PROCESSOR_MAPPING or image_processor is not None
    load_processor = type(model_config) in PROCESSOR_MAPPING or processor is not None

    # Check whether the pipeline class actually requires these components to be loaded
    load_tokenizer = load_tokenizer and pipeline_class._load_tokenizer
    load_feature_extractor = load_feature_extractor and pipeline_class._load_feature_extractor
    load_image_processor = load_image_processor and pipeline_class._load_image_processor
    load_processor = load_processor and pipeline_class._load_processor

    # If `model` (an instance of `PretrainedModel` instead of `str`) is passed (and/or same for config), while
    # `image_processor` or `feature_extractor` is `None`, the loading will fail. This happens particularly for some
    # vision tasks when calling `pipeline()` with `model` and only one of the `image_processor` and `feature_extractor`.
    # TODO: we need to make `NO_IMAGE_PROCESSOR_TASKS` and `NO_FEATURE_EXTRACTOR_TASKS` more robust to avoid such issue.
    # This block is only temporary, to make CI green.
    if load_image_processor and load_feature_extractor:
        load_feature_extractor = False

    if (
        tokenizer is None
        and not load_tokenizer
        and normalized_task not in NO_TOKENIZER_TASKS
        # Using class name to avoid importing the real class.
        and (
            model_config.__class__.__name__ in MULTI_MODEL_AUDIO_CONFIGS
            or model_config.__class__.__name__ in MULTI_MODEL_VISION_CONFIGS
        )
    ):
        # This is a special category of models that are fusions of multiple models,
        # so the model_config might not define a tokenizer, but it seems to be
        # necessary for the task, so we're force-trying to load it.
        load_tokenizer = True
    if (
        image_processor is None
        and not load_image_processor
        and normalized_task not in NO_IMAGE_PROCESSOR_TASKS
        # Using class name to avoid importing the real class.
        and model_config.__class__.__name__ in MULTI_MODEL_VISION_CONFIGS
    ):
        # This is a special category of models that are fusions of multiple models,
        # so the model_config might not define an image processor, but it seems to be
        # necessary for the task, so we're force-trying to load it.
        load_image_processor = True
    if (
        feature_extractor is None
        and not load_feature_extractor
        and normalized_task not in NO_FEATURE_EXTRACTOR_TASKS
        # Using class name to avoid importing the real class.
        and model_config.__class__.__name__ in MULTI_MODEL_AUDIO_CONFIGS
    ):
        # This is a special category of models that are fusions of multiple models,
        # so the model_config might not define a feature extractor, but it seems to be
        # necessary for the task, so we're force-trying to load it.
        load_feature_extractor = True

    if task in NO_TOKENIZER_TASKS:
        # These tasks will never require a tokenizer. The model, on the other hand,
        # might have one, but its files could be missing from the Hub; instead of
        # failing on such repos, we just force it not to be loaded.
        load_tokenizer = False

    if task in NO_FEATURE_EXTRACTOR_TASKS:
        load_feature_extractor = False
    if task in NO_IMAGE_PROCESSOR_TASKS:
        load_image_processor = False

    if load_tokenizer:
        # Try to infer tokenizer from model or config name (if provided as str)
        if tokenizer is None:
            if isinstance(model_name, str):
                tokenizer = model_name
            elif isinstance(config, str):
                tokenizer = config
            else:
                # Impossible to guess what is the right tokenizer here
                raise Exception(
                    "Impossible to guess which tokenizer to use. "
                    "Please provide a PreTrainedTokenizer class or a path/identifier to a pretrained tokenizer."
                )

        # Instantiate tokenizer if needed
        if isinstance(tokenizer, (str, tuple)):
            if isinstance(tokenizer, tuple):
                # For tuple we have (tokenizer name, {kwargs})
                use_fast = tokenizer[1].pop("use_fast", use_fast)
                tokenizer_identifier = tokenizer[0]
                tokenizer_kwargs = tokenizer[1]
            else:
                tokenizer_identifier = tokenizer
                tokenizer_kwargs = model_kwargs.copy()
                tokenizer_kwargs.pop("torch_dtype", None)

            tokenizer = AutoTokenizer.from_pretrained(
                tokenizer_identifier, use_fast=use_fast, _from_pipeline=task, **hub_kwargs, **tokenizer_kwargs
            )

    if load_image_processor:
        # Try to infer image processor from model or config name (if provided as str)
        if image_processor is None:
            if isinstance(model_name, str):
                image_processor = model_name
            elif isinstance(config, str):
                image_processor = config
            # Backward compatibility, as `feature_extractor` used to be the name
            # for `ImageProcessor`.
            elif feature_extractor is not None and isinstance(feature_extractor, BaseImageProcessor):
                image_processor = feature_extractor
            else:
                # Impossible to guess what is the right image_processor here
                raise Exception(
                    "Impossible to guess which image processor to use. "
                    "Please provide a PreTrainedImageProcessor class or a path/identifier "
                    "to a pretrained image processor."
                )

        # Instantiate image_processor if needed
        if isinstance(image_processor, (str, tuple)):
            image_processor = AutoImageProcessor.from_pretrained(
                image_processor, _from_pipeline=task, **hub_kwargs, **model_kwargs
            )

    if load_feature_extractor:
        # Try to infer feature extractor from model or config name (if provided as str)
        if feature_extractor is None:
            if isinstance(model_name, str):
                feature_extractor = model_name
            elif isinstance(config, str):
                feature_extractor = config
            else:
                # Impossible to guess what is the right feature_extractor here
                raise Exception(
                    "Impossible to guess which feature extractor to use. "
                    "Please provide a PreTrainedFeatureExtractor class or a path/identifier "
                    "to a pretrained feature extractor."
                )

        # Instantiate feature_extractor if needed
        if isinstance(feature_extractor, (str, tuple)):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                feature_extractor, _from_pipeline=task, **hub_kwargs, **model_kwargs
            )

            if (
                feature_extractor._processor_class
                and feature_extractor._processor_class.endswith("WithLM")
                and isinstance(model_name, str)
            ):
                try:
                    import kenlm  # to trigger `ImportError` if not installed
                    from pyctcdecode import BeamSearchDecoderCTC

                    if os.path.isdir(model_name) or os.path.isfile(model_name):
                        decoder = BeamSearchDecoderCTC.load_from_dir(model_name)
                    else:
                        language_model_glob = os.path.join(
                            BeamSearchDecoderCTC._LANGUAGE_MODEL_SERIALIZED_DIRECTORY, "*"
                        )
                        alphabet_filename = BeamSearchDecoderCTC._ALPHABET_SERIALIZED_FILENAME
                        allow_patterns = [language_model_glob, alphabet_filename]
                        decoder = BeamSearchDecoderCTC.load_from_hf_hub(model_name, allow_patterns=allow_patterns)

                    kwargs["decoder"] = decoder
                except ImportError as e:
                    logger.warning(f"Could not load the `decoder` for {model_name}. Defaulting to raw CTC. Error: {e}")
                    if not is_kenlm_available():
                        logger.warning("Try to install `kenlm`: `pip install kenlm`")

                    if not is_pyctcdecode_available():
                        logger.warning("Try to install `pyctcdecode`: `pip install pyctcdecode`")

    if load_processor:
        # Try to infer processor from model or config name (if provided as str)
        if processor is None:
            if isinstance(model_name, str):
                processor = model_name
            elif isinstance(config, str):
                processor = config
            else:
                # Impossible to guess what is the right processor here
                raise Exception(
                    "Impossible to guess which processor to use. "
                    "Please provide a processor instance or a path/identifier "
                    "to a processor."
                )

        # Instantiate processor if needed
        if isinstance(processor, (str, tuple)):
            processor = AutoProcessor.from_pretrained(processor, _from_pipeline=task, **hub_kwargs, **model_kwargs)
            if not isinstance(processor, ProcessorMixin):
                raise TypeError(
                    "Processor was loaded, but it is not an instance of `ProcessorMixin`. "
                    f"Got type `{type(processor)}` instead. Please check that you specified the "
                    "correct pipeline task for the model and that the model has a processor implemented and saved."
                )

    if task == "translation" and model.config.task_specific_params:
        for key in model.config.task_specific_params:
            if key.startswith("translation"):
                task = key
                warnings.warn(
                    f'"translation" task was used, instead of "translation_XX_to_YY", defaulting to "{task}"',
                    UserWarning,
                )
                break

    if tokenizer is not None:
        kwargs["tokenizer"] = tokenizer

    if feature_extractor is not None:
        kwargs["feature_extractor"] = feature_extractor

    if torch_dtype is not None:
        kwargs["torch_dtype"] = torch_dtype

    if image_processor is not None:
        kwargs["image_processor"] = image_processor

    if device is not None:
        kwargs["device"] = device

    if processor is not None:
        kwargs["processor"] = processor

    return pipeline_class(model=model, framework=framework, task=task, **kwargs)
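

# A hedged end-to-end sketch (network access and `torch` assumed): `pipeline`
# resolves the task, model, and preprocessors above, then instantiates the
# selected pipeline class.
#
#     from transformers import pipeline
#
#     translator = pipeline("translation_en_to_fr", model="google-t5/t5-base")
#     translator("How old are you?")  # -> [{"translation_text": ...}]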