  1. # coding=utf-8
  2. # Copyright 2021 The HuggingFace Inc. team.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. """AutoFeatureExtractor class."""
  16. import importlib
  17. import json
  18. import os
  19. import warnings
  20. from collections import OrderedDict
  21. from typing import Dict, Optional, Union
  22. # Build the list of all feature extractors
  23. from ...configuration_utils import PretrainedConfig
  24. from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
  25. from ...feature_extraction_utils import FeatureExtractionMixin
  26. from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
  27. from .auto_factory import _LazyAutoMapping
  28. from .configuration_auto import (
  29. CONFIG_MAPPING_NAMES,
  30. AutoConfig,
  31. model_type_to_module_name,
  32. replace_list_option_in_docstrings,
  33. )
logger = logging.get_logger(__name__)

# Maps each `model_type` string to the name of its feature extractor class.
# Values are intentionally not unique: many model types reuse another model's
# feature extractor (e.g. several audio models reuse `Wav2Vec2FeatureExtractor`
# and several vision models reuse `ViTFeatureExtractor`).
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
        ("beit", "BeitFeatureExtractor"),
        ("chinese_clip", "ChineseCLIPFeatureExtractor"),
        ("clap", "ClapFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"),
        ("clipseg", "ViTFeatureExtractor"),
        ("clvp", "ClvpFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"),
        ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"),
        ("dac", "DacFeatureExtractor"),
        ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
        ("data2vec-vision", "BeitFeatureExtractor"),
        ("deformable_detr", "DeformableDetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("dinat", "ViTFeatureExtractor"),
        ("donut-swin", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"),
        ("encodec", "EncodecFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"),
        ("glpn", "GLPNFeatureExtractor"),
        ("groupvit", "CLIPFeatureExtractor"),
        ("hubert", "Wav2Vec2FeatureExtractor"),
        ("imagegpt", "ImageGPTFeatureExtractor"),
        ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
        ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
        ("levit", "LevitFeatureExtractor"),
        ("maskformer", "MaskFormerFeatureExtractor"),
        ("mctct", "MCTCTFeatureExtractor"),
        ("mimi", "EncodecFeatureExtractor"),
        ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
        ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
        ("mobilevit", "MobileViTFeatureExtractor"),
        ("moshi", "EncodecFeatureExtractor"),
        ("nat", "ViTFeatureExtractor"),
        ("owlvit", "OwlViTFeatureExtractor"),
        ("perceiver", "PerceiverFeatureExtractor"),
        ("poolformer", "PoolFormerFeatureExtractor"),
        ("pop2piano", "Pop2PianoFeatureExtractor"),
        ("regnet", "ConvNextFeatureExtractor"),
        ("resnet", "ConvNextFeatureExtractor"),
        ("seamless_m4t", "SeamlessM4TFeatureExtractor"),
        ("seamless_m4t_v2", "SeamlessM4TFeatureExtractor"),
        ("segformer", "SegformerFeatureExtractor"),
        ("sew", "Wav2Vec2FeatureExtractor"),
        ("sew-d", "Wav2Vec2FeatureExtractor"),
        ("speech_to_text", "Speech2TextFeatureExtractor"),
        ("speecht5", "SpeechT5FeatureExtractor"),
        ("swiftformer", "ViTFeatureExtractor"),
        ("swin", "ViTFeatureExtractor"),
        ("swinv2", "ViTFeatureExtractor"),
        ("table-transformer", "DetrFeatureExtractor"),
        ("timesformer", "VideoMAEFeatureExtractor"),
        ("tvlt", "TvltFeatureExtractor"),
        ("unispeech", "Wav2Vec2FeatureExtractor"),
        ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
        ("univnet", "UnivNetFeatureExtractor"),
        ("van", "ConvNextFeatureExtractor"),
        ("videomae", "VideoMAEFeatureExtractor"),
        ("vilt", "ViltFeatureExtractor"),
        ("vit", "ViTFeatureExtractor"),
        ("vit_mae", "ViTFeatureExtractor"),
        ("vit_msn", "ViTFeatureExtractor"),
        ("wav2vec2", "Wav2Vec2FeatureExtractor"),
        ("wav2vec2-bert", "Wav2Vec2FeatureExtractor"),
        ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
        ("wavlm", "Wav2Vec2FeatureExtractor"),
        ("whisper", "WhisperFeatureExtractor"),
        ("xclip", "CLIPFeatureExtractor"),
        ("yolos", "YolosFeatureExtractor"),
    ]
)

# Lazy mapping from config classes to feature extractor classes: entries are
# resolved (imported) only on access, so importing this module stays cheap.
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
  111. def feature_extractor_class_from_name(class_name: str):
  112. for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
  113. if class_name in extractors:
  114. module_name = model_type_to_module_name(module_name)
  115. module = importlib.import_module(f".{module_name}", "transformers.models")
  116. try:
  117. return getattr(module, class_name)
  118. except AttributeError:
  119. continue
  120. for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
  121. if getattr(extractor, "__name__", None) == class_name:
  122. return extractor
  123. # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
  124. # init and we return the proper dummy to get an appropriate error message.
  125. main_module = importlib.import_module("transformers")
  126. if hasattr(main_module, class_name):
  127. return getattr(main_module, class_name)
  128. return None
  129. def get_feature_extractor_config(
  130. pretrained_model_name_or_path: Union[str, os.PathLike],
  131. cache_dir: Optional[Union[str, os.PathLike]] = None,
  132. force_download: bool = False,
  133. resume_download: Optional[bool] = None,
  134. proxies: Optional[Dict[str, str]] = None,
  135. token: Optional[Union[bool, str]] = None,
  136. revision: Optional[str] = None,
  137. local_files_only: bool = False,
  138. **kwargs,
  139. ):
  140. """
  141. Loads the tokenizer configuration from a pretrained model tokenizer configuration.
  142. Args:
  143. pretrained_model_name_or_path (`str` or `os.PathLike`):
  144. This can be either:
  145. - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
  146. huggingface.co.
  147. - a path to a *directory* containing a configuration file saved using the
  148. [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
  149. cache_dir (`str` or `os.PathLike`, *optional*):
  150. Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
  151. cache should not be used.
  152. force_download (`bool`, *optional*, defaults to `False`):
  153. Whether or not to force to (re-)download the configuration files and override the cached versions if they
  154. exist.
  155. resume_download:
  156. Deprecated and ignored. All downloads are now resumed by default when possible.
  157. Will be removed in v5 of Transformers.
  158. proxies (`Dict[str, str]`, *optional*):
  159. A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
  160. 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
  161. token (`str` or *bool*, *optional*):
  162. The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
  163. when running `huggingface-cli login` (stored in `~/.huggingface`).
  164. revision (`str`, *optional*, defaults to `"main"`):
  165. The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
  166. git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
  167. identifier allowed by git.
  168. local_files_only (`bool`, *optional*, defaults to `False`):
  169. If `True`, will only try to load the tokenizer configuration from local files.
  170. <Tip>
  171. Passing `token=True` is required when you want to use a private model.
  172. </Tip>
  173. Returns:
  174. `Dict`: The configuration of the tokenizer.
  175. Examples:
  176. ```python
  177. # Download configuration from huggingface.co and cache.
  178. tokenizer_config = get_tokenizer_config("google-bert/bert-base-uncased")
  179. # This model does not have a tokenizer config so the result will be an empty dict.
  180. tokenizer_config = get_tokenizer_config("FacebookAI/xlm-roberta-base")
  181. # Save a pretrained tokenizer locally and you can reload its config
  182. from transformers import AutoTokenizer
  183. tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
  184. tokenizer.save_pretrained("tokenizer-test")
  185. tokenizer_config = get_tokenizer_config("tokenizer-test")
  186. ```"""
  187. use_auth_token = kwargs.pop("use_auth_token", None)
  188. if use_auth_token is not None:
  189. warnings.warn(
  190. "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
  191. FutureWarning,
  192. )
  193. if token is not None:
  194. raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
  195. token = use_auth_token
  196. resolved_config_file = get_file_from_repo(
  197. pretrained_model_name_or_path,
  198. FEATURE_EXTRACTOR_NAME,
  199. cache_dir=cache_dir,
  200. force_download=force_download,
  201. resume_download=resume_download,
  202. proxies=proxies,
  203. token=token,
  204. revision=revision,
  205. local_files_only=local_files_only,
  206. )
  207. if resolved_config_file is None:
  208. logger.info(
  209. "Could not locate the feature extractor configuration file, will try to use the model config instead."
  210. )
  211. return {}
  212. with open(resolved_config_file, encoding="utf-8") as reader:
  213. return json.load(reader)
class AutoFeatureExtractor:
    r"""
    This is a generic feature extractor class that will be instantiated as one of the feature extractor classes of the
    library when created with the [`AutoFeatureExtractor.from_pretrained`] class method.

    This class cannot be instantiated directly using `__init__()` (throws an error).
    """

    def __init__(self):
        # Deliberately not instantiable: the class only acts as a factory via
        # `from_pretrained`.
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        r"""
        Instantiate one of the feature extractor classes of the library from a pretrained model vocabulary.

        The feature extractor class to instantiate is selected based on the `model_type` property of the config object
        (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's
        missing, by falling back to using pattern matching on `pretrained_model_name_or_path`:

        List options

        Params:
            pretrained_model_name_or_path (`str` or `os.PathLike`):
                This can be either:

                - a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on
                  huggingface.co.
                - a path to a *directory* containing a feature extractor file saved using the
                  [`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] method, e.g.,
                  `./my_model_directory/`.
                - a path or url to a saved feature extractor JSON *file*, e.g.,
                  `./my_model_directory/preprocessor_config.json`.
            cache_dir (`str` or `os.PathLike`, *optional*):
                Path to a directory in which a downloaded pretrained model feature extractor should be cached if the
                standard cache should not be used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force to (re-)download the feature extractor files and override the cached versions
                if they exist.
            resume_download:
                Deprecated and ignored. All downloads are now resumed by default when possible.
                Will be removed in v5 of Transformers.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
            token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
                when running `huggingface-cli login` (stored in `~/.huggingface`).
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.
            return_unused_kwargs (`bool`, *optional*, defaults to `False`):
                If `False`, then this function returns just the final feature extractor object. If `True`, then this
                functions returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary
                consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of
                `kwargs` which has not been used to update `feature_extractor` and is otherwise ignored.
            trust_remote_code (`bool`, *optional*, defaults to `False`):
                Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
                should only be set to `True` for repositories you trust and in which you have read the code, as it will
                execute code present on the Hub on your local machine.
            kwargs (`Dict[str, Any]`, *optional*):
                The values in kwargs of any keys which are feature extractor attributes will be used to override the
                loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is
                controlled by the `return_unused_kwargs` keyword parameter.

        <Tip>

        Passing `token=True` is required when you want to use a private model.

        </Tip>

        Examples:

        ```python
        >>> from transformers import AutoFeatureExtractor

        >>> # Download feature extractor from huggingface.co and cache.
        >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")

        >>> # If feature extractor files are in a directory (e.g. feature extractor was saved using *save_pretrained('./test/saved_model/')*)
        >>> # feature_extractor = AutoFeatureExtractor.from_pretrained("./test/saved_model/")
        ```"""
        # Handle the deprecated `use_auth_token` argument; it is an error to pass
        # both `use_auth_token` and `token`.
        use_auth_token = kwargs.pop("use_auth_token", None)
        if use_auth_token is not None:
            warnings.warn(
                "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
                FutureWarning,
            )
            if kwargs.get("token", None) is not None:
                raise ValueError(
                    "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
                )
            kwargs["token"] = use_auth_token

        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        # Step 1: look for the class name in the feature extractor config file
        # (preprocessor_config.json), including a possible custom-code `auto_map`.
        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(
                    pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
                )
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        # Resolve the class name string into an actual class object.
        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        # Step 2: decide between remote (Hub-hosted custom code) and local classes,
        # asking the user for confirmation when `trust_remote_code` is unset.
        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            # Load the custom class from the Hub (or a local dir) and instantiate it.
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            # `code_revision` was only meaningful for fetching the remote module.
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                # Local custom code: register so `save_pretrained` keeps the auto_map.
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class, exist_ok=False):
        """
        Register a new feature extractor for this class.

        Args:
            config_class ([`PretrainedConfig`]):
                The configuration corresponding to the model to register.
            feature_extractor_class ([`FeatureExtractorMixin`]): The feature extractor to register.
        """
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class, exist_ok=exist_ok)