generic.py

# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generic utilities
"""

import inspect
import tempfile
import warnings
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields, is_dataclass
from enum import Enum
from functools import partial, wraps
from typing import Any, ContextManager, Iterable, List, Optional, Tuple

import numpy as np
from packaging import version

from .import_utils import (
    get_torch_version,
    is_flax_available,
    is_mlx_available,
    is_tf_available,
    is_torch_available,
    is_torch_fx_proxy,
)


class cached_property(property):
    """
    Descriptor that mimics @property but caches output in a member variable.

    From tensorflow_datasets

    Built into `functools` since Python 3.8.
    """

    def __get__(self, obj, objtype=None):
        # See docs.python.org/3/howto/descriptor.html#properties
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
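
# Illustrative usage (a sketch; `Featurizer` and `expensive_setup` are made-up
# names): the first attribute access runs `fget` and stores the result on the
# instance, so later accesses skip the computation.
#
#     >>> class Featurizer:
#     ...     @cached_property
#     ...     def vocab(self):
#     ...         return expensive_setup()  # runs only on first access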


# vendored from distutils.util
def strtobool(val):
    """Convert a string representation of truth to true (1) or false (0).

    True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values are 'n', 'no', 'f', 'false', 'off', and '0'.
    Raises ValueError if 'val' is anything else.
    """
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
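
# Illustrative behavior (doctest-style sketch): matching is case-insensitive,
# and anything outside the two accepted sets raises.
#
#     >>> strtobool("YES")
#     1
#     >>> strtobool("off")
#     0
#     >>> strtobool("maybe")  # raises ValueError: invalid truth value 'maybe'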


def infer_framework_from_repr(x):
    """
    Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the
    frameworks in a smart order, without the need to import the frameworks).
    """
    representation = str(type(x))
    if representation.startswith("<class 'torch."):
        return "pt"
    elif representation.startswith("<class 'tensorflow."):
        return "tf"
    elif representation.startswith("<class 'jax"):
        return "jax"
    elif representation.startswith("<class 'numpy."):
        return "np"
    elif representation.startswith("<class 'mlx."):
        return "mlx"
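
# Illustrative behavior (a sketch): only the repr of the type is inspected, so
# no framework needs to be imported; unknown types fall through and return None.
#
#     >>> infer_framework_from_repr(np.ones(3))
#     'np'
#     >>> infer_framework_from_repr("not a tensor") is None
#     True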


def _get_frameworks_and_test_func(x):
    """
    Returns an (ordered, since we are in Python 3.7+) dictionary mapping each framework name to its test function,
    placing the framework we can guess from the repr first, then NumPy, then the others.
    """
    framework_to_test = {
        "pt": is_torch_tensor,
        "tf": is_tf_tensor,
        "jax": is_jax_tensor,
        "np": is_numpy_array,
        "mlx": is_mlx_array,
    }
    preferred_framework = infer_framework_from_repr(x)
    # We will test this one first, then numpy, then the others.
    frameworks = [] if preferred_framework is None else [preferred_framework]
    if preferred_framework != "np":
        frameworks.append("np")
    frameworks.extend([f for f in framework_to_test if f not in [preferred_framework, "np"]])
    return {f: framework_to_test[f] for f in frameworks}
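
# Illustrative ordering (a sketch): for an object whose repr looks like a torch
# tensor, the returned keys are ["pt", "np", "tf", "jax", "mlx"], so the most
# likely (and cheapest) test runs first and unneeded framework imports are
# avoided.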


def is_tensor(x):
    """
    Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jaxlib.xla_extension.DeviceArray`, `np.ndarray` or `mlx.array`
    in the order defined by `infer_framework_from_repr`.
    """
    # This gives us a smart order to test the frameworks with the corresponding tests.
    framework_to_test_func = _get_frameworks_and_test_func(x)
    for test_func in framework_to_test_func.values():
        if test_func(x):
            return True

    # Tracers
    if is_torch_fx_proxy(x):
        return True

    if is_flax_available():
        from jax.core import Tracer

        if isinstance(x, Tracer):
            return True

    return False
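
# Illustrative usage (a sketch):
#
#     >>> is_tensor(np.zeros((2, 2)))
#     True
#     >>> is_tensor([0.0, 1.0])  # a plain Python list is not a tensor
#     False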


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """
    Tests if `x` is a numpy array or not.
    """
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """
    Tests if `x` is a torch tensor or not. Safe to call even if torch is not installed.
    """
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """
    Tests if `x` is a torch device or not. Safe to call even if torch is not installed.
    """
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """
    Tests if `x` is a torch dtype or not. Safe to call even if torch is not installed.
    """
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """
    Tests if `x` is a tensorflow tensor or not. Safe to call even if tensorflow is not installed.
    """
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return isinstance(x, tf.Tensor)


def is_tf_symbolic_tensor(x):
    """
    Tests if `x` is a tensorflow symbolic tensor or not (i.e. not eager). Safe to call even if tensorflow is not
    installed.
    """
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """
    Tests if `x` is a Jax tensor or not. Safe to call even if jax is not installed.
    """
    return False if not is_flax_available() else _is_jax(x)


def _is_mlx(x):
    import mlx.core as mx

    return isinstance(x, mx.array)


def is_mlx_array(x):
    """
    Tests if `x` is an mlx array or not. Safe to call even when mlx is not installed.
    """
    return False if not is_mlx_available() else _is_mlx(x)


def to_py_obj(obj):
    """
    Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list.
    """

    framework_to_py_obj = {
        "pt": lambda obj: obj.detach().cpu().tolist(),
        "tf": lambda obj: obj.numpy().tolist(),
        "jax": lambda obj: np.asarray(obj).tolist(),
        "np": lambda obj: obj.tolist(),
    }

    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]

    # This gives us a smart order to test the frameworks with the corresponding tests.
    framework_to_test_func = _get_frameworks_and_test_func(obj)
    for framework, test_func in framework_to_test_func.items():
        if test_func(obj):
            return framework_to_py_obj[framework](obj)

    # tolist also works on 0d np arrays
    if isinstance(obj, np.number):
        return obj.tolist()
    else:
        return obj
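
# Illustrative usage (a sketch): containers are converted recursively, and 0-d
# NumPy scalars become plain Python numbers.
#
#     >>> to_py_obj({"ids": np.array([[1, 2], [3, 4]]), "score": np.float32(0.5)})
#     {'ids': [[1, 2], [3, 4]], 'score': 0.5}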


def to_numpy(obj):
    """
    Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.
    """
    framework_to_numpy = {
        "pt": lambda obj: obj.detach().cpu().numpy(),
        "tf": lambda obj: obj.numpy(),
        "jax": lambda obj: np.asarray(obj),
        "np": lambda obj: obj,
    }

    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)

    # This gives us a smart order to test the frameworks with the corresponding tests.
    framework_to_test_func = _get_frameworks_and_test_func(obj)
    for framework, test_func in framework_to_test_func.items():
        if test_func(obj):
            return framework_to_numpy[framework](obj)

    return obj
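
# Illustrative usage (a sketch): lists/tuples go through `np.array`, dict values
# are converted one by one, and existing NumPy arrays pass through unchanged.
#
#     >>> to_numpy([[1, 2], [3, 4]]).shape
#     (2, 2)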


class ModelOutput(OrderedDict):
    """
    Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a
    tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular
    python dictionary.

    <Tip warning={true}>

    You can't unpack a `ModelOutput` directly. Use the [`~utils.ModelOutput.to_tuple`] method to convert it to a tuple
    before.

    </Tip>
    """

    def __init_subclass__(cls) -> None:
        """Register subclasses as pytree nodes.

        This is necessary to synchronize gradients when using `torch.nn.parallel.DistributedDataParallel` with
        `static_graph=True` with modules that output `ModelOutput` subclasses.
        """
        if is_torch_available():
            if version.parse(get_torch_version()) >= version.parse("2.2"):
                _torch_pytree.register_pytree_node(
                    cls,
                    _model_output_flatten,
                    partial(_model_output_unflatten, output_type=cls),
                    serialized_type_name=f"{cls.__module__}.{cls.__name__}",
                )
            else:
                _torch_pytree._register_pytree_node(
                    cls,
                    _model_output_flatten,
                    partial(_model_output_unflatten, output_type=cls),
                )

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Subclasses of ModelOutput must use the @dataclass decorator
        # This check is done in __init__ because the @dataclass decorator operates after __init_subclass__
        # issubclass() would return True for issubclass(ModelOutput, ModelOutput) when False is needed
        # Just need to check that the current class is not ModelOutput
        is_modeloutput_subclass = self.__class__ != ModelOutput

        if is_modeloutput_subclass and not is_dataclass(self):
            raise TypeError(
                f"{self.__module__}.{self.__class__.__name__} is not a dataclass."
                " This is a subclass of ModelOutput and so must use the @dataclass decorator."
            )

    def __post_init__(self):
        """Check the ModelOutput dataclass.

        Only occurs if @dataclass decorator has been used.
        """
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        # Will raise a KeyError if needed
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def __reduce__(self):
        if not is_dataclass(self):
            return super().__reduce__()
        callable, _args, *remaining = super().__reduce__()
        args = tuple(getattr(self, field.name) for field in fields(self))
        return callable, args, *remaining

    def to_tuple(self) -> Tuple[Any]:
        """
        Convert self to a tuple containing all the attributes/keys that are not `None`.
        """
        return tuple(self[k] for k in self.keys())
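
# Illustrative usage (a sketch; `SampleOutput` is a made-up subclass): `None`
# fields are dropped, and the result can be indexed like a dict or a tuple.
#
#     >>> from dataclasses import dataclass
#     >>> @dataclass
#     ... class SampleOutput(ModelOutput):
#     ...     loss: Optional[float] = None
#     ...     logits: Optional[list] = None
#     >>> out = SampleOutput(logits=[0.1, 0.9])
#     >>> out["logits"] == out[0] == out.logits
#     True
#     >>> out.to_tuple()  # `loss` is None, so only `logits` survives
#     ([0.1, 0.9],)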


if is_torch_available():
    import torch.utils._pytree as _torch_pytree

    def _model_output_flatten(output: ModelOutput) -> Tuple[List[Any], "_torch_pytree.Context"]:
        return list(output.values()), list(output.keys())

    def _model_output_unflatten(
        values: Iterable[Any],
        context: "_torch_pytree.Context",
        output_type=None,
    ) -> ModelOutput:
        return output_type(**dict(zip(context, values)))

    if version.parse(get_torch_version()) >= version.parse("2.2"):
        _torch_pytree.register_pytree_node(
            ModelOutput,
            _model_output_flatten,
            partial(_model_output_unflatten, output_type=ModelOutput),
            serialized_type_name=f"{ModelOutput.__module__}.{ModelOutput.__name__}",
        )
    else:
        _torch_pytree._register_pytree_node(
            ModelOutput,
            _model_output_flatten,
            partial(_model_output_unflatten, output_type=ModelOutput),
        )


class ExplicitEnum(str, Enum):
    """
    Enum with more explicit error message for missing values.
    """

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    """
    Possible values for the `padding` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for tab-completion in an
    IDE.
    """

    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    """
    Possible values for the `return_tensors` argument in [`PreTrainedTokenizerBase.__call__`]. Useful for
    tab-completion in an IDE.
    """

    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"
    MLX = "mlx"
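
# Illustrative behavior (a sketch): `_missing_` turns a bad value into a
# readable error instead of a bare lookup failure.
#
#     >>> TensorType("pt")
#     <TensorType.PYTORCH: 'pt'>
#     >>> TensorType("torch")  # raises ValueError listing 'pt', 'tf', 'np', 'jax', 'mlx'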


class ContextManagers:
    """
    Wrapper for `contextlib.ExitStack` which enters a collection of context managers. Adaptation of `ContextManagers`
    in the `fastcore` library.
    """

    def __init__(self, context_managers: List[ContextManager]):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
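
# Illustrative usage (a sketch, assuming torch is installed): all managers are
# entered on `__enter__` and unwound in reverse order on exit.
#
#     >>> import torch
#     >>> with ContextManagers([torch.no_grad(), torch.inference_mode()]):
#     ...     pass  # both contexts are active inside the block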


def can_return_loss(model_class):
    """
    Check if a given model can return loss.

    Args:
        model_class (`type`): The class of the model.
    """
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """
    Find the labels used by a given model.

    Args:
        model_class (`type`): The class of the model.
    """
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
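
# Illustrative usage (a sketch, assuming transformers is installed; the class is
# only inspected via its signature, never instantiated):
#
#     >>> from transformers import BertForSequenceClassification
#     >>> find_labels(BertForSequenceClassification)
#     ['labels']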


def flatten_dict(d: MutableMapping, parent_key: str = "", delimiter: str = "."):
    """Flatten a nested dict into a single level dict."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
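
# Illustrative usage (a sketch): nested keys are joined with the delimiter.
#
#     >>> flatten_dict({"a": {"b": 1, "c": {"d": 2}}})
#     {'a.b': 1, 'a.c.d': 2}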


@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir: bool = False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
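
# Illustrative usage (a sketch): with `use_temp_dir=True` the yielded path is a
# fresh temporary directory that is deleted on exit; otherwise `working_dir` is
# handed back unchanged.
#
#     >>> with working_or_temp_dir("./out", use_temp_dir=True) as d:
#     ...     print(d)  # e.g. /tmp/tmpabc123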


def transpose(array, axes=None):
    """
    Framework-agnostic version of `numpy.transpose` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        import jax.numpy as jnp

        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """
    Framework-agnostic version of `numpy.reshape` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        import jax.numpy as jnp

        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """
    Framework-agnostic version of `numpy.squeeze` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        import jax.numpy as jnp

        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """
    Framework-agnostic version of `numpy.expand_dims` that will work on torch/TensorFlow/Jax tensors as well as NumPy
    arrays.
    """
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        import jax.numpy as jnp

        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """
    Framework-agnostic version of `numpy.size` that will work on torch/TensorFlow/Jax tensors as well as NumPy arrays.
    """
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")
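
# Illustrative usage (a sketch): the same call works for every supported
# framework, so callers never branch on the tensor type themselves.
#
#     >>> x = np.zeros((1, 3, 4))
#     >>> squeeze(x, axis=0).shape
#     (3, 4)
#     >>> expand_dims(x, 0).shape
#     (1, 1, 3, 4)
#     >>> tensor_size(x)
#     12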


def add_model_info_to_auto_map(auto_map, repo_id):
    """
    Adds the information of the repo_id to a given auto map.
    """
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map
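
# Illustrative usage (a sketch; the repo id and module path are made up):
#
#     >>> add_model_info_to_auto_map({"AutoModel": "modeling.MyModel"}, "user/my-repo")
#     {'AutoModel': 'user/my-repo--modeling.MyModel'}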


def add_model_info_to_custom_pipelines(custom_pipeline, repo_id):
    """
    Adds the information of the repo_id to a given custom pipeline.
    """
    # {custom_pipelines : {task: {"impl": "path.to.task"},...} }
    for task in custom_pipeline.keys():
        if "impl" in custom_pipeline[task]:
            module = custom_pipeline[task]["impl"]
            if "--" not in module:
                custom_pipeline[task]["impl"] = f"{repo_id}--{module}"
    return custom_pipeline


def infer_framework(model_class):
    """
    Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant
    classes are imported or available.
    """
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
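
# Illustrative behavior (a sketch, assuming torch is installed): the MRO walk
# recognizes any class with a base defined in a torch/tensorflow/flax module.
#
#     >>> import torch
#     >>> class MyModel(torch.nn.Module): ...
#     >>> infer_framework(MyModel)
#     'pt'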


def torch_int(x):
    """
    Casts an input to a torch int64 tensor if we are in a tracing context, otherwise to a Python int.
    """
    if not is_torch_available():
        return int(x)

    import torch

    return x.to(torch.int64) if torch.jit.is_tracing() and isinstance(x, torch.Tensor) else int(x)


def torch_float(x):
    """
    Casts an input to a torch float32 tensor if we are in a tracing context, otherwise to a Python float.
    """
    if not is_torch_available():
        return float(x)

    import torch

    return x.to(torch.float32) if torch.jit.is_tracing() and isinstance(x, torch.Tensor) else float(x)
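
# Illustrative behavior (a sketch): outside a torch.jit tracing context these
# are plain Python casts.
#
#     >>> torch_int(7.0)
#     7
#     >>> torch_float(3)
#     3.0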


def filter_out_non_signature_kwargs(extra: Optional[list] = None):
    """
    Decorator to filter out named arguments that are not in the function signature.

    This decorator ensures that only the keyword arguments that match the function's signature, or are specified in the
    `extra` list, are passed to the function. Any additional keyword arguments are filtered out and a warning is issued.

    Parameters:
        extra (`Optional[list]`, *optional*):
            A list of extra keyword argument names that are allowed even if they are not in the function's signature.

    Returns:
        Callable:
            A decorator that wraps the function and filters out invalid keyword arguments.

    Example usage:

        ```python
        @filter_out_non_signature_kwargs(extra=["allowed_extra_arg"])
        def my_function(arg1, arg2, **kwargs):
            print(arg1, arg2, kwargs)


        my_function(arg1=1, arg2=2, allowed_extra_arg=3, invalid_arg=4)
        # This will print: 1 2 {"allowed_extra_arg": 3}
        # And issue a warning: "The following named arguments are not valid for `my_function` and were ignored: 'invalid_arg'"
        ```
    """
    extra = extra or []
    extra_params_to_pass = set(extra)

    def decorator(func):
        sig = inspect.signature(func)
        function_named_args = set(sig.parameters.keys())
        valid_kwargs_to_pass = function_named_args.union(extra_params_to_pass)

        # Required for better warning message
        is_instance_method = "self" in function_named_args
        is_class_method = "cls" in function_named_args

        # Mark function as decorated
        func._filter_out_non_signature_kwargs = True

        @wraps(func)
        def wrapper(*args, **kwargs):
            valid_kwargs = {}
            invalid_kwargs = {}
            for k, v in kwargs.items():
                if k in valid_kwargs_to_pass:
                    valid_kwargs[k] = v
                else:
                    invalid_kwargs[k] = v

            if invalid_kwargs:
                invalid_kwargs_names = [f"'{k}'" for k in invalid_kwargs.keys()]
                invalid_kwargs_names = ", ".join(invalid_kwargs_names)

                # Get the class name for better warning message
                if is_instance_method:
                    cls_prefix = args[0].__class__.__name__ + "."
                elif is_class_method:
                    cls_prefix = args[0].__name__ + "."
                else:
                    cls_prefix = ""

                warnings.warn(
                    f"The following named arguments are not valid for `{cls_prefix}{func.__name__}`"
                    f" and were ignored: {invalid_kwargs_names}",
                    UserWarning,
                    stacklevel=2,
                )

            return func(*args, **valid_kwargs)

        return wrapper

    return decorator