modeling_flax_utils.py
  1. # coding=utf-8
  2. # Copyright 2021 The Google Flax Team Authors and The HuggingFace Inc. team.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. import gc
  16. import json
  17. import os
  18. import re
  19. import warnings
  20. from functools import partial
  21. from pickle import UnpicklingError
  22. from typing import Any, Dict, Optional, Set, Tuple, Union
  23. import flax.linen as nn
  24. import jax
  25. import jax.numpy as jnp
  26. import msgpack.exceptions
  27. from flax.core.frozen_dict import FrozenDict, unfreeze
  28. from flax.serialization import from_bytes, to_bytes
  29. from flax.traverse_util import flatten_dict, unflatten_dict
  30. from jax.random import PRNGKey
  31. from .configuration_utils import PretrainedConfig
  32. from .dynamic_module_utils import custom_object_save
  33. from .generation import FlaxGenerationMixin, GenerationConfig
  34. from .modeling_flax_pytorch_utils import load_pytorch_checkpoint_in_flax_state_dict
  35. from .utils import (
  36. FLAX_WEIGHTS_INDEX_NAME,
  37. FLAX_WEIGHTS_NAME,
  38. SAFE_WEIGHTS_INDEX_NAME,
  39. SAFE_WEIGHTS_NAME,
  40. WEIGHTS_INDEX_NAME,
  41. WEIGHTS_NAME,
  42. PushToHubMixin,
  43. add_code_sample_docstrings,
  44. add_start_docstrings_to_model_forward,
  45. cached_file,
  46. copy_func,
  47. download_url,
  48. has_file,
  49. is_offline_mode,
  50. is_remote_url,
  51. logging,
  52. replace_return_docstrings,
  53. )
  54. from .utils.hub import convert_file_size_to_int, get_checkpoint_shard_files
  55. from .utils.import_utils import is_safetensors_available
  56. if is_safetensors_available():
  57. from safetensors import safe_open
  58. from safetensors.flax import load_file as safe_load_file
  59. from safetensors.flax import save_file as safe_save_file
  60. logger = logging.get_logger(__name__)
  61. def quick_gelu(x):
  62. return x * jax.nn.sigmoid(1.702 * x)
  63. ACT2FN = {
  64. "gelu": partial(nn.gelu, approximate=False),
  65. "relu": nn.relu,
  66. "silu": nn.swish,
  67. "swish": nn.swish,
  68. "gelu_new": partial(nn.gelu, approximate=True),
  69. "quick_gelu": quick_gelu,
  70. "gelu_pytorch_tanh": partial(nn.gelu, approximate=True),
  71. }
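# Usage sketch (illustrative, not part of this module's API surface): model modules typically
# resolve their activation function from the config's `hidden_act` string, e.g.
#
#     self.act_fn = ACT2FN[config.hidden_act]  # assumes `config.hidden_act` is one of the keys above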
  72. def dtype_byte_size(dtype):
  73. """
  74. Returns the size (in bytes) occupied by one parameter of type `dtype`. Example:
  75. ```py
  76. >>> dtype_byte_size(np.float32)
  77. 4
  78. ```
  79. """
  80. if dtype is bool:
  81. return 1 / 8
  82. bit_search = re.search(r"[^\d](\d+)$", dtype.name)
  83. if bit_search is None:
  84. raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
  85. bit_size = int(bit_search.groups()[0])
  86. return bit_size // 8
  87. def flax_shard_checkpoint(params, max_shard_size="10GB"):
  88. """
  89. Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a
  90. given size. The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so
  91. there is no optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For
  92. example, if the limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as
  93. [6GB], [6+2GB], [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB].
  94. <Tip warning={true}>
  95. If one of the model's weights is bigger than `max_shard_size`, it will end up in its own sub-checkpoint which will
  96. have a size greater than `max_shard_size`.
  97. </Tip>
  98. Args:
  99. params (`Union[Dict, FrozenDict]`): A `PyTree` of model parameters.
  100. max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
  101. The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit
  102. (like `"5MB"`).
  103. """
  104. max_shard_size = convert_file_size_to_int(max_shard_size)
  105. sharded_state_dicts = []
  106. current_block = {}
  107. current_block_size = 0
  108. total_size = 0
  109. # flatten the weights to chunk
  110. weights = flatten_dict(params, sep="/")
  111. for item in weights:
  112. weight_size = weights[item].size * dtype_byte_size(weights[item].dtype)
  113. # If adding this weight would tip the current block over the maximal size, we split.
  114. if current_block_size + weight_size > max_shard_size:
  115. sharded_state_dicts.append(current_block)
  116. current_block = {}
  117. current_block_size = 0
  118. current_block[item] = weights[item]
  119. current_block_size += weight_size
  120. total_size += weight_size
  121. # Add the last block
  122. sharded_state_dicts.append(current_block)
  123. # If we only have one shard, we return it
  124. if len(sharded_state_dicts) == 1:
  125. return {FLAX_WEIGHTS_NAME: sharded_state_dicts[0]}, None
  126. # Otherwise, let's build the index
  127. weight_map = {}
  128. shards = {}
  129. for idx, shard in enumerate(sharded_state_dicts):
  130. shard_file = FLAX_WEIGHTS_NAME.replace(".msgpack", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.msgpack")
  131. shards[shard_file] = shard
  132. for weight_name in shard.keys():
  133. weight_map[weight_name] = shard_file
  134. # Add the metadata
  135. metadata = {"total_size": total_size}
  136. index = {"metadata": metadata, "weight_map": weight_map}
  137. return shards, index
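# Illustrative sketch of how `flax_shard_checkpoint` is typically consumed (the `params` tree and
# shard size below are hypothetical):
#
#     shards, index = flax_shard_checkpoint(model.params, max_shard_size="5GB")
#     if index is None:
#         # single shard: {FLAX_WEIGHTS_NAME: params}, serialize with flax.serialization.to_bytes
#         ...
#     else:
#         # `shards` maps "flax_model-00001-of-000NN.msgpack" -> a "/"-flattened sub-dict of weights,
#         # and `index` holds {"metadata": {"total_size": ...}, "weight_map": {...}}
#         ...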
  138. class FlaxPreTrainedModel(PushToHubMixin, FlaxGenerationMixin):
  139. r"""
  140. Base class for all models.
  141. [`FlaxPreTrainedModel`] takes care of storing the configuration of the models and handles methods for loading,
  142. downloading and saving models.
  143. Class attributes (overridden by derived classes):
  144. - **config_class** ([`PretrainedConfig`]) -- A subclass of [`PretrainedConfig`] to use as configuration class
  145. for this model architecture.
  146. - **base_model_prefix** (`str`) -- A string indicating the attribute associated to the base model in derived
  147. classes of the same architecture adding modules on top of the base model.
  148. - **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP
  149. models, `pixel_values` for vision models and `input_values` for speech models).
  150. """
  151. config_class = None
  152. base_model_prefix = ""
  153. main_input_name = "input_ids"
  154. _auto_class = None
  155. _missing_keys = set()
  156. def __init__(
  157. self,
  158. config: PretrainedConfig,
  159. module: nn.Module,
  160. input_shape: Tuple = (1, 1),
  161. seed: int = 0,
  162. dtype: jnp.dtype = jnp.float32,
  163. _do_init: bool = True,
  164. ):
  165. if config is None:
  166. raise ValueError("config cannot be None")
  167. if module is None:
  168. raise ValueError("module cannot be None")
  169. # Those are private to be exposed as typed property on derived classes.
  170. self._config = config
  171. self._module = module
  172. # Those are public as their type is generic to every derived classes.
  173. self.key = PRNGKey(seed)
  174. self.dtype = dtype
  175. self.input_shape = input_shape
  176. self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None
  177. # To check if the model was initialized automatically.
  178. self._is_initialized = _do_init
  179. if _do_init:
  180. # randomly initialized parameters
  181. random_params = self.init_weights(self.key, input_shape)
  182. params_shape_tree = jax.eval_shape(lambda params: params, random_params)
  183. else:
  184. init_fn = partial(self.init_weights, input_shape=input_shape)
  185. params_shape_tree = jax.eval_shape(init_fn, self.key)
  186. logger.info(
  187. "Model weights are not initialized as `_do_init` is set to `False`. "
  188. f"Make sure to call `{self.__class__.__name__}.init_weights` manually to initialize the weights."
  189. )
  190. # get the shape of the parameters
  191. self._params_shape_tree = params_shape_tree
  192. # save required_params as set
  193. self._required_params = set(flatten_dict(unfreeze(params_shape_tree)).keys())
  194. # initialize the parameters
  195. if _do_init:
  196. self.params = random_params
  197. def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> Dict:
  198. raise NotImplementedError(f"init method has to be implemented for {self}")
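# Illustrative sketch of a typical subclass override (the input names and RNG streams are
# model-specific and shown here only as an assumption):
#
#     def init_weights(self, rng, input_shape, params=None):
#         input_ids = jnp.zeros(input_shape, dtype="i4")
#         attention_mask = jnp.ones_like(input_ids)
#         params_rng, dropout_rng = jax.random.split(rng)
#         return self.module.init({"params": params_rng, "dropout": dropout_rng}, input_ids, attention_mask)["params"]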
  199. def enable_gradient_checkpointing(self):
  200. raise NotImplementedError(f"gradient checkpointing method has to be implemented for {self}")
  201. @classmethod
  202. def _from_config(cls, config, **kwargs):
  203. """
  204. All context managers that the model should be initialized under go here.
  205. """
  206. return cls(config, **kwargs)
  207. @property
  208. def framework(self) -> str:
  209. """
  210. :str: Identifies that this is a Flax model.
  211. """
  212. return "flax"
  213. @property
  214. def config(self) -> PretrainedConfig:
  215. return self._config
  216. @property
  217. def module(self) -> nn.Module:
  218. return self._module
  219. @property
  220. def params(self) -> Union[Dict, FrozenDict]:
  221. if not self._is_initialized:
  222. raise ValueError(
  223. "`params` cannot be accessed from model when the model is created with `_do_init=False`. "
  224. "You must call `init_weights` manually and store the params outside of the model and "
  225. "pass it explicitly where needed."
  226. )
  227. return self._params
  228. @property
  229. def required_params(self) -> Set:
  230. return self._required_params
  231. @property
  232. def params_shape_tree(self) -> Dict:
  233. return self._params_shape_tree
  234. @params.setter
  235. def params(self, params: Union[Dict, FrozenDict]):
  236. # don't set params if the model is not initialized
  237. if not self._is_initialized:
  238. raise ValueError(
  239. "`params` cannot be set from model when the model is created with `_do_init=False`. "
  240. "You store the params outside of the model."
  241. )
  242. if isinstance(params, FrozenDict):
  243. params = unfreeze(params)
  244. param_keys = set(flatten_dict(params).keys())
  245. if len(self.required_params - param_keys) > 0:
  246. raise ValueError(
  247. "Some parameters are missing. Make sure that `params` include the following "
  248. f"parameters {self.required_params - param_keys}"
  249. )
  250. self._params = params
  251. def _cast_floating_to(self, params: Union[Dict, FrozenDict], dtype: jnp.dtype, mask: Any = None) -> Any:
  252. """
  253. Helper method to cast floating-point values of given parameter `PyTree` to given `dtype`.
  254. """
  255. # taken from https://github.com/deepmind/jmp/blob/3a8318abc3292be38582794dbf7b094e6583b192/jmp/_src/policy.py#L27
  256. def conditional_cast(param):
  257. if isinstance(param, jnp.ndarray) and jnp.issubdtype(param.dtype, jnp.floating):
  258. param = param.astype(dtype)
  259. return param
  260. if mask is None:
  261. return jax.tree_util.tree_map(conditional_cast, params)
  262. flat_params = flatten_dict(params)
  263. flat_mask, _ = jax.tree_util.tree_flatten(mask)
  264. for masked, key in zip(flat_mask, sorted(flat_params.keys())):
  265. if masked:
  266. flat_params[key] = conditional_cast(flat_params[key])
  267. return unflatten_dict(flat_params)
  268. def to_bf16(self, params: Union[Dict, FrozenDict], mask: Any = None):
  269. r"""
  270. Cast the floating-point `params` to `jax.numpy.bfloat16`. This returns a new `params` tree and does not cast
  271. the `params` in place.
  272. This method can be used on TPU to explicitly convert the model parameters to bfloat16 precision to do full
  273. half-precision training or to save weights in bfloat16 for inference in order to save memory and improve speed.
  274. Arguments:
  275. params (`Union[Dict, FrozenDict]`):
  276. A `PyTree` of model parameters.
  277. mask (`Union[Dict, FrozenDict]`):
  278. A `PyTree` with same structure as the `params` tree. The leaves should be booleans, `True` for params
  279. you want to cast, and should be `False` for those you want to skip.
  280. Examples:
  281. ```python
  282. >>> from transformers import FlaxBertModel
  283. >>> # load model
  284. >>> model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased")
  285. >>> # By default, the model parameters will be in fp32 precision, to cast these to bfloat16 precision
  286. >>> model.params = model.to_bf16(model.params)
  287. >>> # If you don't want to cast certain parameters (for example layer norm bias and scale)
  288. >>> # then pass the mask as follows
  289. >>> from flax import traverse_util
  290. >>> model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased")
  291. >>> flat_params = traverse_util.flatten_dict(model.params)
  292. >>> mask = {
  293. ... path: (path[-2:] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale"))
  294. ... for path in flat_params
  295. ... }
  296. >>> mask = traverse_util.unflatten_dict(mask)
  297. >>> model.params = model.to_bf16(model.params, mask)
  298. ```"""
  299. return self._cast_floating_to(params, jnp.bfloat16, mask)
  300. def to_fp32(self, params: Union[Dict, FrozenDict], mask: Any = None):
  301. r"""
  302. Cast the floating-point `params` to `jax.numpy.float32`. This method can be used to explicitly convert the
  303. model parameters to fp32 precision. This returns a new `params` tree and does not cast the `params` in place.
  304. Arguments:
  305. params (`Union[Dict, FrozenDict]`):
  306. A `PyTree` of model parameters.
  307. mask (`Union[Dict, FrozenDict]`):
  308. A `PyTree` with same structure as the `params` tree. The leaves should be booleans, `True` for params
  309. you want to cast, and should be `False` for those you want to skip
  310. Examples:
  311. ```python
  312. >>> from transformers import FlaxBertModel
  313. >>> # Download model and configuration from huggingface.co
  314. >>> model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased")
  315. >>> # By default, the model params will be in fp32, to illustrate the use of this method,
  316. >>> # we'll first cast to fp16 and back to fp32
  317. >>> model.params = model.to_fp16(model.params)
  318. >>> # now cast back to fp32
  319. >>> model.params = model.to_fp32(model.params)
  320. ```"""
  321. return self._cast_floating_to(params, jnp.float32, mask)
  322. def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any = None):
  323. r"""
  324. Cast the floating-point `params` to `jax.numpy.float16`. This returns a new `params` tree and does not cast the
  325. `params` in place.
  326. This method can be used on GPU to explicitly convert the model parameters to float16 precision to do full
  327. half-precision training or to save weights in float16 for inference in order to save memory and improve speed.
  328. Arguments:
  329. params (`Union[Dict, FrozenDict]`):
  330. A `PyTree` of model parameters.
  331. mask (`Union[Dict, FrozenDict]`):
  332. A `PyTree` with same structure as the `params` tree. The leaves should be booleans, `True` for params
  333. you want to cast, and should be `False` for those you want to skip
  334. Examples:
  335. ```python
  336. >>> from transformers import FlaxBertModel
  337. >>> # load model
  338. >>> model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased")
  339. >>> # By default, the model params will be in fp32, to cast these to float16
  340. >>> model.params = model.to_fp16(model.params)
  341. >>> # If you don't want to cast certain parameters (for example layer norm bias and scale)
  342. >>> # then pass the mask as follows
  343. >>> from flax import traverse_util
  344. >>> model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased")
  345. >>> flat_params = traverse_util.flatten_dict(model.params)
  346. >>> mask = {
  347. ... path: (path[-2:] != ("LayerNorm", "bias") and path[-2:] != ("LayerNorm", "scale"))
  348. ... for path in flat_params
  349. ... }
  350. >>> mask = traverse_util.unflatten_dict(mask)
  351. >>> model.params = model.to_fp16(model.params, mask)
  352. ```"""
  353. return self._cast_floating_to(params, jnp.float16, mask)
  354. @classmethod
  355. def load_flax_weights(cls, resolved_archive_file):
  356. try:
  357. if resolved_archive_file.endswith(".safetensors"):
  358. state = safe_load_file(resolved_archive_file)
  359. state = unflatten_dict(state, sep=".")
  360. else:
  361. with open(resolved_archive_file, "rb") as state_f:
  362. state = from_bytes(cls, state_f.read())
  363. except (UnpicklingError, msgpack.exceptions.ExtraData) as e:
  364. try:
  365. with open(resolved_archive_file) as f:
  366. if f.read().startswith("version"):
  367. raise OSError(
  368. "You seem to have cloned a repository without having git-lfs installed. Please"
  369. " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
  370. " folder you cloned."
  371. )
  372. else:
  373. raise ValueError from e
  374. except (UnicodeDecodeError, ValueError):
  375. raise EnvironmentError(f"Unable to convert {resolved_archive_file} to Flax deserializable object. ")
  376. return state
  377. @classmethod
  378. def load_flax_sharded_weights(cls, shard_files):
  379. """
  380. This is the same as [`flax.serialization.from_bytes`]
  381. (https://flax.readthedocs.io/en/latest/_modules/flax/serialization.html#from_bytes) but for a sharded checkpoint.
  382. This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being
  383. loaded in the model.
  384. Args:
  385. shard_files (`List[str]`):
  386. The list of shard files to load.
  387. Returns:
  388. `Dict`: A nested dictionary of the model parameters, in the expected format for flax models : `{'model':
  389. {'params': {'...'}}}`.
  390. """
  391. # Load the index
  392. state_sharded_dict = {}
  393. for shard_file in shard_files:
  394. # load using msgpack utils
  395. try:
  396. with open(shard_file, "rb") as state_f:
  397. state = from_bytes(cls, state_f.read())
  398. except (UnpicklingError, msgpack.exceptions.ExtraData) as e:
  399. with open(shard_file) as f:
  400. if f.read().startswith("version"):
  401. raise OSError(
  402. "You seem to have cloned a repository without having git-lfs installed. Please"
  403. " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
  404. " folder you cloned."
  405. )
  406. else:
  407. raise ValueError from e
  408. except (UnicodeDecodeError, ValueError):
  409. raise EnvironmentError(f"Unable to convert {shard_file} to Flax deserializable object. ")
  410. state = flatten_dict(state, sep="/")
  411. state_sharded_dict.update(state)
  412. del state
  413. gc.collect()
  414. # the state dict is unflattened to match the format of model.params
  415. return unflatten_dict(state_sharded_dict, sep="/")
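# Illustrative sketch (assumes `shard_files` is a list of local *.msgpack shard paths, e.g. as
# returned by `get_checkpoint_shard_files`):
#
#     state = FlaxBertModel.load_flax_sharded_weights(shard_files)
#     # `state` is a nested dict in the same layout as `model.params`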
  416. @classmethod
  417. def can_generate(cls) -> bool:
  418. """
  419. Returns whether this model can generate sequences with `.generate()`. Returns:
  420. `bool`: Whether this model can generate sequences with `.generate()`.
  421. """
  422. # Detects whether `prepare_inputs_for_generation` has been overwritten, which is a requirement for generation.
  423. # Alternatively, the model can also have a custom `generate` function.
  424. if "GenerationMixin" in str(cls.prepare_inputs_for_generation) and "GenerationMixin" in str(cls.generate):
  425. return False
  426. return True
  427. @classmethod
  428. def from_pretrained(
  429. cls,
  430. pretrained_model_name_or_path: Union[str, os.PathLike],
  431. dtype: jnp.dtype = jnp.float32,
  432. *model_args,
  433. config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None,
  434. cache_dir: Optional[Union[str, os.PathLike]] = None,
  435. ignore_mismatched_sizes: bool = False,
  436. force_download: bool = False,
  437. local_files_only: bool = False,
  438. token: Optional[Union[str, bool]] = None,
  439. revision: str = "main",
  440. **kwargs,
  441. ):
  442. r"""
  443. Instantiate a pretrained flax model from a pre-trained model configuration.
  444. The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come
  445. pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
  446. task.
  447. The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those
  448. weights are discarded.
  449. Parameters:
  450. pretrained_model_name_or_path (`str` or `os.PathLike`):
  451. Can be either:
  452. - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
  453. - A path to a *directory* containing model weights saved using
  454. [`~FlaxPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
  455. - A path or url to a *PyTorch checkpoint file* (e.g., `./pt_model/pytorch_model.bin`). In this case,
  456. `from_pt` should be set to `True`.
  457. dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
  458. The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
  459. `jax.numpy.bfloat16` (on TPUs).
  460. This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
  461. specified all the computation will be performed with the given `dtype`.
  462. **Note that this only specifies the dtype of the computation and does not influence the dtype of model
  463. parameters.**
  464. If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
  465. [`~FlaxPreTrainedModel.to_bf16`].
  466. model_args (sequence of positional arguments, *optional*):
  467. All remaining positional arguments will be passed to the underlying model's `__init__` method.
  468. config (`Union[PretrainedConfig, str, os.PathLike]`, *optional*):
  469. Can be either:
  470. - an instance of a class derived from [`PretrainedConfig`],
  471. - a string or path valid as input to [`~PretrainedConfig.from_pretrained`].
  472. Configuration for the model to use instead of an automatically loaded configuration. Configuration can
  473. be automatically loaded when:
  474. - The model is a model provided by the library (loaded with the *model id* string of a pretrained
  475. model).
  476. - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
  477. save directory.
  478. - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
  479. configuration JSON file named *config.json* is found in the directory.
  480. cache_dir (`Union[str, os.PathLike]`, *optional*):
  481. Path to a directory in which a downloaded pretrained model configuration should be cached if the
  482. standard cache should not be used.
  483. from_pt (`bool`, *optional*, defaults to `False`):
  484. Load the model weights from a PyTorch checkpoint save file (see docstring of
  485. `pretrained_model_name_or_path` argument).
  486. ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
  487. Whether or not to raise an error if some of the weights from the checkpoint do not have the same size
  488. as the weights of the model (if for instance, you are instantiating a model with 10 labels from a
  489. checkpoint with 3 labels).
  490. force_download (`bool`, *optional*, defaults to `False`):
  491. Whether or not to force the (re-)download of the model weights and configuration files, overriding the
  492. cached versions if they exist.
  493. resume_download:
  494. Deprecated and ignored. All downloads are now resumed by default when possible.
  495. Will be removed in v5 of Transformers.
  496. proxies (`Dict[str, str]`, *optional*):
  497. A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
  498. 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
  499. local_files_only (`bool`, *optional*, defaults to `False`):
  500. Whether or not to only look at local files (i.e., do not try to download the model).
  501. token (`str` or `bool`, *optional*):
  502. The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
  503. the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
  504. revision (`str`, *optional*, defaults to `"main"`):
  505. The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
  506. git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
  507. identifier allowed by git.
  508. <Tip>
  509. To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
  510. </Tip>
  511. subfolder (`str`, *optional*, defaults to `""`):
  512. In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
  513. specify the folder name here.
  514. kwargs (remaining dictionary of keyword arguments, *optional*):
  515. Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g.,
  516. `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
  517. automatically loaded:
  518. - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
  519. underlying model's `__init__` method (we assume all relevant updates to the configuration have
  520. already been done)
  521. - If a configuration is not provided, `kwargs` will be first passed to the configuration class
  522. initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
  523. corresponds to a configuration attribute will be used to override said attribute with the
  524. supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
  525. will be passed to the underlying model's `__init__` function.
  526. Examples:
  527. ```python
  528. >>> from transformers import BertConfig, FlaxBertModel
  529. >>> # Download model and configuration from huggingface.co and cache.
  530. >>> model = FlaxBertModel.from_pretrained("google-bert/bert-base-cased")
  531. >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable).
  532. >>> model = FlaxBertModel.from_pretrained("./test/saved_model/")
  533. >>> # Loading from a PyTorch checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).
  534. >>> config = BertConfig.from_json_file("./pt_model/config.json")
  535. >>> model = FlaxBertModel.from_pretrained("./pt_model/pytorch_model.bin", from_pt=True, config=config)
  536. ```"""
  537. from_pt = kwargs.pop("from_pt", False)
  538. resume_download = kwargs.pop("resume_download", None)
  539. proxies = kwargs.pop("proxies", None)
  540. use_auth_token = kwargs.pop("use_auth_token", None)
  541. trust_remote_code = kwargs.pop("trust_remote_code", None)
  542. from_pipeline = kwargs.pop("_from_pipeline", None)
  543. from_auto_class = kwargs.pop("_from_auto", False)
  544. _do_init = kwargs.pop("_do_init", True)
  545. subfolder = kwargs.pop("subfolder", "")
  546. commit_hash = kwargs.pop("_commit_hash", None)
  547. # Not relevant for Flax Models
  548. _ = kwargs.pop("adapter_kwargs", None)
  549. if use_auth_token is not None:
  550. warnings.warn(
  551. "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
  552. FutureWarning,
  553. )
  554. if token is not None:
  555. raise ValueError(
  556. "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
  557. )
  558. token = use_auth_token
  559. if trust_remote_code is True:
  560. logger.warning(
  561. "The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is"
  562. " ignored."
  563. )
  564. user_agent = {"file_type": "model", "framework": "flax", "from_auto_class": from_auto_class}
  565. if from_pipeline is not None:
  566. user_agent["using_pipeline"] = from_pipeline
  567. if is_offline_mode() and not local_files_only:
  568. logger.info("Offline mode: forcing local_files_only=True")
  569. local_files_only = True
  570. # Load config if we don't provide a configuration
  571. if not isinstance(config, PretrainedConfig):
  572. config_path = config if config is not None else pretrained_model_name_or_path
  573. config, model_kwargs = cls.config_class.from_pretrained(
  574. config_path,
  575. cache_dir=cache_dir,
  576. return_unused_kwargs=True,
  577. force_download=force_download,
  578. resume_download=resume_download,
  579. proxies=proxies,
  580. local_files_only=local_files_only,
  581. token=token,
  582. revision=revision,
  583. subfolder=subfolder,
  584. _from_auto=from_auto_class,
  585. _from_pipeline=from_pipeline,
  586. _commit_hash=commit_hash,
  587. **kwargs,
  588. )
  589. else:
  590. model_kwargs = kwargs.copy()
  591. if commit_hash is None:
  592. commit_hash = getattr(config, "_commit_hash", None)
  593. # Add the dtype to model_kwargs
  594. model_kwargs["dtype"] = dtype
  595. # This variable will flag if we're loading a sharded checkpoint. In this case the archive file is just the
  596. # index of the files.
  597. is_sharded = False
  598. # Load model
  599. if pretrained_model_name_or_path is not None:
  600. pretrained_model_name_or_path = str(pretrained_model_name_or_path)
  601. is_local = os.path.isdir(pretrained_model_name_or_path)
  602. if os.path.isdir(pretrained_model_name_or_path):
  603. if os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)):
  604. # Load from a Flax checkpoint
  605. archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)
  606. elif os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_INDEX_NAME)):
  607. # Load from a sharded Flax checkpoint
  608. archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_INDEX_NAME)
  609. is_sharded = True
  610. elif is_safetensors_available() and os.path.isfile(
  611. os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME)
  612. ):
  613. # Load from a safetensors checkpoint
  614. archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_NAME)
  615. elif from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME)):
  616. # Load from a PyTorch checkpoint
  617. archive_file = os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME)
  618. elif from_pt and os.path.isfile(
  619. os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_INDEX_NAME)
  620. ):
  621. # Load from a sharded pytorch checkpoint
  622. archive_file = os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_INDEX_NAME)
  623. is_sharded = True
  624. # At this stage we don't have a weight file so we will raise an error.
  625. elif is_safetensors_available() and os.path.isfile(
  626. os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME)
  627. ):
  628. # Load from a sharded safetensors checkpoint
  629. archive_file = os.path.join(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME)
  630. is_sharded = True
  631. raise NotImplementedError("Support for sharded checkpoints using safetensors is coming soon!")
  632. elif os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME)):
  633. raise EnvironmentError(
  634. f"Error no file named {FLAX_WEIGHTS_NAME} found in directory {pretrained_model_name_or_path} "
  635. "but there is a file for PyTorch weights. Use `from_pt=True` to load this model from those "
  636. "weights."
  637. )
  638. else:
  639. raise EnvironmentError(
  640. f"Error no file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory "
  641. f"{pretrained_model_name_or_path}."
  642. )
  643. elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)):
  644. archive_file = pretrained_model_name_or_path
  645. is_local = True
  646. elif is_remote_url(pretrained_model_name_or_path):
  647. filename = pretrained_model_name_or_path
  648. resolved_archive_file = download_url(pretrained_model_name_or_path)
  649. else:
  650. if from_pt:
  651. filename = WEIGHTS_NAME
  652. else:
  653. filename = FLAX_WEIGHTS_NAME
  654. try:
  655. # Load from URL or cache if already cached
  656. cached_file_kwargs = {
  657. "cache_dir": cache_dir,
  658. "force_download": force_download,
  659. "proxies": proxies,
  660. "resume_download": resume_download,
  661. "local_files_only": local_files_only,
  662. "token": token,
  663. "user_agent": user_agent,
  664. "revision": revision,
  665. "subfolder": subfolder,
  666. "_raise_exceptions_for_gated_repo": False,
  667. "_raise_exceptions_for_missing_entries": False,
  668. "_commit_hash": commit_hash,
  669. }
  670. resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs)
  671. # Maybe the checkpoint is sharded, we try to grab the index name in this case.
  672. if resolved_archive_file is None and filename == FLAX_WEIGHTS_NAME:
  673. resolved_archive_file = cached_file(
  674. pretrained_model_name_or_path, FLAX_WEIGHTS_INDEX_NAME, **cached_file_kwargs
  675. )
  676. if resolved_archive_file is not None:
  677. is_sharded = True
  678. # Maybe the checkpoint is pytorch sharded, we try to grab the pytorch index name in this case.
  679. if resolved_archive_file is None and from_pt:
  680. resolved_archive_file = cached_file(
  681. pretrained_model_name_or_path, WEIGHTS_INDEX_NAME, **cached_file_kwargs
  682. )
  683. if resolved_archive_file is not None:
  684. is_sharded = True
  685. # If we still haven't found anything, look for `safetensors`.
  686. if resolved_archive_file is None:
  687. # No support for sharded safetensors yet, so we'll raise an error if that's all we find.
  688. filename = SAFE_WEIGHTS_NAME
  689. resolved_archive_file = cached_file(
  690. pretrained_model_name_or_path, SAFE_WEIGHTS_NAME, **cached_file_kwargs
  691. )
  692. # Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None
  693. # result when internet is up, the repo and revision exist, but the file does not.
  694. if resolved_archive_file is None:
  695. # Otherwise, maybe there is a TF or Torch model file. We try those to give a helpful error
  696. # message.
  697. has_file_kwargs = {
  698. "revision": revision,
  699. "proxies": proxies,
  700. "token": token,
  701. "cache_dir": cache_dir,
  702. "local_files_only": local_files_only,
  703. }
  704. if has_file(pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME, **has_file_kwargs):
  705. is_sharded = True
  706. raise NotImplementedError(
  707. "Support for sharded checkpoints using safetensors is coming soon!"
  708. )
  709. elif has_file(pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs):
  710. raise EnvironmentError(
  711. f"{pretrained_model_name_or_path} does not appear to have a file named"
  712. f" {FLAX_WEIGHTS_NAME} but there is a file for PyTorch weights. Use `from_pt=True` to"
  713. " load this model from those weights."
  714. )
  715. elif has_file(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME, **has_file_kwargs):
  716. raise EnvironmentError(
  717. f"{pretrained_model_name_or_path} does not appear to have a file named"
  718. f" {FLAX_WEIGHTS_INDEX_NAME} but there is a sharded file for PyTorch weights. Use"
  719. " `from_pt=True` to load this model from those weights."
  720. )
  721. else:
  722. raise EnvironmentError(
  723. f"{pretrained_model_name_or_path} does not appear to have a file named"
  724. f" {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}."
  725. )
  726. except EnvironmentError:
  727. # Re-raise any environment error raised by `cached_file`. It will have a helpful error message adapted
  728. # to the original exception.
  729. raise
  730. except Exception:
  731. # For any other exception, we throw a generic error.
  732. raise EnvironmentError(
  733. f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it"
  734. " from 'https://huggingface.co/models', make sure you don't have a local directory with the"
  735. f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a"
  736. f" directory containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}."
  737. )
  738. if is_local:
  739. logger.info(f"loading weights file {archive_file}")
  740. resolved_archive_file = archive_file
  741. filename = resolved_archive_file.split(os.path.sep)[-1]
  742. else:
  743. logger.info(f"loading weights file {filename} from cache at {resolved_archive_file}")
  744. else:
  745. resolved_archive_file = None
  746. # We'll need to download and cache each checkpoint shard if the checkpoint is sharded.
  747. if is_sharded:
  748. # resolved_archive_file becomes a list of files that point to the different checkpoint shards in this case.
  749. resolved_archive_file, _ = get_checkpoint_shard_files(
  750. pretrained_model_name_or_path,
  751. resolved_archive_file,
  752. cache_dir=cache_dir,
  753. force_download=force_download,
  754. proxies=proxies,
  755. resume_download=resume_download,
  756. local_files_only=local_files_only,
  757. token=token,
  758. user_agent=user_agent,
  759. revision=revision,
  760. subfolder=subfolder,
  761. _commit_hash=commit_hash,
  762. )
  763. safetensors_from_pt = False
  764. if filename == SAFE_WEIGHTS_NAME:
  765. with safe_open(resolved_archive_file, framework="flax") as f:
  766. safetensors_metadata = f.metadata()
  767. if safetensors_metadata is None or safetensors_metadata.get("format") not in ["pt", "tf", "flax"]:
  768. raise OSError(
  769. f"The safetensors archive passed at {resolved_archive_file} does not contain the valid metadata."
  770. " Make sure you save your model with the `save_pretrained` method."
  771. )
  772. safetensors_from_pt = safetensors_metadata.get("format") == "pt"
  773. # init random models
  774. model = cls(config, *model_args, _do_init=_do_init, **model_kwargs)
  775. if from_pt or safetensors_from_pt:
  776. state = load_pytorch_checkpoint_in_flax_state_dict(model, resolved_archive_file, is_sharded)
  777. else:
  778. if is_sharded:
  779. state = cls.load_flax_sharded_weights(resolved_archive_file)
  780. else:
  781. state = cls.load_flax_weights(resolved_archive_file)
  782. # make sure all arrays are stored as jnp.arrays
  783. # NOTE: This is to prevent a bug this will be fixed in Flax >= v0.3.4:
  784. # https://github.com/google/flax/issues/1261
  785. if _do_init:
  786. state = jax.tree_util.tree_map(jnp.array, state)
  787. else:
  788. # keep the params on CPU if we don't want to initialize
  789. state = jax.tree_util.tree_map(lambda x: jax.device_put(x, jax.local_devices(backend="cpu")[0]), state)
  790. if "batch_stats" in state: # if flax model contains batch norm layers
  791. # if model is base model only use model_prefix key
  792. if (
  793. cls.base_model_prefix not in dict(model.params_shape_tree["params"])
  794. and cls.base_model_prefix in state["params"]
  795. ):
  796. state["params"] = state["params"][cls.base_model_prefix]
  797. state["batch_stats"] = state["batch_stats"][cls.base_model_prefix]
  798. # if model is head model and we are loading weights from base model
  799. # we initialize new params dict with base_model_prefix
  800. if (
  801. cls.base_model_prefix in dict(model.params_shape_tree["params"])
  802. and cls.base_model_prefix not in state["params"]
  803. ):
  804. state = {
  805. "params": {cls.base_model_prefix: state["params"]},
  806. "batch_stats": {cls.base_model_prefix: state["batch_stats"]},
  807. }
  808. else:
  809. # if model is base model only use model_prefix key
  810. if cls.base_model_prefix not in dict(model.params_shape_tree) and cls.base_model_prefix in state:
  811. state = state[cls.base_model_prefix]
  812. # if model is head model and we are loading weights from base model
  813. # we initialize new params dict with base_model_prefix
  814. if cls.base_model_prefix in dict(model.params_shape_tree) and cls.base_model_prefix not in state:
  815. state = {cls.base_model_prefix: state}
  816. # flatten dicts
  817. state = flatten_dict(state)
  818. random_state = flatten_dict(unfreeze(model.params if _do_init else model.params_shape_tree))
  819. missing_keys = model.required_params - set(state.keys())
  820. unexpected_keys = set(state.keys()) - model.required_params
  821. # Disable the warning when porting pytorch weights to flax, as flax does not use num_batches_tracked
  822. for unexpected_key in unexpected_keys.copy():
  823. if "num_batches_tracked" in unexpected_key[-1]:
  824. unexpected_keys.remove(unexpected_key)
  825. if missing_keys and not _do_init:
  826. logger.warning(
  827. f"The checkpoint {pretrained_model_name_or_path} is missing required keys: {missing_keys}. "
  828. "Make sure to call model.init_weights to initialize the missing weights."
  829. )
  830. cls._missing_keys = missing_keys
  831. # `mismatched_keys` contains tuples (key, shape1, shape2) of weights in the checkpoint whose shape does not
  832. # match the weights in the model.
  833. mismatched_keys = []
  834. for key in state.keys():
  835. if key in random_state and state[key].shape != random_state[key].shape:
  836. if ignore_mismatched_sizes:
  837. mismatched_keys.append((key, state[key].shape, random_state[key].shape))
  838. state[key] = random_state[key]
  839. else:
  840. raise ValueError(
  841. f"Trying to load the pretrained weight for {key} failed: checkpoint has shape "
  842. f"{state[key].shape} which is incompatible with the model shape {random_state[key].shape}. "
  843. "Using `ignore_mismatched_sizes=True` if you really want to load this checkpoint inside this "
  844. "model."
  845. )
  846. # add missing keys as random parameters if we are initializing
  847. if missing_keys and _do_init:
  848. for missing_key in missing_keys:
  849. state[missing_key] = random_state[missing_key]
  850. # remove unexpected keys to not be saved again
  851. for unexpected_key in unexpected_keys:
  852. del state[unexpected_key]
  853. if len(unexpected_keys) > 0:
  854. logger.warning(
  855. f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when"
  856. f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are"
  857. f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or"
  858. " with another architecture (e.g. initializing a BertForSequenceClassification model from a"
  859. " BertForPreTraining model).\n- This IS NOT expected if you are initializing"
  860. f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical"
  861. " (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
  862. )
  863. else:
  864. logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
  865. if len(missing_keys) > 0:
  866. logger.warning(
  867. f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at"
  868. f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably"
  869. " TRAIN this model on a down-stream task to be able to use it for predictions and inference."
  870. )
  871. elif len(mismatched_keys) == 0:
  872. logger.info(
  873. f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at"
  874. f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint"
  875. f" was trained on, you can already use {model.__class__.__name__} for predictions without further"
  876. " training."
  877. )
  878. if len(mismatched_keys) > 0:
  879. mismatched_warning = "\n".join(
  880. [
  881. f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated"
  882. for key, shape1, shape2 in mismatched_keys
  883. ]
  884. )
  885. logger.warning(
  886. f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at"
  887. f" {pretrained_model_name_or_path} and are newly initialized because the shapes did not"
  888. f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able"
  889. " to use it for predictions and inference."
  890. )
  891. # dictionary of key: dtypes for the model params
  892. param_dtypes = jax.tree_util.tree_map(lambda x: x.dtype, state)
  893. # extract keys of parameters not in jnp.float32
  894. fp16_params = [k for k in param_dtypes if param_dtypes[k] == jnp.float16]
  895. bf16_params = [k for k in param_dtypes if param_dtypes[k] == jnp.bfloat16]
  896. # raise a warning if any of the parameters are not in jnp.float32
  897. if len(fp16_params) > 0:
  898. logger.warning(
  899. f"Some of the weights of {model.__class__.__name__} were initialized in float16 precision from "
  900. f"the model checkpoint at {pretrained_model_name_or_path}:\n{fp16_params}\n"
  901. "You should probably UPCAST the model weights to float32 if this was not intended. "
  902. "See [`~FlaxPreTrainedModel.to_fp32`] for further information on how to do this."
  903. )
  904. if len(bf16_params) > 0:
  905. logger.warning(
  906. f"Some of the weights of {model.__class__.__name__} were initialized in bfloat16 precision from "
  907. f"the model checkpoint at {pretrained_model_name_or_path}:\n{bf16_params}\n"
  908. "You should probably UPCAST the model weights to float32 if this was not intended. "
  909. "See [`~FlaxPreTrainedModel.to_fp32`] for further information on how to do this."
  910. )
  911. # If it is a model with generation capabilities, attempt to load the generation config
  912. if model.can_generate():
  913. try:
  914. model.generation_config = GenerationConfig.from_pretrained(
  915. pretrained_model_name_or_path,
  916. cache_dir=cache_dir,
  917. force_download=force_download,
  918. resume_download=resume_download,
  919. proxies=proxies,
  920. local_files_only=local_files_only,
  921. token=token,
  922. revision=revision,
  923. subfolder=subfolder,
  924. _from_auto=from_auto_class,
  925. _from_pipeline=from_pipeline,
  926. **kwargs,
  927. )
  928. except OSError:
  929. logger.info(
  930. "Generation config file not found, using a generation config created from the model config."
  931. )
  932. pass
  933. if _do_init:
  934. # set correct parameters
  935. model.params = unflatten_dict(state)
  936. return model
  937. else:
  938. return model, unflatten_dict(state)
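# Illustrative sketch of the lazy-initialization path (`_do_init` is a private keyword, shown here
# only to document the tuple return):
#
#     model, params = FlaxBertModel.from_pretrained("google-bert/bert-base-cased", _do_init=False)
#     # `params` stays on CPU and must be passed explicitly, e.g. model(input_ids, params=params)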
  939. def save_pretrained(
  940. self,
  941. save_directory: Union[str, os.PathLike],
  942. params=None,
  943. push_to_hub=False,
  944. max_shard_size="10GB",
  945. token: Optional[Union[str, bool]] = None,
  946. safe_serialization: bool = False,
  947. **kwargs,
  948. ):
  949. """
  950. Save a model and its configuration file to a directory, so that it can be re-loaded using the
  951. [`~FlaxPreTrainedModel.from_pretrained`] class method.
  952. Arguments:
  953. save_directory (`str` or `os.PathLike`):
  954. Directory to which to save. Will be created if it doesn't exist.
  955. push_to_hub (`bool`, *optional*, defaults to `False`):
  956. Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
  957. repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
  958. namespace).
  959. max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
  960. The maximum size for a checkpoint before being sharded. Checkpoint shards will then each be of a size
  961. lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`).
  962. <Tip warning={true}>
  963. If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard
  964. which will be bigger than `max_shard_size`.
  965. </Tip>
  966. token (`str` or `bool`, *optional*):
  967. The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
  968. the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
  969. kwargs (`Dict[str, Any]`, *optional*):
  970. Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
  971. safe_serialization (`bool`, *optional*, defaults to `False`):
  972. Whether to save the model using `safetensors` or through msgpack.
  973. """
  974. use_auth_token = kwargs.pop("use_auth_token", None)
  975. if use_auth_token is not None:
  976. warnings.warn(
  977. "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
  978. FutureWarning,
  979. )
  980. if token is not None:
  981. raise ValueError(
  982. "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
  983. )
  984. token = use_auth_token
  985. if token is not None:
  986. kwargs["token"] = token
  987. if os.path.isfile(save_directory):
  988. logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
  989. return
  990. os.makedirs(save_directory, exist_ok=True)
  991. if push_to_hub:
  992. commit_message = kwargs.pop("commit_message", None)
  993. repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
  994. repo_id = self._create_repo(repo_id, **kwargs)
  995. files_timestamps = self._get_files_timestamps(save_directory)
  996. # get abs dir
  997. save_directory = os.path.abspath(save_directory)
  998. # save config as well
  999. self.config.architectures = [self.__class__.__name__[4:]]
  1000. # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be
  1001. # loaded from the Hub.
  1002. if self._auto_class is not None:
  1003. custom_object_save(self, save_directory, config=self.config)
  1004. self.config.save_pretrained(save_directory)
  1005. if self.can_generate():
  1006. self.generation_config.save_pretrained(save_directory)
  1007. # save model
  1008. weights_name = SAFE_WEIGHTS_NAME if safe_serialization else FLAX_WEIGHTS_NAME
  1009. output_model_file = os.path.join(save_directory, weights_name)
  1010. shards, index = flax_shard_checkpoint(params if params is not None else self.params, max_shard_size)
  1011. # Clean the folder from a previous save
  1012. for filename in os.listdir(save_directory):
  1013. full_filename = os.path.join(save_directory, filename)
  1014. weights_no_suffix = weights_name.replace(".bin", "").replace(".safetensors", "")
  1015. if (
  1016. filename.startswith(weights_no_suffix)
  1017. and os.path.isfile(full_filename)
  1018. and filename not in shards.keys()
  1019. ):
  1020. os.remove(full_filename)
  1021. if index is None:
  1022. if safe_serialization:
  1023. params = params if params is not None else self.params
  1024. flat_dict = flatten_dict(params, sep=".")
  1025. safe_save_file(flat_dict, output_model_file, metadata={"format": "flax"})
  1026. else:
  1027. with open(output_model_file, "wb") as f:
  1028. params = params if params is not None else self.params
  1029. model_bytes = to_bytes(params)
  1030. f.write(model_bytes)
  1031. else:
  1032. save_index_file = os.path.join(save_directory, FLAX_WEIGHTS_INDEX_NAME)
  1033. # Save the index as well
  1034. with open(save_index_file, "w", encoding="utf-8") as f:
  1035. content = json.dumps(index, indent=2, sort_keys=True) + "\n"
  1036. f.write(content)
  1037. logger.info(
  1038. f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be "
  1039. f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the "
  1040. f"index located at {save_index_file}."
  1041. )
  1042. for shard_file, shard in shards.items():
  1043. # the shard items are stored flattened; to save them we unflatten them back into a param tree
  1044. with open(os.path.join(save_directory, shard_file), mode="wb") as f:
  1045. params = unflatten_dict(shard, sep="/")
  1046. shard_bytes = to_bytes(params)
  1047. f.write(shard_bytes)
  1048. logger.info(f"Model weights saved in {output_model_file}")
  1049. if push_to_hub:
  1050. self._upload_modified_files(
  1051. save_directory,
  1052. repo_id,
  1053. files_timestamps,
  1054. commit_message=commit_message,
  1055. token=token,
  1056. )
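# Illustrative sketch (the target directory is hypothetical):
#
#     model.save_pretrained("./my_model_directory", max_shard_size="2GB")
#     # or with safetensors serialization instead of msgpack:
#     model.save_pretrained("./my_model_directory", safe_serialization=True)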
  1057. @classmethod
  1058. def register_for_auto_class(cls, auto_class="FlaxAutoModel"):
  1059. """
  1060. Register this class with a given auto class. This should only be used for custom models as the ones in the
  1061. library are already mapped with an auto class.
  1062. <Tip warning={true}>
  1063. This API is experimental and may have some slight breaking changes in the next releases.
  1064. </Tip>
  1065. Args:
  1066. auto_class (`str` or `type`, *optional*, defaults to `"FlaxAutoModel"`):
  1067. The auto class to register this new model with.
  1068. """
  1069. if not isinstance(auto_class, str):
  1070. auto_class = auto_class.__name__
  1071. import transformers.models.auto as auto_module
  1072. if not hasattr(auto_module, auto_class):
  1073. raise ValueError(f"{auto_class} is not a valid auto class.")
  1074. cls._auto_class = auto_class
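# Illustrative sketch for a custom model repo (the class name is hypothetical):
#
#     class FlaxMyCustomModel(FlaxPreTrainedModel):
#         ...
#
#     FlaxMyCustomModel.register_for_auto_class("FlaxAutoModel")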
  1075. # To update the docstring, we need to copy the method, otherwise we change the original docstring.
  1076. FlaxPreTrainedModel.push_to_hub = copy_func(FlaxPreTrainedModel.push_to_hub)
  1077. if FlaxPreTrainedModel.push_to_hub.__doc__ is not None:
  1078. FlaxPreTrainedModel.push_to_hub.__doc__ = FlaxPreTrainedModel.push_to_hub.__doc__.format(
  1079. object="model", object_class="FlaxAutoModel", object_files="model checkpoint"
  1080. )
  1081. def overwrite_call_docstring(model_class, docstring):
  1082. # copy __call__ function to be sure docstring is changed only for this function
  1083. model_class.__call__ = copy_func(model_class.__call__)
  1084. # delete existing docstring
  1085. model_class.__call__.__doc__ = None
  1086. # set correct docstring
  1087. model_class.__call__ = add_start_docstrings_to_model_forward(docstring)(model_class.__call__)
  1088. def append_call_sample_docstring(
  1089. model_class, checkpoint, output_type, config_class, mask=None, revision=None, real_checkpoint=None
  1090. ):
  1091. model_class.__call__ = copy_func(model_class.__call__)
  1092. model_class.__call__ = add_code_sample_docstrings(
  1093. checkpoint=checkpoint,
  1094. output_type=output_type,
  1095. config_class=config_class,
  1096. model_cls=model_class.__name__,
  1097. revision=revision,
  1098. real_checkpoint=real_checkpoint,
  1099. )(model_class.__call__)
  1100. def append_replace_return_docstrings(model_class, output_type, config_class):
  1101. model_class.__call__ = copy_func(model_class.__call__)
  1102. model_class.__call__ = replace_return_docstrings(
  1103. output_type=output_type,
  1104. config_class=config_class,
  1105. )(model_class.__call__)
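# Illustrative sketch of how these docstring helpers are typically applied in a model file
# (the model, checkpoint, output and config names are hypothetical):
#
#     overwrite_call_docstring(FlaxMyModel, MY_MODEL_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
#     append_call_sample_docstring(FlaxMyModel, "my-org/my-checkpoint", FlaxBaseModelOutput, MyConfig)
#     append_replace_return_docstrings(FlaxMyModelForCausalLM, output_type=FlaxCausalLMOutput, config_class=MyConfig)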