# coding=utf-8
# Copyright 2022 The Fairseq Authors and The Google Flax Team Authors And The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Flax OPT model."""

from functools import partial
from typing import Optional, Tuple

import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
from flax.linen import combine_masks, make_causal_mask
from flax.linen.attention import dot_product_attention_weights
from flax.traverse_util import flatten_dict, unflatten_dict
from jax import lax
from jax.random import PRNGKey

from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxMaskedLMOutput
from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring
from ...utils import add_start_docstrings, logging
from .configuration_opt import OPTConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "facebook/opt-350m"
_CONFIG_FOR_DOC = "OPTConfig"


OPT_START_DOCSTRING = r"""
    This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
    heads, etc.).

    This model is also a Flax Linen
    [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
    regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.

    Finally, this model supports inherent JAX features such as:

    - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
    - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
    - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
    - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)

    Parameters:
        config ([`OPTConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
        dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
            The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
            `jax.numpy.bfloat16` (on TPUs).

            This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
            specified, all the computation will be performed with the given `dtype`.

            **Note that this only specifies the dtype of the computation and does not influence the dtype of model
            parameters.**

            If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
            [`~FlaxPreTrainedModel.to_bf16`].
"""
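
# Example (a minimal sketch, not part of the original file): loading the model in half precision on GPU, using the
# standard `FlaxPreTrainedModel` entry points referenced in the docstring above. The checkpoint name is only
# illustrative.
#
#     import jax.numpy as jnp
#     from transformers import FlaxOPTModel
#
#     # run the forward pass in float16 while leaving the parameter dtype unchanged
#     model = FlaxOPTModel.from_pretrained("facebook/opt-350m", dtype=jnp.float16)
#     # optionally also cast the parameters themselves to float16
#     model.params = model.to_fp16(model.params)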

OPT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
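
# Example (a minimal sketch, not part of the original file): preparing the inputs described above with a tokenizer.
# `AutoTokenizer` is the entry point mentioned in the docstring; `return_tensors="np"` yields NumPy arrays that JAX
# accepts directly.
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
#     inputs = tokenizer(["Hello, my dog is cute"], return_tensors="np")
#     # inputs["input_ids"] and inputs["attention_mask"] both have shape (batch_size, sequence_length)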


# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention with Bart->OPT
class FlaxOPTAttention(nn.Module):
    config: OPTConfig
    embed_dim: int
    num_heads: int
    dropout: float = 0.0
    causal: bool = False
    bias: bool = True
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self) -> None:
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {self.num_heads})."
            )

        dense = partial(
            nn.Dense,
            self.embed_dim,
            use_bias=self.bias,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.init_std),
        )

        self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
        self.out_proj = dense()

        self.dropout_layer = nn.Dropout(rate=self.dropout)

        if self.causal:
            self.causal_mask = make_causal_mask(
                jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
            )

    def _split_heads(self, hidden_states):
        return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))

    def _merge_heads(self, hidden_states):
        return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))

    @nn.compact
    def _concatenate_to_cache(self, key, value, query, attention_mask):
        """
        This function takes projected key, value states from a single input token and concatenates the states to
        cached states from previous steps. This function is slightly adapted from the official Flax repository:
        https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
        """
        # detect if we're initializing by absence of existing cache data.
        is_initialized = self.has_variable("cache", "cached_key")
        cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
        cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
        cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))

        if is_initialized:
            *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
            # update key, value caches with our new 1d spatial slices
            cur_index = cache_index.value
            indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
            key = lax.dynamic_update_slice(cached_key.value, key, indices)
            value = lax.dynamic_update_slice(cached_value.value, value, indices)
            cached_key.value = key
            cached_value.value = value
            num_updated_cache_vectors = query.shape[1]
            cache_index.value = cache_index.value + num_updated_cache_vectors
            # causal mask for cached decoder self-attention: our single query position should only attend to those
            # key positions that have already been generated and cached, not the remaining zero elements.
            pad_mask = jnp.broadcast_to(
                jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
                tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
            )
            attention_mask = combine_masks(pad_mask, attention_mask)
        return key, value, attention_mask

    def __call__(
        self,
        hidden_states: jnp.ndarray,
        key_value_states: Optional[jnp.ndarray] = None,
        attention_mask: Optional[jnp.ndarray] = None,
        init_cache: bool = False,
        deterministic: bool = True,
    ) -> Tuple[jnp.ndarray]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None
        batch_size = hidden_states.shape[0]

        # get query proj
        query_states = self.q_proj(hidden_states)
        # get key, value proj
        if is_cross_attention:
            # cross_attentions
            key_states = self.k_proj(key_value_states)
            value_states = self.v_proj(key_value_states)
        else:
            # self_attention
            key_states = self.k_proj(hidden_states)
            value_states = self.v_proj(hidden_states)

        query_states = self._split_heads(query_states)
        key_states = self._split_heads(key_states)
        value_states = self._split_heads(value_states)

        # handle cache, prepare causal attention mask
        if self.causal:
            query_length, key_length = query_states.shape[1], key_states.shape[1]
            if self.has_variable("cache", "cached_key"):
                mask_shift = self.variables["cache"]["cache_index"]
                max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
                causal_mask = lax.dynamic_slice(
                    self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
                )
            else:
                causal_mask = self.causal_mask[:, :, :query_length, :key_length]
            causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])

        # combine masks if needed
        if attention_mask is not None and self.causal:
            attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
            attention_mask = combine_masks(attention_mask, causal_mask)
        elif self.causal:
            attention_mask = causal_mask
        elif attention_mask is not None:
            attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))

        # During fast autoregressive decoding, we feed one position at a time,
        # and cache the keys and values step by step.
        if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
            key_states, value_states, attention_mask = self._concatenate_to_cache(
                key_states, value_states, query_states, attention_mask
            )

        # Convert the boolean attention mask to an attention bias.
        if attention_mask is not None:
            # attention mask in the form of attention bias
            attention_bias = lax.select(
                attention_mask > 0,
                jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
                jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
            )
        else:
            attention_bias = None

        dropout_rng = None
        if not deterministic and self.dropout > 0.0:
            dropout_rng = self.make_rng("dropout")

        attn_weights = dot_product_attention_weights(
            query_states,
            key_states,
            bias=attention_bias,
            dropout_rng=dropout_rng,
            dropout_rate=self.dropout,
            broadcast_dropout=True,
            deterministic=deterministic,
            dtype=self.dtype,
            precision=None,
        )

        attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
        attn_output = self._merge_heads(attn_output)
        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights
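
# Example (a minimal, self-contained sketch of the cache update used in `_concatenate_to_cache` above; not part of
# the original file): `lax.dynamic_update_slice` writes the new key/value block at `cache_index` inside the
# fixed-size cache buffer. Shapes are illustrative.
#
#     import jax.numpy as jnp
#     from jax import lax
#
#     cache = jnp.zeros((1, 8, 2, 4))      # (batch, max_length, num_heads, head_dim)
#     new_key = jnp.ones((1, 1, 2, 4))     # key projection for one decoding step
#     cache_index = 3                      # number of positions already written
#     cache = lax.dynamic_update_slice(cache, new_key, (0, cache_index, 0, 0))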


class FlaxOPTDecoderLayer(nn.Module):
    config: OPTConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.embed_dim = self.config.hidden_size
        self.self_attn = FlaxOPTAttention(
            config=self.config,
            embed_dim=self.embed_dim,
            num_heads=self.config.num_attention_heads,
            dropout=self.config.attention_dropout,
            causal=True,
            dtype=self.dtype,
        )
        self.do_layer_norm_before = self.config.do_layer_norm_before
        self.dropout_layer = nn.Dropout(rate=self.config.dropout)
        self.activation_fn = ACT2FN[self.config.activation_function]

        self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
        self.fc1 = nn.Dense(
            self.config.ffn_dim,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.init_std),
        )
        self.fc2 = nn.Dense(
            self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
        )
        self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)

    def __call__(
        self,
        hidden_states: jnp.ndarray,
        attention_mask: jnp.ndarray,
        init_cache: bool = False,
        output_attentions: bool = True,
        deterministic: bool = True,
    ) -> Tuple[jnp.ndarray]:
        residual = hidden_states

        # 125m, 1.3B, ..., 175B applies layer norm BEFORE attention
        if self.do_layer_norm_before:
            hidden_states = self.self_attn_layer_norm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            init_cache=init_cache,
            deterministic=deterministic,
        )
        hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
        hidden_states = residual + hidden_states

        # 350m applies layer norm AFTER attention
        if not self.do_layer_norm_before:
            hidden_states = self.self_attn_layer_norm(hidden_states)

        # Fully Connected
        hidden_states_shape = hidden_states.shape
        hidden_states = hidden_states.reshape(-1, hidden_states.shape[-1])
        residual = hidden_states

        # 125m, 1.3B, ..., 175B applies layer norm BEFORE the feed-forward block
        if self.do_layer_norm_before:
            hidden_states = self.final_layer_norm(hidden_states)

        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)

        hidden_states = self.fc2(hidden_states)
        hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)

        hidden_states = (residual + hidden_states).reshape(hidden_states_shape)

        # 350m applies layer norm AFTER the feed-forward block
        if not self.do_layer_norm_before:
            hidden_states = self.final_layer_norm(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs
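
# Note (a sketch, not part of the original file): `do_layer_norm_before` is what separates the pre-LN OPT variants
# from the post-LN 350m variant mentioned in the comments above, and it is read straight from the loaded config:
#
#     from transformers import OPTConfig
#
#     config = OPTConfig.from_pretrained("facebook/opt-350m")
#     print(config.do_layer_norm_before)   # False for the 350m checkpoint, per the comments above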


class FlaxOPTDecoderLayerCollection(nn.Module):
    config: OPTConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.layers = [
            FlaxOPTDecoderLayer(self.config, name=str(i), dtype=self.dtype)
            for i in range(self.config.num_hidden_layers)
        ]
        self.layerdrop = self.config.layerdrop

    def __call__(
        self,
        hidden_states,
        attention_mask,
        deterministic: bool = True,
        init_cache: bool = False,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
    ):
        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        for decoder_layer in self.layers:
            if output_hidden_states:
                all_hidden_states += (hidden_states,)

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=attention_mask,
                init_cache=init_cache,
                output_attentions=output_attentions,
                deterministic=deterministic,
            )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        outputs = [hidden_states, all_hidden_states, all_self_attns]
        return outputs


class FlaxOPTLearnedPositionalEmbedding(nn.Embed):
    """
    This module learns positional embeddings up to a fixed maximum size.
    """

    def setup(self):
        self.offset = 2
        self.embedding = self.param(
            "embedding", self.embedding_init, (self.num_embeddings + self.offset, self.features), self.param_dtype
        )

    def __call__(self, positions):
        """`positions` is expected to be of shape `[bsz x seqlen]`."""
        return super().__call__(positions + self.offset)
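
# Note (a sketch, not part of the original file): OPT's position table reserves the first `offset = 2` rows, so a
# position id `p` selects row `p + 2` of the `(num_embeddings + 2, features)` embedding matrix.
#
#     import jax.numpy as jnp
#
#     position_ids = jnp.array([[0, 1, 2, 3]])
#     rows_looked_up = position_ids + 2   # rows 2..5 of the embedding matrix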


class FlaxOPTDecoder(nn.Module):
    config: OPTConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation
    offset: int = 2

    def setup(self):
        self.dropout_layer = nn.Dropout(rate=self.config.dropout)

        embed_dim = self.config.hidden_size
        self.padding_idx = self.config.pad_token_id
        self.max_target_positions = self.config.max_position_embeddings

        self.embed_tokens = nn.Embed(
            self.config.vocab_size,
            self.config.word_embed_proj_dim,
            embedding_init=jax.nn.initializers.normal(self.config.init_std),
            dtype=self.dtype,
        )

        self.embed_positions = FlaxOPTLearnedPositionalEmbedding(
            self.config.max_position_embeddings,
            embed_dim,
            embedding_init=jax.nn.initializers.normal(self.config.init_std),
            dtype=self.dtype,
        )

        if self.config.word_embed_proj_dim != self.config.hidden_size:
            self.project_in = nn.Dense(self.config.hidden_size, use_bias=False)
            self.project_out = nn.Dense(self.config.word_embed_proj_dim, use_bias=False)
        else:
            self.project_in = None
            self.project_out = None

        # Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility
        # with checkpoints that have been fine-tuned before transformers v4.20.1
        # see https://github.com/facebookresearch/metaseq/pull/164
        if self.config.do_layer_norm_before and not self.config._remove_final_layer_norm:
            self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
        else:
            self.final_layer_norm = None

        self.layers = FlaxOPTDecoderLayerCollection(self.config, self.dtype)

    def __call__(
        self,
        input_ids,
        attention_mask,
        position_ids,
        init_cache: bool = False,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        deterministic: bool = True,
    ):
        input_shape = input_ids.shape
        input_ids = input_ids.reshape(-1, input_shape[-1])

        inputs_embeds = self.embed_tokens(input_ids)
        if self.project_in is not None:
            inputs_embeds = self.project_in(inputs_embeds)

        positions = self.embed_positions(position_ids)

        hidden_states = inputs_embeds + positions

        hidden_state, all_hidden_states, attentions = self.layers(
            hidden_states,
            attention_mask,
            deterministic=deterministic,
            init_cache=init_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
        )

        if self.final_layer_norm is not None:
            hidden_state = self.final_layer_norm(hidden_state)

        if self.project_out is not None:
            hidden_state = self.project_out(hidden_state)

        if output_hidden_states:
            all_hidden_states += (hidden_state,)

        outputs = [hidden_state, all_hidden_states, attentions]

        if not return_dict:
            return tuple(v for v in outputs if v is not None)

        return FlaxBaseModelOutput(
            last_hidden_state=hidden_state,
            hidden_states=all_hidden_states,
            attentions=attentions,
        )


class FlaxOPTPreTrainedModel(FlaxPreTrainedModel):
    config_class = OPTConfig
    base_model_prefix: str = "model"
    module_class: nn.Module = None

    def __init__(
        self,
        config: OPTConfig,
        input_shape: Tuple[int] = (1, 1),
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensors
        input_ids = jnp.zeros(input_shape, dtype="i4")
        attention_mask = jnp.ones_like(input_ids)

        batch_size, sequence_length = input_ids.shape
        position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        module_init_outputs = self.module.init(
            rngs,
            input_ids,
            attention_mask,
            position_ids,
            return_dict=False,
        )

        random_params = module_init_outputs["params"]
        if params is not None:
            random_params = flatten_dict(unfreeze(random_params))
            params = flatten_dict(unfreeze(params))
            for missing_key in self._missing_keys:
                params[missing_key] = random_params[missing_key]
            self._missing_keys = set()
            return freeze(unflatten_dict(params))
        else:
            return random_params

    def init_cache(self, batch_size, max_length):
        r"""
        Args:
            batch_size (`int`):
                batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
            max_length (`int`):
                maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
                cache.
        """
        # init input variables to retrieve cache
        input_ids = jnp.ones((batch_size, max_length), dtype="i4")
        attention_mask = jnp.ones_like(input_ids, dtype="i4")
        position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)

        init_variables = self.module.init(
            jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
        )
        return unfreeze(init_variables["cache"])

    def __call__(
        self,
        input_ids: jnp.ndarray,
        attention_mask: Optional[jnp.ndarray] = None,
        position_ids: Optional[jnp.ndarray] = None,
        params: dict = None,
        past_key_values: dict = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        dropout_rng: PRNGKey = None,
        deterministic: bool = True,
    ):
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.return_dict

        if attention_mask is None:
            attention_mask = jnp.ones_like(input_ids)

        if position_ids is None:
            position_ids = (attention_mask.cumsum(axis=1) * attention_mask) - 1

        # Handle any PRNG if needed
        rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}

        inputs = {"params": params or self.params}

        # If past_key_values are passed, the cache is already initialized and has to be passed down so that it is
        # used. The cache must also be marked as mutable so that it can be updated by the FlaxOPTAttention module.
        if past_key_values:
            inputs["cache"] = past_key_values
            mutable = ["cache"]
        else:
            mutable = False

        outputs = self.module.apply(
            inputs,
            input_ids=jnp.array(input_ids, dtype="i4"),
            attention_mask=jnp.array(attention_mask, dtype="i4"),
            position_ids=jnp.array(position_ids, dtype="i4"),
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=deterministic,
            rngs=rngs,
            mutable=mutable,
        )

        # add updated cache to model output
        if past_key_values is not None and return_dict:
            outputs, past_key_values = outputs
            outputs["past_key_values"] = unfreeze(past_key_values["cache"])
            return outputs
        elif past_key_values is not None and not return_dict:
            outputs, past_key_values = outputs
            outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]

        return outputs
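
# Example (a minimal sketch, not part of the original file): the incremental-decoding flow supported by the cache
# handling above. `model` and `input_ids` are assumed to come from elsewhere; shapes and lengths are illustrative.
#
#     import jax.numpy as jnp
#
#     past_key_values = model.init_cache(batch_size=1, max_length=16)
#     attention_mask = jnp.ones((1, 16), dtype="i4")
#     out = model(input_ids[:, :1], attention_mask=attention_mask,
#                 position_ids=jnp.array([[0]]), past_key_values=past_key_values)
#     # feed the updated cache back in for the next position
#     out = model(input_ids[:, 1:2], attention_mask=attention_mask,
#                 position_ids=jnp.array([[1]]), past_key_values=out["past_key_values"])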


class FlaxOPTModule(nn.Module):
    config: OPTConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation

    def setup(self):
        self.decoder = FlaxOPTDecoder(self.config, dtype=self.dtype)

    def _get_decoder_module(self):
        return self.decoder

    def __call__(
        self,
        input_ids,
        attention_mask,
        position_ids,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        deterministic: bool = True,
        init_cache=False,
    ):
        decoder_outputs = self.decoder(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=deterministic,
            init_cache=init_cache,
        )

        if not return_dict:
            return decoder_outputs

        return FlaxBaseModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            hidden_states=decoder_outputs.hidden_states,
            attentions=decoder_outputs.attentions,
        )


# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartModel with Bart->OPT
class FlaxOPTModel(FlaxOPTPreTrainedModel):
    config: OPTConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation
    module_class = FlaxOPTModule


append_call_sample_docstring(FlaxOPTModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutput, _CONFIG_FOR_DOC)
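
# Example (a minimal sketch, not part of the original file): a plain forward pass with `FlaxOPTModel`. The tokenizer
# call mirrors the input docstring above; the checkpoint and prompt are only illustrative.
#
#     from transformers import AutoTokenizer, FlaxOPTModel
#
#     tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
#     model = FlaxOPTModel.from_pretrained("facebook/opt-350m")
#     inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
#     outputs = model(**inputs)
#     last_hidden_state = outputs.last_hidden_state   # (batch_size, sequence_length, hidden_size)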


@add_start_docstrings(
    "The bare OPT Model transformer outputting raw hidden-states without any specific head on top.",
    OPT_START_DOCSTRING,
)
class FlaxOPTForCausalLMModule(nn.Module):
    config: OPTConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.model = FlaxOPTModule(config=self.config, dtype=self.dtype)
        self.lm_head = nn.Dense(
            self.config.vocab_size,
            use_bias=False,
            dtype=self.dtype,
            kernel_init=jax.nn.initializers.normal(self.config.init_std),
        )

    def __call__(
        self,
        input_ids,
        attention_mask,
        position_ids,
        init_cache: bool = False,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        deterministic: bool = True,
    ):
        outputs = self.model(
            input_ids,
            attention_mask,
            position_ids,
            init_cache=init_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=deterministic,
        )

        hidden_states = outputs[0]

        if self.config.tie_word_embeddings:
            shared_embedding = self.model.variables["params"]["decoder"]["embed_tokens"]["embedding"]
            lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
        else:
            lm_logits = self.lm_head(hidden_states)

        if not return_dict:
            return (lm_logits,) + outputs[1:]

        return FlaxMaskedLMOutput(
            logits=lm_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    OPT Model with a language modeling head on top (linear layer with weights tied to the input embeddings) e.g. for
    autoregressive tasks.
    """,
    OPT_START_DOCSTRING,
)
class FlaxOPTForCausalLM(FlaxOPTPreTrainedModel):
    module_class = FlaxOPTForCausalLMModule

    def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
        # initializing the cache
        batch_size, seq_length = input_ids.shape

        past_key_values = self.init_cache(batch_size, max_length)
        # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and
        # x < cache_length. But since the decoder uses a causal mask, those positions are masked anyway.
        # Thus, we can create a single static attention_mask here, which is more efficient for compilation.
        extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
        if attention_mask is not None:
            position_ids = attention_mask.cumsum(axis=1) - 1
            extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
        else:
            position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))

        return {
            "past_key_values": past_key_values,
            "attention_mask": extended_attention_mask,
            "position_ids": position_ids,
        }

    def update_inputs_for_generation(self, model_outputs, model_kwargs):
        model_kwargs["past_key_values"] = model_outputs.past_key_values
        model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
        return model_kwargs


append_call_sample_docstring(
    FlaxOPTForCausalLM,
    _CHECKPOINT_FOR_DOC,
    FlaxBaseModelOutput,
    _CONFIG_FOR_DOC,
)
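
# Example (a minimal sketch, not part of the original file): autoregressive generation with `FlaxOPTForCausalLM`,
# which drives `prepare_inputs_for_generation` / `update_inputs_for_generation` above through the shared `generate`
# API. The checkpoint and prompt are only illustrative.
#
#     from transformers import AutoTokenizer, FlaxOPTForCausalLM
#
#     tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
#     model = FlaxOPTForCausalLM.from_pretrained("facebook/opt-350m")
#     inputs = tokenizer("Hello, my dog is", return_tensors="np")
#     output_ids = model.generate(inputs["input_ids"], max_length=20).sequences
#     print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))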