- # coding=utf-8
- # Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """TF 2.0 OPT model."""
- from __future__ import annotations
- from typing import Optional, Tuple, Union
- import numpy as np
- import tensorflow as tf
- from ...activations_tf import get_tf_activation
- from ...modeling_tf_outputs import TFBaseModelOutputWithPast, TFCausalLMOutputWithPast
- # Public API
- from ...modeling_tf_utils import (
- TFCausalLanguageModelingLoss,
- TFModelInputType,
- TFPreTrainedModel,
- TFSharedEmbeddings,
- keras,
- keras_serializable,
- unpack_inputs,
- )
- from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
- from ...utils import (
- add_code_sample_docstrings,
- add_start_docstrings,
- add_start_docstrings_to_model_forward,
- logging,
- replace_return_docstrings,
- )
- from .configuration_opt import OPTConfig
- logger = logging.get_logger(__name__)
- _CHECKPOINT_FOR_DOC = "facebook/opt-350m"
- _CONFIG_FOR_DOC = "OPTConfig"
- # Base model docstring
- _EXPECTED_OUTPUT_SHAPE = [1, 8, 1024]
- # Causal LM output
- _CAUSAL_LM_EXPECTED_OUTPUT = (
- "Hey, are you conscious? Can you talk to me?\nI'm not conscious. I'm just a little bit of a weirdo."
- )
- LARGE_NEGATIVE = -1e8
- def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
- """
- Make causal mask used for uni-directional (decoder) self-attention.
- """
- bsz = input_ids_shape[0]
- tgt_len = input_ids_shape[1]
- # We need triu with k = 1 but TF expects known compile-time dims for that, so we hack around it
- mask = tf.fill((tgt_len, tgt_len), tf.cast(LARGE_NEGATIVE, tf.float32))
- mask = tf.linalg.band_part(mask, 0, -1) - tf.linalg.band_part(mask, 0, 0)
- if past_key_values_length > 0:
- mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)
- return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))
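- # Illustrative sketch (editor-added, not part of the original module): for a batch of 2
- # sequences of length 4 and no cached past, the returned mask has shape (2, 1, 4, 4), with
- # 0.0 on and below the diagonal and LARGE_NEGATIVE strictly above it, so once added to the
- # attention scores, position i can only attend to positions j <= i.
- #
- #     causal = _make_causal_mask(tf.TensorShape([2, 4]), past_key_values_length=0)
- #     print(causal.shape)  # (2, 1, 4, 4)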
- # Copied from transformers.models.bart.modeling_tf_bart._expand_mask
- def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
- """
- Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
- """
- src_len = shape_list(mask)[1]
- tgt_len = tgt_len if tgt_len is not None else src_len
- one_cst = tf.constant(1.0)
- mask = tf.cast(mask, dtype=one_cst.dtype)
- expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
- return (one_cst - expanded_mask) * LARGE_NEGATIVE
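- # Illustrative sketch (editor-added): a padding mask of shape (bsz, src_len) becomes an
- # additive bias of shape (bsz, 1, tgt_len, src_len), where attended positions map to 0.0 and
- # padded positions map to LARGE_NEGATIVE.
- #
- #     pad_mask = tf.constant([[1, 1, 1, 0]])       # last position is padding
- #     bias = _expand_mask(pad_mask, tgt_len=4)     # shape (1, 1, 4, 4)
- #     # bias[..., :3] == 0.0 and bias[..., 3] == LARGE_NEGATIVE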
- class TFOPTLearnedPositionalEmbedding(keras.layers.Embedding):
- """
- This module learns positional embeddings up to a fixed maximum size.
- """
- def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs):
- # OPT is set up so that if padding_idx is specified then offset the embedding ids by 2
- # and adjust num_embeddings appropriately. Other models don't have this hack
- self.offset = 2
- super().__init__(num_embeddings + self.offset, embedding_dim, **kwargs)
- def call(self, attention_mask, past_key_values_length: int = 0):
- """`attention_mask` is expected to be of shape [bsz x seqlen]."""
- attention_mask = tf.cast(attention_mask, tf.int64)
- # create positions depending on attention_mask
- positions = tf.math.cumsum(attention_mask, axis=1) * attention_mask - 1
- # cut positions if `past_key_values_length` is > 0
- positions = positions[:, past_key_values_length:]
- return super().call(positions + self.offset)
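- # Illustrative sketch (editor-added): positions are derived from the attention mask rather
- # than from raw token indices, so padded slots collapse onto position -1 before the offset of
- # 2 is applied. For attention_mask == [[1, 1, 1, 0]]:
- #     cumsum * mask - 1  ->  [[0, 1, 2, -1]]
- # and the embedding lookup then uses these values + self.offset.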
- # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->OPT
- class TFOPTAttention(keras.layers.Layer):
- """Multi-headed attention from "Attention Is All You Need"."""
- def __init__(
- self,
- embed_dim: int,
- num_heads: int,
- dropout: float = 0.0,
- is_decoder: bool = False,
- bias: bool = True,
- **kwargs,
- ):
- super().__init__(**kwargs)
- self.embed_dim = embed_dim
- self.num_heads = num_heads
- self.dropout = keras.layers.Dropout(dropout)
- self.head_dim = embed_dim // num_heads
- if (self.head_dim * num_heads) != self.embed_dim:
- raise ValueError(
- f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
- f" and `num_heads`: {num_heads})."
- )
- self.scaling = self.head_dim**-0.5
- self.is_decoder = is_decoder
- self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
- self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
- self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
- self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
- def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
- return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
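- # Editor note: `_shape` rearranges a projected tensor from (bsz, seq_len, embed_dim) to
- # (bsz, num_heads, seq_len, head_dim) so that each attention head operates on its own slice.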
- def call(
- self,
- hidden_states: tf.Tensor,
- key_value_states: tf.Tensor | None = None,
- past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
- attention_mask: tf.Tensor | None = None,
- layer_head_mask: tf.Tensor | None = None,
- training: Optional[bool] = False,
- ) -> Tuple[tf.Tensor, tf.Tensor | None, Tuple[Tuple[tf.Tensor]] | None]:
- """Input shape: Batch x Time x Channel"""
- # if key_value_states are provided this layer is used as a cross-attention layer
- # for the decoder
- is_cross_attention = key_value_states is not None
- bsz, tgt_len, embed_dim = shape_list(hidden_states)
- # get query proj
- query_states = self.q_proj(hidden_states) * self.scaling
- # get key, value proj
- if is_cross_attention and past_key_value is not None:
- # reuse k,v, cross_attentions
- key_states = past_key_value[0]
- value_states = past_key_value[1]
- elif is_cross_attention:
- # cross_attentions
- key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
- value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
- elif past_key_value is not None:
- # reuse k, v, self_attention
- key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
- value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
- key_states = tf.concat([past_key_value[0], key_states], axis=2)
- value_states = tf.concat([past_key_value[1], value_states], axis=2)
- else:
- # self_attention
- key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
- value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
- if self.is_decoder:
- # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
- # Further calls to cross_attention layer can then reuse all cross-attention
- # key/value_states (first "if" case)
- # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
- # all previous decoder key/value_states. Further calls to uni-directional self-attention
- # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
- # if encoder bi-directional self-attention `past_key_value` is always `None`
- past_key_value = (key_states, value_states)
- proj_shape = (bsz * self.num_heads, -1, self.head_dim)
- query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
- key_states = tf.reshape(key_states, proj_shape)
- value_states = tf.reshape(value_states, proj_shape)
- src_len = shape_list(key_states)[1]
- attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
- tf.debugging.assert_equal(
- shape_list(attn_weights),
- [bsz * self.num_heads, tgt_len, src_len],
- message=(
- f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
- f" {shape_list(attn_weights)}"
- ),
- )
- if attention_mask is not None:
- tf.debugging.assert_equal(
- shape_list(attention_mask),
- [bsz, 1, tgt_len, src_len],
- message=(
- f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
- f" {shape_list(attention_mask)}"
- ),
- )
- attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
- attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
- attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
- attn_weights = stable_softmax(attn_weights, axis=-1)
- if layer_head_mask is not None:
- tf.debugging.assert_equal(
- shape_list(layer_head_mask),
- [self.num_heads],
- message=(
- f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
- f" {shape_list(layer_head_mask)}"
- ),
- )
- attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
- attn_weights, (bsz, self.num_heads, tgt_len, src_len)
- )
- attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
- attn_probs = self.dropout(attn_weights, training=training)
- attn_output = tf.matmul(attn_probs, value_states)
- tf.debugging.assert_equal(
- shape_list(attn_output),
- [bsz * self.num_heads, tgt_len, self.head_dim],
- message=(
- f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
- f" {shape_list(attn_output)}"
- ),
- )
- attn_output = tf.transpose(
- tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
- )
- attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
- attn_output = self.out_proj(attn_output)
- attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
- return attn_output, attn_weights, past_key_value
- def build(self, input_shape=None):
- if self.built:
- return
- self.built = True
- if getattr(self, "k_proj", None) is not None:
- with tf.name_scope(self.k_proj.name):
- self.k_proj.build([None, None, self.embed_dim])
- if getattr(self, "q_proj", None) is not None:
- with tf.name_scope(self.q_proj.name):
- self.q_proj.build([None, None, self.embed_dim])
- if getattr(self, "v_proj", None) is not None:
- with tf.name_scope(self.v_proj.name):
- self.v_proj.build([None, None, self.embed_dim])
- if getattr(self, "out_proj", None) is not None:
- with tf.name_scope(self.out_proj.name):
- self.out_proj.build([None, None, self.embed_dim])
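- # Editor-added usage sketch for the attention layer on its own (all sizes are illustrative):
- #
- #     attn = TFOPTAttention(embed_dim=768, num_heads=12, is_decoder=True)
- #     x = tf.random.normal((2, 5, 768))
- #     out, weights, past = attn(hidden_states=x)
- #     # out: (2, 5, 768), weights: (2, 12, 5, 5), past: (key_states, value_states)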
- class TFOPTDecoderLayer(keras.layers.Layer):
- def __init__(self, config: OPTConfig, **kwargs):
- super().__init__(**kwargs)
- self.do_layer_norm_before = config.do_layer_norm_before
- self.embed_dim = config.hidden_size
- self.self_attn = TFOPTAttention(
- embed_dim=self.embed_dim,
- num_heads=config.num_attention_heads,
- dropout=config.attention_dropout,
- name="self_attn",
- is_decoder=True,
- )
- self.dropout = keras.layers.Dropout(config.dropout)
- self.activation_fn = get_tf_activation(config.activation_function)
- self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
- self.fc1 = keras.layers.Dense(config.ffn_dim, name="fc1")
- self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
- self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
- self.config = config
- def call(
- self,
- hidden_states: tf.Tensor,
- attention_mask: np.ndarray | tf.Tensor | None = None,
- layer_head_mask: tf.Tensor | None = None,
- past_key_value: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
- training: Optional[bool] = False,
- output_attentions: Optional[bool] = False,
- use_cache: Optional[bool] = False,
- ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
- """
- Args:
- hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
- attention_mask (`tf.Tensor`, *optional*): attention mask of size
- `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
- layer_head_mask (`tf.Tensor`, *optional*): mask for attention heads in a given layer of size
- `(decoder_attention_heads,)`
- past_key_value (`Tuple(tf.Tensor)`, *optional*): cached past key and value projection states
- training (`bool`, *optional*, defaults to `False`):
- Whether or not to use the model in training mode (some modules like dropout modules have different
- behaviors between training and evaluation).
- """
- residual = hidden_states
- # 125m, 1.3B, ..., 175B applies layer norm BEFORE attention
- if self.do_layer_norm_before:
- hidden_states = self.self_attn_layer_norm(hidden_states)
- # Self Attention
- # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
- self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
- # add present self-attn cache to positions 1,2 of present_key_value tuple
- hidden_states, self_attn_weights, present_key_value = self.self_attn(
- hidden_states=hidden_states,
- past_key_value=self_attn_past_key_value,
- attention_mask=attention_mask,
- layer_head_mask=layer_head_mask,
- )
- hidden_states = self.dropout(hidden_states, training=training)
- hidden_states = residual + hidden_states
- # 350m applies layer norm AFTER attention
- if not self.do_layer_norm_before:
- hidden_states = self.self_attn_layer_norm(hidden_states)
- # Fully Connected
- residual = hidden_states
- # 125m, 1.3B, ..., 175B applies layer norm BEFORE the fully connected block
- if self.do_layer_norm_before:
- hidden_states = self.final_layer_norm(hidden_states)
- hidden_states = self.fc1(hidden_states)
- hidden_states = self.activation_fn(hidden_states)
- hidden_states = self.fc2(hidden_states)
- hidden_states = self.dropout(hidden_states, training=training)
- hidden_states = residual + hidden_states
- # 350m applies layer norm AFTER the fully connected block
- if not self.do_layer_norm_before:
- hidden_states = self.final_layer_norm(hidden_states)
- return (hidden_states, self_attn_weights, present_key_value)
- def build(self, input_shape=None):
- if self.built:
- return
- self.built = True
- if getattr(self, "self_attn", None) is not None:
- with tf.name_scope(self.self_attn.name):
- self.self_attn.build(None)
- if getattr(self, "self_attn_layer_norm", None) is not None:
- with tf.name_scope(self.self_attn_layer_norm.name):
- self.self_attn_layer_norm.build([None, None, self.embed_dim])
- if getattr(self, "fc1", None) is not None:
- with tf.name_scope(self.fc1.name):
- self.fc1.build([None, None, self.embed_dim])
- if getattr(self, "fc2", None) is not None:
- with tf.name_scope(self.fc2.name):
- self.fc2.build([None, None, self.config.ffn_dim])
- if getattr(self, "final_layer_norm", None) is not None:
- with tf.name_scope(self.final_layer_norm.name):
- self.final_layer_norm.build([None, None, self.embed_dim])
- OPT_START_DOCSTRING = r"""
- This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
- library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
- etc.)
- This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
- as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
- behavior.
- <Tip>
- TensorFlow models and layers in `transformers` accept two formats as input:
- - having all inputs as keyword arguments (like PyTorch models), or
- - having all inputs as a list, tuple or dict in the first positional argument.
- The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
- and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
- pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
- format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
- the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
- positional argument:
- - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
- - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
- `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
- - a dictionary with one or several input Tensors associated to the input names given in the docstring:
- `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
- Note that when creating models and layers with
- [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
- about any of this, as you can just pass inputs like you would to any other Python function!
- </Tip>
- Args:
- config ([`OPTConfig`]): Model configuration class with all the parameters of the model.
- Initializing with a config file does not load the weights associated with the model, only the
- configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
- """
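- # Editor-added sketch of the three Keras input formats described in the Tip above (the
- # checkpoint name matches _CHECKPOINT_FOR_DOC and is only an example):
- #
- #     from transformers import AutoTokenizer, TFOPTModel
- #     tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
- #     model = TFOPTModel.from_pretrained("facebook/opt-350m")
- #     batch = tokenizer("Hello there", return_tensors="tf")
- #     out_tensor = model(batch["input_ids"])                               # single tensor
- #     out_list = model([batch["input_ids"], batch["attention_mask"]])      # list
- #     out_dict = model({"input_ids": batch["input_ids"],
- #                       "attention_mask": batch["attention_mask"]})        # dict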
- @add_start_docstrings(
- "The bare OPT Model outputting raw hidden-states without any specific head on top.",
- OPT_START_DOCSTRING,
- )
- class TFOPTPreTrainedModel(TFPreTrainedModel):
- """
- TFOPT Pretrained Model that inherits from transformers.TFPreTrainedModel.
- Args:
- config: OPTConfig
- """
- config_class = OPTConfig
- base_model_prefix = "model"
- OPT_INPUTS_DOCSTRING = r"""
- Args:
- input_ids (`tf.Tensor` of shape `({0})`):
- Indices of input sequence tokens in the vocabulary.
- Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
- [`PreTrainedTokenizer.__call__`] for details.
- [What are input IDs?](../glossary#input-ids)
- attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
- Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- - 1 for tokens that are **not masked**,
- - 0 for tokens that are **masked**.
- [What are attention masks?](../glossary#attention-mask)
- head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
- Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
- - 1 indicates the head is **not masked**,
- - 0 indicates the head is **masked**.
- past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
- Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
- If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that
- don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
- `input_ids` of shape `(batch_size, sequence_length)`.
- use_cache (`bool`, *optional*, defaults to `True`):
- If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
- `past_key_values`). Set to `False` during training, `True` during generation.
- output_attentions (`bool`, *optional*):
- Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
- tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
- config will be used instead.
- output_hidden_states (`bool`, *optional*):
- Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
- more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
- used instead.
- return_dict (`bool`, *optional*):
- Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
- eager mode, in graph mode the value will always be set to True.
- training (`bool`, *optional*, defaults to `False`):
- Whether or not to use the model in training mode (some modules like dropout modules have different
- behaviors between training and evaluation).
- """
- @keras_serializable
- class TFOPTDecoder(keras.layers.Layer):
- config_class = OPTConfig
- def __init__(self, config: OPTConfig, **kwargs):
- super().__init__(**kwargs)
- self.config = config
- self.padding_idx = config.pad_token_id
- self.layerdrop = config.layerdrop
- num_embeddings = config.max_position_embeddings
- self.embed_tokens = TFSharedEmbeddings(
- config.vocab_size, config.word_embed_proj_dim, config.pad_token_id, name="embed_tokens"
- )
- self.embed_positions = TFOPTLearnedPositionalEmbedding(
- num_embeddings,
- config.hidden_size,
- name="embed_positions",
- )
- # Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility
- # with checkpoints that have been fine-tuned before transformers v4.20.1
- # see https://github.com/facebookresearch/metaseq/pull/164
- if config.do_layer_norm_before and not config._remove_final_layer_norm:
- self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
- else:
- self.final_layer_norm = None
- if config.word_embed_proj_dim != config.hidden_size:
- self.project_out = keras.layers.Dense(config.word_embed_proj_dim, name="project_out", use_bias=False)
- self.project_in = keras.layers.Dense(config.hidden_size, name="project_in", use_bias=False)
- else:
- self.project_in = None
- self.project_out = None
- self.layers = [TFOPTDecoderLayer(config, name=f"layers.{i}") for i in range(config.num_hidden_layers)]
- self.dropout = keras.layers.Dropout(config.dropout)
- def get_embed_tokens(self):
- return self.embed_tokens
- def set_embed_tokens(self, embed_tokens):
- self.embed_tokens = embed_tokens
- def set_input_embeddings(self, new_embeddings):
- self.embed_tokens.vocab_size = new_embeddings.shape[0]
- self.embed_tokens.weight = new_embeddings
- def get_input_embeddings(self):
- return self.embed_tokens
- def _prepare_decoder_attention_mask(self, attention_mask, input_shape, past_key_values_length):
- # create causal mask
- # # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
- _, seq_length = input_shape
- tf.debugging.assert_equal(
- seq_length + past_key_values_length,
- shape_list(attention_mask)[1],
- message="Attention mask shape should be (batch_size, seq_length + past_key_values_length)"
- f" but is {shape_list(attention_mask)[1]} with input_ids shape {input_shape} and past length"
- f" {past_key_values_length}.",
- )
- expanded_attn_mask = _expand_mask(attention_mask, tgt_len=input_shape[-1])
- if seq_length > 1:
- combined_attention_mask = (
- _make_causal_mask(input_shape, past_key_values_length=past_key_values_length) + expanded_attn_mask
- )
- else:
- combined_attention_mask = expanded_attn_mask
- return combined_attention_mask
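- # Editor note: for a prompt of length > 1 the causal mask and the expanded padding mask are
- # simply added, so a position is masked if it is either in the future or padded; for a
- # single-token decoding step (seq_length == 1) only the padding mask is applied.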
- @unpack_inputs
- def call(
- self,
- input_ids: TFModelInputType | None = None,
- inputs_embeds: np.ndarray | tf.Tensor | None = None,
- attention_mask: np.ndarray | tf.Tensor | None = None,
- head_mask: np.ndarray | tf.Tensor | None = None,
- past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
- use_cache: Optional[bool] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- training: Optional[bool] = False,
- ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:
- r"""
- Args:
- input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
- Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
- provide it.
- Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
- [`PreTrainedTokenizer.__call__`] for details.
- [What are input IDs?](../glossary#input-ids)
- attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
- Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- - 1 for tokens that are **not masked**,
- - 0 for tokens that are **masked**.
- [What are attention masks?](../glossary#attention-mask)
- head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
- Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- - 1 indicates the head is **not masked**,
- - 0 indicates the head is **masked**.
- past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
- Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
- decoding.
- If `past_key_values` are used, the user can optionally input only the last `input_ids` (those
- that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
- all `input_ids` of shape `(batch_size, sequence_length)`.
- inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
- Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
- is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
- model's internal embedding lookup matrix.
- output_attentions (`bool`, *optional*):
- Whether or not to return the attentions tensors of all attention layers. See `attentions` under
- returned tensors for more detail.
- output_hidden_states (`bool`, *optional*):
- Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
- for more detail.
- return_dict (`bool`, *optional*):
- Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
- training (`bool`, *optional*, defaults to `False`):
- Whether or not to use the model in training mode (some modules like dropout modules have different
- behaviors between training and evaluation).
- """
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- use_cache = use_cache if use_cache is not None else self.config.use_cache
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
- if input_ids is not None and inputs_embeds is not None:
- raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
- elif input_ids is not None:
- input_shape = shape_list(input_ids)
- elif inputs_embeds is not None:
- input_shape = shape_list(inputs_embeds)[:-1]
- else:
- raise ValueError("You have to specify either input_ids or inputs_embeds")
- past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0
- if inputs_embeds is None:
- check_embeddings_within_bounds(input_ids, self.embed_tokens.vocab_size)
- inputs_embeds = self.embed_tokens(input_ids)
- if attention_mask is None:
- attention_mask = tf.ones((input_shape[0], input_shape[1] + past_key_values_length), dtype=tf.bool)
- else:
- tf.debugging.assert_equal(
- shape_list(attention_mask)[1],
- past_key_values_length + input_shape[1],
- message=(
- f"The provided attention mask has length {tf.shape(attention_mask)[1]}, but its length should be "
- f"{past_key_values_length + input_shape[1]} (sum of the lengths of current and past inputs)"
- ),
- )
- pos_embeds = self.embed_positions(attention_mask, past_key_values_length)
- attention_mask = self._prepare_decoder_attention_mask(attention_mask, input_shape, past_key_values_length)
- if self.project_in is not None:
- inputs_embeds = self.project_in(inputs_embeds)
- hidden_states = inputs_embeds + pos_embeds
- # decoder layers
- all_hidden_states = () if output_hidden_states else None
- all_self_attns = () if output_attentions else None
- present_key_values = () if use_cache else None
- # check that head_mask has a correct number of layers specified if desired
- for attn_mask_name, attn_mask in [("head_mask", head_mask)]:
- if attn_mask is not None:
- tf.debugging.assert_equal(
- shape_list(attn_mask)[0],
- len(self.layers),
- message=(
- f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for"
- f" {shape_list(attn_mask)[0]}."
- ),
- )
- for idx, decoder_layer in enumerate(self.layers):
- if output_hidden_states:
- all_hidden_states += (hidden_states,)
- past_key_value = past_key_values[idx] if past_key_values is not None else None
- hidden_states, layer_self_attn, present_key_value = decoder_layer(
- hidden_states,
- attention_mask=attention_mask,
- layer_head_mask=head_mask[idx] if head_mask is not None else None,
- past_key_value=past_key_value,
- )
- if use_cache:
- present_key_values += (present_key_value,)
- if output_attentions:
- all_self_attns += (layer_self_attn,)
- if self.final_layer_norm is not None:
- hidden_states = self.final_layer_norm(hidden_states)
- if self.project_out is not None:
- hidden_states = self.project_out(hidden_states)
- if output_hidden_states:
- all_hidden_states += (hidden_states,)
- if not return_dict:
- return tuple(
- v for v in [hidden_states, present_key_values, all_hidden_states, all_self_attns] if v is not None
- )
- else:
- return TFBaseModelOutputWithPast(
- last_hidden_state=hidden_states,
- past_key_values=present_key_values,
- hidden_states=all_hidden_states,
- attentions=all_self_attns,
- )
- def build(self, input_shape=None):
- if self.built:
- return
- self.built = True
- if getattr(self, "embed_tokens", None) is not None:
- with tf.name_scope(self.embed_tokens.name):
- self.embed_tokens.build(None)
- if getattr(self, "embed_positions", None) is not None:
- with tf.name_scope(self.embed_positions.name):
- self.embed_positions.build(None)
- if getattr(self, "final_layer_norm", None) is not None:
- with tf.name_scope(self.final_layer_norm.name):
- self.final_layer_norm.build([None, None, self.config.hidden_size])
- if getattr(self, "project_out", None) is not None:
- with tf.name_scope(self.project_out.name):
- self.project_out.build([None, None, self.config.hidden_size])
- if getattr(self, "project_in", None) is not None:
- with tf.name_scope(self.project_in.name):
- self.project_in.build([None, None, self.config.word_embed_proj_dim])
- if getattr(self, "layers", None) is not None:
- for layer in self.layers:
- with tf.name_scope(layer.name):
- layer.build(None)
- @keras_serializable
- class TFOPTMainLayer(keras.layers.Layer):
- config_class = OPTConfig
- def __init__(self, config: OPTConfig, **kwargs):
- super().__init__(**kwargs)
- self.config = config
- self.decoder = TFOPTDecoder(config, name="decoder")
- def get_input_embeddings(self):
- return self.decoder.embed_tokens
- def set_input_embeddings(self, new_embeddings):
- self.decoder.set_input_embeddings(new_embeddings)
- @unpack_inputs
- def call(
- self,
- input_ids: TFModelInputType | None = None,
- attention_mask: np.ndarray | tf.Tensor | None = None,
- head_mask: np.ndarray | tf.Tensor | None = None,
- past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
- inputs_embeds: np.ndarray | tf.Tensor | None = None,
- use_cache: Optional[bool] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- training: Optional[bool] = False,
- **kwargs,
- ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- use_cache = use_cache if use_cache is not None else self.config.use_cache
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
- outputs = self.decoder(
- input_ids,
- attention_mask=attention_mask,
- head_mask=head_mask,
- past_key_values=past_key_values,
- inputs_embeds=inputs_embeds,
- use_cache=use_cache,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- training=training,
- )
- if not return_dict:
- return outputs
- return TFBaseModelOutputWithPast(
- last_hidden_state=outputs.last_hidden_state,
- past_key_values=outputs.past_key_values,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- )
- def build(self, input_shape=None):
- if self.built:
- return
- self.built = True
- if getattr(self, "decoder", None) is not None:
- with tf.name_scope(self.decoder.name):
- self.decoder.build(None)
- @add_start_docstrings(
- "The bare TF OPT Model outputting raw hidden-states without any specific head on top.",
- OPT_START_DOCSTRING,
- )
- @keras_serializable
- class TFOPTModel(TFOPTPreTrainedModel):
- config_class = OPTConfig
- def __init__(self, config: OPTConfig, **kwargs):
- super().__init__(config, **kwargs)
- self.config = config
- self.model = TFOPTMainLayer(config, name="model")
- def get_input_embeddings(self):
- return self.model.decoder.embed_tokens
- def set_input_embeddings(self, new_embeddings):
- self.model.set_input_embeddings(new_embeddings)
- @unpack_inputs
- @add_start_docstrings_to_model_forward(OPT_INPUTS_DOCSTRING)
- @add_code_sample_docstrings(
- checkpoint=_CHECKPOINT_FOR_DOC,
- output_type=TFBaseModelOutputWithPast,
- config_class=_CONFIG_FOR_DOC,
- expected_output=_EXPECTED_OUTPUT_SHAPE,
- )
- def call(
- self,
- input_ids: TFModelInputType | None = None,
- attention_mask: np.ndarray | tf.Tensor | None = None,
- head_mask: np.ndarray | tf.Tensor | None = None,
- past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
- inputs_embeds: np.ndarray | tf.Tensor | None = None,
- use_cache: Optional[bool] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- training: Optional[bool] = False,
- **kwargs,
- ) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- use_cache = use_cache if use_cache is not None else self.config.use_cache
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
- outputs = self.model(
- input_ids,
- attention_mask=attention_mask,
- head_mask=head_mask,
- past_key_values=past_key_values,
- inputs_embeds=inputs_embeds,
- use_cache=use_cache,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- training=training,
- )
- if not return_dict:
- return outputs
- return TFBaseModelOutputWithPast(
- last_hidden_state=outputs.last_hidden_state,
- past_key_values=outputs.past_key_values,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- )
- def serving_output(self, output):
- pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
- hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
- attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
- return TFBaseModelOutputWithPast(
- last_hidden_state=output.last_hidden_state,
- past_key_values=pkv,
- hidden_states=hs,
- attentions=attns,
- )
- def build(self, input_shape=None):
- if self.built:
- return
- self.built = True
- if getattr(self, "model", None) is not None:
- with tf.name_scope(self.model.name):
- self.model.build(None)
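- # Editor-added usage sketch for a plain forward pass (checkpoint as in _CHECKPOINT_FOR_DOC):
- #
- #     from transformers import AutoTokenizer, TFOPTModel
- #     tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
- #     model = TFOPTModel.from_pretrained("facebook/opt-350m")
- #     inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
- #     outputs = model(**inputs)
- #     print(outputs.last_hidden_state.shape)  # (batch_size, sequence_length, hidden dim)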
- @add_start_docstrings(
- """
- The OPT Model transformer with a language modeling head on top.
- """,
- OPT_START_DOCSTRING,
- )
- @keras_serializable
- class TFOPTForCausalLM(TFOPTPreTrainedModel, TFCausalLanguageModelingLoss):
- config_class = OPTConfig
- def __init__(self, config: OPTConfig, **kwargs):
- super().__init__(config, **kwargs)
- self.config = config
- self.model = TFOPTMainLayer(config, name="model")
- def get_output_embeddings(self):
- return self.model.get_input_embeddings()
- def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs):
- attention_mask = kwargs.get("attention_mask", None)
- # only last token for inputs_ids if past is defined in kwargs
- if past_key_values:
- inputs = tf.expand_dims(inputs[:, -1], -1)
- return {
- "input_ids": inputs,
- "attention_mask": attention_mask,
- "past_key_values": past_key_values,
- "use_cache": use_cache,
- }
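- # Editor-added sketch: when a cache is present, only the newest token id is fed back in, e.g.
- #
- #     inputs = tf.constant([[2, 100, 101]])  # token ids are placeholders
- #     prepared = model.prepare_inputs_for_generation(inputs, past_key_values=cache)
- #     # prepared["input_ids"] has shape (batch_size, 1) and holds just the last column, while
- #     # `cache` (the past key/values from the previous step) is passed through unchanged.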
- @unpack_inputs
- @replace_return_docstrings(output_type=TFCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
- @add_code_sample_docstrings(
- checkpoint=_CHECKPOINT_FOR_DOC,
- output_type=TFCausalLMOutputWithPast,
- config_class=_CONFIG_FOR_DOC,
- expected_output=_CAUSAL_LM_EXPECTED_OUTPUT,
- )
- def call(
- self,
- input_ids: TFModelInputType | None = None,
- past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
- attention_mask: np.ndarray | tf.Tensor | None = None,
- position_ids: np.ndarray | tf.Tensor | None = None,
- head_mask: np.ndarray | tf.Tensor | None = None,
- inputs_embeds: np.ndarray | tf.Tensor | None = None,
- labels: np.ndarray | tf.Tensor | None = None,
- use_cache: Optional[bool] = None,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- training: Optional[bool] = False,
- **kwargs,
- ) -> Union[TFCausalLMOutputWithPast, Tuple[tf.Tensor]]:
- r"""
- Args:
- input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
- Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
- provide it.
- Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
- [`PreTrainedTokenizer.__call__`] for details.
- [What are input IDs?](../glossary#input-ids)
- attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
- Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- - 1 for tokens that are **not masked**,
- - 0 for tokens that are **masked**.
- [What are attention masks?](../glossary#attention-mask)
- head_mask (`tf.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*):
- Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- - 1 indicates the head is **not masked**,
- - 0 indicates the head is **masked**.
- past_key_values (`Tuple[Tuple[tf.Tensor]]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
- Tuple of `Tuple[tf.Tensor]` of length `config.n_layers`, with each tuple having 2 tensors of shape
- `(batch_size, num_heads, sequence_length, embed_size_per_head)`.
- Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
- `past_key_values` input) to speed up sequential decoding.
- If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that
- don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
- `input_ids` of shape `(batch_size, sequence_length)`.
- inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
- Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
- This is useful if you want more control over how to convert `input_ids` indices into associated vectors
- than the model's internal embedding lookup matrix.
- labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
- Labels for computing the language modeling loss. Indices should either be in `[0, ...,
- config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
- (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
- use_cache (`bool`, *optional*):
- If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
- (see `past_key_values`).
- output_attentions (`bool`, *optional*):
- Whether or not to return the attentions tensors of all attention layers. See `attentions` under
- returned tensors for more detail.
- output_hidden_states (`bool`, *optional*):
- Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
- for more detail.
- return_dict (`bool`, *optional*):
- Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
- """
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
- outputs = self.model(
- input_ids=input_ids,
- past_key_values=past_key_values,
- attention_mask=attention_mask,
- position_ids=position_ids,
- head_mask=head_mask,
- inputs_embeds=inputs_embeds,
- use_cache=use_cache,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- training=training,
- )
- logits = self.model.decoder.embed_tokens(outputs[0], mode="linear")
- loss = None
- if labels is not None:
- # shift labels to the left and cut last logit token
- shifted_logits = logits[:, :-1]
- labels = labels[:, 1:]
- loss = self.hf_compute_loss(labels, shifted_logits)
- if not return_dict:
- output = (logits,) + outputs[1:]
- return ((loss,) + output) if loss is not None else output
- return TFCausalLMOutputWithPast(
- loss=loss,
- logits=logits,
- past_key_values=outputs.past_key_values,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- )
- def serving_output(self, output):
- pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
- hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
- attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
- return TFCausalLMOutputWithPast(
- past_key_values=pkv,
- hidden_states=hs,
- attentions=attns,
- loss=output.loss,
- logits=output.logits,
- )
- def build(self, input_shape=None):
- if self.built:
- return
- self.built = True
- if getattr(self, "model", None) is not None:
- with tf.name_scope(self.model.name):
- self.model.build(None)
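- # Editor-added end-to-end generation sketch (the prompt mirrors _CAUSAL_LM_EXPECTED_OUTPUT):
- #
- #     from transformers import AutoTokenizer, TFOPTForCausalLM
- #     tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
- #     model = TFOPTForCausalLM.from_pretrained("facebook/opt-350m")
- #     inputs = tokenizer("Hey, are you conscious? Can you talk to me?", return_tensors="tf")
- #     generated = model.generate(**inputs, max_new_tokens=30)
- #     print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])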
|