- # coding=utf-8
- # Copyright 2021 The Google Flax Team Authors and The HuggingFace Inc. team.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- from typing import Callable, Optional, Tuple
- import flax
- import flax.linen as nn
- import jax
- import jax.numpy as jnp
- import numpy as np
- from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
- from flax.linen import combine_masks, make_causal_mask
- from flax.linen import partitioning as nn_partitioning
- from flax.linen.attention import dot_product_attention_weights
- from flax.traverse_util import flatten_dict, unflatten_dict
- from jax import lax
- from ...modeling_flax_outputs import (
- FlaxBaseModelOutput,
- FlaxBaseModelOutputWithPastAndCrossAttentions,
- FlaxCausalLMOutputWithCrossAttentions,
- FlaxMaskedLMOutput,
- FlaxMultipleChoiceModelOutput,
- FlaxQuestionAnsweringModelOutput,
- FlaxSequenceClassifierOutput,
- FlaxTokenClassifierOutput,
- )
- from ...modeling_flax_utils import (
- ACT2FN,
- FlaxPreTrainedModel,
- append_call_sample_docstring,
- append_replace_return_docstrings,
- overwrite_call_docstring,
- )
- from ...utils import ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging
- from .configuration_electra import ElectraConfig
- logger = logging.get_logger(__name__)
- _CHECKPOINT_FOR_DOC = "google/electra-small-discriminator"
- _CONFIG_FOR_DOC = "ElectraConfig"
- remat = nn_partitioning.remat
- @flax.struct.dataclass
- class FlaxElectraForPreTrainingOutput(ModelOutput):
- """
- Output type of [`ElectraForPreTraining`].
- Args:
- logits (`jnp.ndarray` of shape `(batch_size, sequence_length, config.vocab_size)`):
- Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
- hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
- Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
- `(batch_size, sequence_length, hidden_size)`.
- Hidden-states of the model at the output of each layer plus the initial embedding outputs.
- attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
- Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
- sequence_length)`.
- Attention weights after the attention softmax, used to compute the weighted average in the self-attention
- heads.
- """
- logits: jnp.ndarray = None
- hidden_states: Optional[Tuple[jnp.ndarray]] = None
- attentions: Optional[Tuple[jnp.ndarray]] = None
- ELECTRA_START_DOCSTRING = r"""
- This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
- library implements for all its models (such as downloading, saving and converting weights from PyTorch models).
- This model is also a Flax Linen
- [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
- regular Flax Module and refer to the Flax documentation for all matters related to general usage and behavior.
- Finally, this model supports inherent JAX features such as:
- - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
- - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
- - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
- - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
- Parameters:
- config ([`ElectraConfig`]): Model configuration class with all the parameters of the model.
- Initializing with a config file does not load the weights associated with the model, only the
- configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
- """
- ELECTRA_INPUTS_DOCSTRING = r"""
- Args:
- input_ids (`numpy.ndarray` of shape `({0})`):
- Indices of input sequence tokens in the vocabulary.
- Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
- [`PreTrainedTokenizer.__call__`] for details.
- [What are input IDs?](../glossary#input-ids)
- attention_mask (`numpy.ndarray` of shape `({0})`, *optional*):
- Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- - 1 for tokens that are **not masked**,
- - 0 for tokens that are **masked**.
- [What are attention masks?](../glossary#attention-mask)
- token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*):
- Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
- 1]`:
- - 0 corresponds to a *sentence A* token,
- - 1 corresponds to a *sentence B* token.
- [What are token type IDs?](../glossary#token-type-ids)
- position_ids (`numpy.ndarray` of shape `({0})`, *optional*):
- Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
- config.max_position_embeddings - 1]`.
- head_mask (`numpy.ndarray` of shape `({0})`, *optional*):
- Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
- - 1 indicates the head is **not masked**,
- - 0 indicates the head is **masked**.
- return_dict (`bool`, *optional*):
- Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
- """
- class FlaxElectraEmbeddings(nn.Module):
- """Construct the embeddings from word, position and token_type embeddings."""
- config: ElectraConfig
- dtype: jnp.dtype = jnp.float32 # the dtype of the computation
- def setup(self):
- self.word_embeddings = nn.Embed(
- self.config.vocab_size,
- self.config.embedding_size,
- embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
- )
- self.position_embeddings = nn.Embed(
- self.config.max_position_embeddings,
- self.config.embedding_size,
- embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
- )
- self.token_type_embeddings = nn.Embed(
- self.config.type_vocab_size,
- self.config.embedding_size,
- embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range),
- )
- self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
- self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
- # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEmbeddings.__call__
- def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool = True):
- # Embed
- inputs_embeds = self.word_embeddings(input_ids.astype("i4"))
- position_embeds = self.position_embeddings(position_ids.astype("i4"))
- token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4"))
- # Sum all embeddings
- hidden_states = inputs_embeds + token_type_embeddings + position_embeds
- # Layer Norm
- hidden_states = self.LayerNorm(hidden_states)
- hidden_states = self.dropout(hidden_states, deterministic=deterministic)
- return hidden_states
- # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfAttention with Bert->Electra
- class FlaxElectraSelfAttention(nn.Module):
- config: ElectraConfig
- causal: bool = False
- dtype: jnp.dtype = jnp.float32 # the dtype of the computation
- def setup(self):
- self.head_dim = self.config.hidden_size // self.config.num_attention_heads
- if self.config.hidden_size % self.config.num_attention_heads != 0:
- raise ValueError(
- "`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads` "
- " : {self.config.num_attention_heads}"
- )
- self.query = nn.Dense(
- self.config.hidden_size,
- dtype=self.dtype,
- kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
- )
- self.key = nn.Dense(
- self.config.hidden_size,
- dtype=self.dtype,
- kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
- )
- self.value = nn.Dense(
- self.config.hidden_size,
- dtype=self.dtype,
- kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
- )
- if self.causal:
- self.causal_mask = make_causal_mask(
- jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
- )
- def _split_heads(self, hidden_states):
- return hidden_states.reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, self.head_dim))
- def _merge_heads(self, hidden_states):
- return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,))
- @nn.compact
- # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention._concatenate_to_cache
- def _concatenate_to_cache(self, key, value, query, attention_mask):
- """
- This function takes projected key, value states from a single input token and concatenates the states to cached
- states from previous steps. This function is slightly adapted from the official Flax repository:
- https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
- """
- # detect if we're initializing by absence of existing cache data.
- is_initialized = self.has_variable("cache", "cached_key")
- cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
- cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
- cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
- if is_initialized:
- *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
- # update key, value caches with our new 1d spatial slices
- cur_index = cache_index.value
- indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
- key = lax.dynamic_update_slice(cached_key.value, key, indices)
- value = lax.dynamic_update_slice(cached_value.value, value, indices)
- cached_key.value = key
- cached_value.value = value
- num_updated_cache_vectors = query.shape[1]
- cache_index.value = cache_index.value + num_updated_cache_vectors
- # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
- pad_mask = jnp.broadcast_to(
- jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
- tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
- )
- attention_mask = combine_masks(pad_mask, attention_mask)
- return key, value, attention_mask
- def __call__(
- self,
- hidden_states,
- attention_mask,
- layer_head_mask,
- key_value_states: Optional[jnp.ndarray] = None,
- init_cache: bool = False,
- deterministic=True,
- output_attentions: bool = False,
- ):
- # if key_value_states are provided this layer is used as a cross-attention layer
- # for the decoder
- is_cross_attention = key_value_states is not None
- batch_size = hidden_states.shape[0]
- # get query proj
- query_states = self.query(hidden_states)
- # get key, value proj
- if is_cross_attention:
- # cross_attentions
- key_states = self.key(key_value_states)
- value_states = self.value(key_value_states)
- else:
- # self_attention
- key_states = self.key(hidden_states)
- value_states = self.value(hidden_states)
- query_states = self._split_heads(query_states)
- key_states = self._split_heads(key_states)
- value_states = self._split_heads(value_states)
- # handle cache prepare causal attention mask
- if self.causal:
- query_length, key_length = query_states.shape[1], key_states.shape[1]
- if self.has_variable("cache", "cached_key"):
- mask_shift = self.variables["cache"]["cache_index"]
- max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
- causal_mask = lax.dynamic_slice(
- self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
- )
- else:
- causal_mask = self.causal_mask[:, :, :query_length, :key_length]
- causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
- # combine masks if needed
- if attention_mask is not None and self.causal:
- attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
- attention_mask = combine_masks(attention_mask, causal_mask)
- elif self.causal:
- attention_mask = causal_mask
- elif attention_mask is not None:
- attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
- # During fast autoregressive decoding, we feed one position at a time,
- # and cache the keys and values step by step.
- if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
- key_states, value_states, attention_mask = self._concatenate_to_cache(
- key_states, value_states, query_states, attention_mask
- )
- # Convert the boolean attention mask to an attention bias.
- if attention_mask is not None:
- # attention mask in the form of attention bias
- attention_bias = lax.select(
- attention_mask > 0,
- jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
- jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
- )
- else:
- attention_bias = None
- dropout_rng = None
- if not deterministic and self.config.attention_probs_dropout_prob > 0.0:
- dropout_rng = self.make_rng("dropout")
- attn_weights = dot_product_attention_weights(
- query_states,
- key_states,
- bias=attention_bias,
- dropout_rng=dropout_rng,
- dropout_rate=self.config.attention_probs_dropout_prob,
- broadcast_dropout=True,
- deterministic=deterministic,
- dtype=self.dtype,
- precision=None,
- )
- # Mask heads if we want to
- if layer_head_mask is not None:
- attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask)
- attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
- attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,))
- outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
- return outputs
- # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfOutput with Bert->Electra
- class FlaxElectraSelfOutput(nn.Module):
- config: ElectraConfig
- dtype: jnp.dtype = jnp.float32 # the dtype of the computation
- def setup(self):
- self.dense = nn.Dense(
- self.config.hidden_size,
- kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
- dtype=self.dtype,
- )
- self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
- self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
- def __call__(self, hidden_states, input_tensor, deterministic: bool = True):
- hidden_states = self.dense(hidden_states)
- hidden_states = self.dropout(hidden_states, deterministic=deterministic)
- hidden_states = self.LayerNorm(hidden_states + input_tensor)
- return hidden_states
- # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertAttention with Bert->Electra
- class FlaxElectraAttention(nn.Module):
- config: ElectraConfig
- causal: bool = False
- dtype: jnp.dtype = jnp.float32
- def setup(self):
- self.self = FlaxElectraSelfAttention(self.config, causal=self.causal, dtype=self.dtype)
- self.output = FlaxElectraSelfOutput(self.config, dtype=self.dtype)
- def __call__(
- self,
- hidden_states,
- attention_mask,
- layer_head_mask,
- key_value_states=None,
- init_cache=False,
- deterministic=True,
- output_attentions: bool = False,
- ):
- # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length)
- # FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable
- # with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length)
- attn_outputs = self.self(
- hidden_states,
- attention_mask,
- layer_head_mask=layer_head_mask,
- key_value_states=key_value_states,
- init_cache=init_cache,
- deterministic=deterministic,
- output_attentions=output_attentions,
- )
- attn_output = attn_outputs[0]
- hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic)
- outputs = (hidden_states,)
- if output_attentions:
- outputs += (attn_outputs[1],)
- return outputs
- # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertIntermediate with Bert->Electra
- class FlaxElectraIntermediate(nn.Module):
- config: ElectraConfig
- dtype: jnp.dtype = jnp.float32 # the dtype of the computation
- def setup(self):
- self.dense = nn.Dense(
- self.config.intermediate_size,
- kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
- dtype=self.dtype,
- )
- self.activation = ACT2FN[self.config.hidden_act]
- def __call__(self, hidden_states):
- hidden_states = self.dense(hidden_states)
- hidden_states = self.activation(hidden_states)
- return hidden_states
- # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertOutput with Bert->Electra
- class FlaxElectraOutput(nn.Module):
- config: ElectraConfig
- dtype: jnp.dtype = jnp.float32 # the dtype of the computation
- def setup(self):
- self.dense = nn.Dense(
- self.config.hidden_size,
- kernel_init=jax.nn.initializers.normal(self.config.initializer_range),
- dtype=self.dtype,
- )
- self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob)
- self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
- def __call__(self, hidden_states, attention_output, deterministic: bool = True):
- hidden_states = self.dense(hidden_states)
- hidden_states = self.dropout(hidden_states, deterministic=deterministic)
- hidden_states = self.LayerNorm(hidden_states + attention_output)
- return hidden_states
- # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayer with Bert->Electra
- class FlaxElectraLayer(nn.Module):
- config: ElectraConfig
- dtype: jnp.dtype = jnp.float32 # the dtype of the computation
- def setup(self):
- self.attention = FlaxElectraAttention(self.config, causal=self.config.is_decoder, dtype=self.dtype)
- self.intermediate = FlaxElectraIntermediate(self.config, dtype=self.dtype)
- self.output = FlaxElectraOutput(self.config, dtype=self.dtype)
- if self.config.add_cross_attention:
- self.crossattention = FlaxElectraAttention(self.config, causal=False, dtype=self.dtype)
- def __call__(
- self,
- hidden_states,
- attention_mask,
- layer_head_mask,
- encoder_hidden_states: Optional[jnp.ndarray] = None,
- encoder_attention_mask: Optional[jnp.ndarray] = None,
- init_cache: bool = False,
- deterministic: bool = True,
- output_attentions: bool = False,
- ):
- # Self Attention
- attention_outputs = self.attention(
- hidden_states,
- attention_mask,
- layer_head_mask=layer_head_mask,
- init_cache=init_cache,
- deterministic=deterministic,
- output_attentions=output_attentions,
- )
- attention_output = attention_outputs[0]
- # Cross-Attention Block
- if encoder_hidden_states is not None:
- cross_attention_outputs = self.crossattention(
- attention_output,
- attention_mask=encoder_attention_mask,
- layer_head_mask=layer_head_mask,
- key_value_states=encoder_hidden_states,
- deterministic=deterministic,
- output_attentions=output_attentions,
- )
- attention_output = cross_attention_outputs[0]
- hidden_states = self.intermediate(attention_output)
- hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic)
- outputs = (hidden_states,)
- if output_attentions:
- outputs += (attention_outputs[1],)
- if encoder_hidden_states is not None:
- outputs += (cross_attention_outputs[1],)
- return outputs
- # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayerCollection with Bert->Electra
- class FlaxElectraLayerCollection(nn.Module):
- config: ElectraConfig
- dtype: jnp.dtype = jnp.float32 # the dtype of the computation
- gradient_checkpointing: bool = False
- def setup(self):
- if self.gradient_checkpointing:
- FlaxElectraCheckpointLayer = remat(FlaxElectraLayer, static_argnums=(5, 6, 7))
- self.layers = [
- FlaxElectraCheckpointLayer(self.config, name=str(i), dtype=self.dtype)
- for i in range(self.config.num_hidden_layers)
- ]
- else:
- self.layers = [
- FlaxElectraLayer(self.config, name=str(i), dtype=self.dtype)
- for i in range(self.config.num_hidden_layers)
- ]
- def __call__(
- self,
- hidden_states,
- attention_mask,
- head_mask,
- encoder_hidden_states: Optional[jnp.ndarray] = None,
- encoder_attention_mask: Optional[jnp.ndarray] = None,
- init_cache: bool = False,
- deterministic: bool = True,
- output_attentions: bool = False,
- output_hidden_states: bool = False,
- return_dict: bool = True,
- ):
- all_attentions = () if output_attentions else None
- all_hidden_states = () if output_hidden_states else None
- all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
- # Check if head_mask has a correct number of layers specified if desired
- if head_mask is not None:
- if head_mask.shape[0] != (len(self.layers)):
- raise ValueError(
- f"The head_mask should be specified for {len(self.layers)} layers, but it is for "
- f" {head_mask.shape[0]}."
- )
- for i, layer in enumerate(self.layers):
- if output_hidden_states:
- all_hidden_states += (hidden_states,)
- layer_outputs = layer(
- hidden_states,
- attention_mask,
- head_mask[i] if head_mask is not None else None,
- encoder_hidden_states,
- encoder_attention_mask,
- init_cache,
- deterministic,
- output_attentions,
- )
- hidden_states = layer_outputs[0]
- if output_attentions:
- all_attentions += (layer_outputs[1],)
- if encoder_hidden_states is not None:
- all_cross_attentions += (layer_outputs[2],)
- if output_hidden_states:
- all_hidden_states += (hidden_states,)
- outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions)
- if not return_dict:
- return tuple(v for v in outputs if v is not None)
- return FlaxBaseModelOutputWithPastAndCrossAttentions(
- last_hidden_state=hidden_states,
- hidden_states=all_hidden_states,
- attentions=all_attentions,
- cross_attentions=all_cross_attentions,
- )
- # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEncoder with Bert->Electra
- class FlaxElectraEncoder(nn.Module):
- config: ElectraConfig
- dtype: jnp.dtype = jnp.float32 # the dtype of the computation
- gradient_checkpointing: bool = False
- def setup(self):
- self.layer = FlaxElectraLayerCollection(
- self.config,
- dtype=self.dtype,
- gradient_checkpointing=self.gradient_checkpointing,
- )
- def __call__(
- self,
- hidden_states,
- attention_mask,
- head_mask,
- encoder_hidden_states: Optional[jnp.ndarray] = None,
- encoder_attention_mask: Optional[jnp.ndarray] = None,
- init_cache: bool = False,
- deterministic: bool = True,
- output_attentions: bool = False,
- output_hidden_states: bool = False,
- return_dict: bool = True,
- ):
- return self.layer(
- hidden_states,
- attention_mask,
- head_mask=head_mask,
- encoder_hidden_states=encoder_hidden_states,
- encoder_attention_mask=encoder_attention_mask,
- init_cache=init_cache,
- deterministic=deterministic,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
- class FlaxElectraGeneratorPredictions(nn.Module):
- config: ElectraConfig
- dtype: jnp.dtype = jnp.float32
- def setup(self):
- self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
- self.dense = nn.Dense(self.config.embedding_size, dtype=self.dtype)
- def __call__(self, hidden_states):
- hidden_states = self.dense(hidden_states)
- hidden_states = ACT2FN[self.config.hidden_act](hidden_states)
- hidden_states = self.LayerNorm(hidden_states)
- return hidden_states
- class FlaxElectraDiscriminatorPredictions(nn.Module):
- """Prediction module for the discriminator, made up of two dense layers."""
- config: ElectraConfig
- dtype: jnp.dtype = jnp.float32
- def setup(self):
- self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype)
- self.dense_prediction = nn.Dense(1, dtype=self.dtype)
- def __call__(self, hidden_states):
- hidden_states = self.dense(hidden_states)
- hidden_states = ACT2FN[self.config.hidden_act](hidden_states)
- hidden_states = self.dense_prediction(hidden_states).squeeze(-1)
- return hidden_states
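- # Usage note (sketch): the discriminator emits one logit per token; a replaced-token probability can be
- # obtained with `jax.nn.sigmoid(logits)`. This interpretation follows the ELECTRA pre-training objective
- # and is not asserted by this file.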
- class FlaxElectraPreTrainedModel(FlaxPreTrainedModel):
- """
- An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
- models.
- """
- config_class = ElectraConfig
- base_model_prefix = "electra"
- module_class: nn.Module = None
- def __init__(
- self,
- config: ElectraConfig,
- input_shape: Tuple = (1, 1),
- seed: int = 0,
- dtype: jnp.dtype = jnp.float32,
- _do_init: bool = True,
- gradient_checkpointing: bool = False,
- **kwargs,
- ):
- module = self.module_class(config=config, dtype=dtype, gradient_checkpointing=gradient_checkpointing, **kwargs)
- super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
- # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.enable_gradient_checkpointing
- def enable_gradient_checkpointing(self):
- self._module = self.module_class(
- config=self.config,
- dtype=self.dtype,
- gradient_checkpointing=True,
- )
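- # Hedged usage sketch (not from the original file): gradient checkpointing can also be enabled on an
- # already-constructed model, e.g.
- #     model = FlaxElectraModel.from_pretrained("google/electra-small-discriminator")
- #     model.enable_gradient_checkpointing()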
- # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.init_weights
- def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
- # init input tensors
- input_ids = jnp.zeros(input_shape, dtype="i4")
- token_type_ids = jnp.zeros_like(input_ids)
- position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
- attention_mask = jnp.ones_like(input_ids)
- head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))
- params_rng, dropout_rng = jax.random.split(rng)
- rngs = {"params": params_rng, "dropout": dropout_rng}
- if self.config.add_cross_attention:
- encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,))
- encoder_attention_mask = attention_mask
- module_init_outputs = self.module.init(
- rngs,
- input_ids,
- attention_mask,
- token_type_ids,
- position_ids,
- head_mask,
- encoder_hidden_states,
- encoder_attention_mask,
- return_dict=False,
- )
- else:
- module_init_outputs = self.module.init(
- rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False
- )
- random_params = module_init_outputs["params"]
- if params is not None:
- random_params = flatten_dict(unfreeze(random_params))
- params = flatten_dict(unfreeze(params))
- for missing_key in self._missing_keys:
- params[missing_key] = random_params[missing_key]
- self._missing_keys = set()
- return freeze(unflatten_dict(params))
- else:
- return random_params
- # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderPreTrainedModel.init_cache
- def init_cache(self, batch_size, max_length):
- r"""
- Args:
- batch_size (`int`):
- batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
- max_length (`int`):
- maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
- cache.
- """
- # init input variables to retrieve cache
- input_ids = jnp.ones((batch_size, max_length), dtype="i4")
- attention_mask = jnp.ones_like(input_ids, dtype="i4")
- position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
- init_variables = self.module.init(
- jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True
- )
- return unfreeze(init_variables["cache"])
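- # Hedged sketch of how the cache returned above is consumed: `__call__` (below) accepts it as
- # `past_key_values` when `config.add_cross_attention` is set, and returns the updated cache, e.g.
- #     past_key_values = model.init_cache(batch_size, max_length)
- #     outputs = model(input_ids, attention_mask=attention_mask, past_key_values=past_key_values)
- #     past_key_values = outputs["past_key_values"]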
- @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
- def __call__(
- self,
- input_ids,
- attention_mask=None,
- token_type_ids=None,
- position_ids=None,
- head_mask=None,
- encoder_hidden_states=None,
- encoder_attention_mask=None,
- params: dict = None,
- dropout_rng: jax.random.PRNGKey = None,
- train: bool = False,
- output_attentions: Optional[bool] = None,
- output_hidden_states: Optional[bool] = None,
- return_dict: Optional[bool] = None,
- past_key_values: dict = None,
- ):
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
- output_hidden_states = (
- output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
- )
- return_dict = return_dict if return_dict is not None else self.config.return_dict
- # init input tensors if not passed
- if token_type_ids is None:
- token_type_ids = jnp.ones_like(input_ids)
- if position_ids is None:
- position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
- if attention_mask is None:
- attention_mask = jnp.ones_like(input_ids)
- if head_mask is None:
- head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads))
- # Handle any PRNG if needed
- rngs = {}
- if dropout_rng is not None:
- rngs["dropout"] = dropout_rng
- inputs = {"params": params or self.params}
- if self.config.add_cross_attention:
- # If past_key_values are passed, the cache is already initialized; a private flag `init_cache` has to be
- # passed down to ensure the cache is used. The cache also has to be marked as mutable so that it can be
- # changed by the FlaxElectraAttention module.
- if past_key_values:
- inputs["cache"] = past_key_values
- mutable = ["cache"]
- else:
- mutable = False
- outputs = self.module.apply(
- inputs,
- jnp.array(input_ids, dtype="i4"),
- jnp.array(attention_mask, dtype="i4"),
- token_type_ids=jnp.array(token_type_ids, dtype="i4"),
- position_ids=jnp.array(position_ids, dtype="i4"),
- head_mask=jnp.array(head_mask, dtype="i4"),
- encoder_hidden_states=encoder_hidden_states,
- encoder_attention_mask=encoder_attention_mask,
- deterministic=not train,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- rngs=rngs,
- mutable=mutable,
- )
- # add updated cache to model output
- if past_key_values is not None and return_dict:
- outputs, past_key_values = outputs
- outputs["past_key_values"] = unfreeze(past_key_values["cache"])
- return outputs
- elif past_key_values is not None and not return_dict:
- outputs, past_key_values = outputs
- outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:]
- else:
- outputs = self.module.apply(
- inputs,
- jnp.array(input_ids, dtype="i4"),
- jnp.array(attention_mask, dtype="i4"),
- token_type_ids=jnp.array(token_type_ids, dtype="i4"),
- position_ids=jnp.array(position_ids, dtype="i4"),
- head_mask=jnp.array(head_mask, dtype="i4"),
- deterministic=not train,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- rngs=rngs,
- )
- return outputs
- class FlaxElectraModule(nn.Module):
- config: ElectraConfig
- dtype: jnp.dtype = jnp.float32 # the dtype of the computation
- gradient_checkpointing: bool = False
- def setup(self):
- self.embeddings = FlaxElectraEmbeddings(self.config, dtype=self.dtype)
- if self.config.embedding_size != self.config.hidden_size:
- self.embeddings_project = nn.Dense(self.config.hidden_size, dtype=self.dtype)
- self.encoder = FlaxElectraEncoder(
- self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
- )
- def __call__(
- self,
- input_ids,
- attention_mask,
- token_type_ids,
- position_ids,
- head_mask: Optional[np.ndarray] = None,
- encoder_hidden_states: Optional[jnp.ndarray] = None,
- encoder_attention_mask: Optional[jnp.ndarray] = None,
- init_cache: bool = False,
- deterministic: bool = True,
- output_attentions: bool = False,
- output_hidden_states: bool = False,
- return_dict: bool = True,
- ):
- embeddings = self.embeddings(
- input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic
- )
- if hasattr(self, "embeddings_project"):
- embeddings = self.embeddings_project(embeddings)
- return self.encoder(
- embeddings,
- attention_mask,
- head_mask=head_mask,
- deterministic=deterministic,
- encoder_hidden_states=encoder_hidden_states,
- encoder_attention_mask=encoder_attention_mask,
- init_cache=init_cache,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
- @add_start_docstrings(
- "The bare Electra Model transformer outputting raw hidden-states without any specific head on top.",
- ELECTRA_START_DOCSTRING,
- )
- class FlaxElectraModel(FlaxElectraPreTrainedModel):
- module_class = FlaxElectraModule
- append_call_sample_docstring(FlaxElectraModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutput, _CONFIG_FOR_DOC)
- class FlaxElectraTiedDense(nn.Module):
- embedding_size: int
- dtype: jnp.dtype = jnp.float32
- precision = None
- bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros
- def setup(self):
- self.bias = self.param("bias", self.bias_init, (self.embedding_size,))
- def __call__(self, x, kernel):
- x = jnp.asarray(x, self.dtype)
- kernel = jnp.asarray(kernel, self.dtype)
- y = lax.dot_general(
- x,
- kernel,
- (((x.ndim - 1,), (0,)), ((), ())),
- precision=self.precision,
- )
- bias = jnp.asarray(self.bias, self.dtype)
- return y + bias
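- # Usage note (sketch): unlike `nn.Dense`, the kernel is supplied at call time, which lets the LM head
- # reuse the transposed word-embedding matrix when `config.tie_word_embeddings` is set, as done in
- # FlaxElectraForMaskedLMModule below:
- #     shared_embedding = self.electra.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
- #     prediction_scores = self.generator_lm_head(prediction_scores, shared_embedding.T)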
- class FlaxElectraForMaskedLMModule(nn.Module):
- config: ElectraConfig
- dtype: jnp.dtype = jnp.float32
- gradient_checkpointing: bool = False
- def setup(self):
- self.electra = FlaxElectraModule(
- config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
- )
- self.generator_predictions = FlaxElectraGeneratorPredictions(config=self.config, dtype=self.dtype)
- if self.config.tie_word_embeddings:
- self.generator_lm_head = FlaxElectraTiedDense(self.config.vocab_size, dtype=self.dtype)
- else:
- self.generator_lm_head = nn.Dense(self.config.vocab_size, dtype=self.dtype)
- def __call__(
- self,
- input_ids,
- attention_mask=None,
- token_type_ids=None,
- position_ids=None,
- head_mask=None,
- deterministic: bool = True,
- output_attentions: bool = False,
- output_hidden_states: bool = False,
- return_dict: bool = True,
- ):
- outputs = self.electra(
- input_ids,
- attention_mask,
- token_type_ids,
- position_ids,
- head_mask,
- deterministic=deterministic,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
- hidden_states = outputs[0]
- prediction_scores = self.generator_predictions(hidden_states)
- if self.config.tie_word_embeddings:
- shared_embedding = self.electra.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
- prediction_scores = self.generator_lm_head(prediction_scores, shared_embedding.T)
- else:
- prediction_scores = self.generator_lm_head(prediction_scores)
- if not return_dict:
- return (prediction_scores,) + outputs[1:]
- return FlaxMaskedLMOutput(
- logits=prediction_scores,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- )
- @add_start_docstrings("""Electra Model with a `language modeling` head on top.""", ELECTRA_START_DOCSTRING)
- class FlaxElectraForMaskedLM(FlaxElectraPreTrainedModel):
- module_class = FlaxElectraForMaskedLMModule
- append_call_sample_docstring(FlaxElectraForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxMaskedLMOutput, _CONFIG_FOR_DOC)
- class FlaxElectraForPreTrainingModule(nn.Module):
- config: ElectraConfig
- dtype: jnp.dtype = jnp.float32
- gradient_checkpointing: bool = False
- def setup(self):
- self.electra = FlaxElectraModule(
- config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
- )
- self.discriminator_predictions = FlaxElectraDiscriminatorPredictions(config=self.config, dtype=self.dtype)
- def __call__(
- self,
- input_ids,
- attention_mask=None,
- token_type_ids=None,
- position_ids=None,
- head_mask=None,
- deterministic: bool = True,
- output_attentions: bool = False,
- output_hidden_states: bool = False,
- return_dict: bool = True,
- ):
- # Model
- outputs = self.electra(
- input_ids,
- attention_mask,
- token_type_ids,
- position_ids,
- head_mask,
- deterministic=deterministic,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
- hidden_states = outputs[0]
- logits = self.discriminator_predictions(hidden_states)
- if not return_dict:
- return (logits,) + outputs[1:]
- return FlaxElectraForPreTrainingOutput(
- logits=logits,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- )
- @add_start_docstrings(
- """
- Electra model with a binary classification head on top as used during pretraining for identifying generated tokens.
- It is recommended to load the discriminator checkpoint into that model.
- """,
- ELECTRA_START_DOCSTRING,
- )
- class FlaxElectraForPreTraining(FlaxElectraPreTrainedModel):
- module_class = FlaxElectraForPreTrainingModule
- FLAX_ELECTRA_FOR_PRETRAINING_DOCSTRING = """
- Returns:
- Example:
- ```python
- >>> from transformers import AutoTokenizer, FlaxElectraForPreTraining
- >>> tokenizer = AutoTokenizer.from_pretrained("google/electra-small-discriminator")
- >>> model = FlaxElectraForPreTraining.from_pretrained("google/electra-small-discriminator")
- >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
- >>> outputs = model(**inputs)
- >>> prediction_logits = outputs.logits
- ```
- """
- overwrite_call_docstring(
- FlaxElectraForPreTraining,
- ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length") + FLAX_ELECTRA_FOR_PRETRAINING_DOCSTRING,
- )
- append_replace_return_docstrings(
- FlaxElectraForPreTraining, output_type=FlaxElectraForPreTrainingOutput, config_class=_CONFIG_FOR_DOC
- )
- class FlaxElectraForTokenClassificationModule(nn.Module):
- config: ElectraConfig
- dtype: jnp.dtype = jnp.float32
- gradient_checkpointing: bool = False
- def setup(self):
- self.electra = FlaxElectraModule(
- config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
- )
- classifier_dropout = (
- self.config.classifier_dropout
- if self.config.classifier_dropout is not None
- else self.config.hidden_dropout_prob
- )
- self.dropout = nn.Dropout(classifier_dropout)
- self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype)
- def __call__(
- self,
- input_ids,
- attention_mask=None,
- token_type_ids=None,
- position_ids=None,
- head_mask=None,
- deterministic: bool = True,
- output_attentions: bool = False,
- output_hidden_states: bool = False,
- return_dict: bool = True,
- ):
- # Model
- outputs = self.electra(
- input_ids,
- attention_mask,
- token_type_ids,
- position_ids,
- head_mask,
- deterministic=deterministic,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
- hidden_states = outputs[0]
- hidden_states = self.dropout(hidden_states, deterministic=deterministic)
- logits = self.classifier(hidden_states)
- if not return_dict:
- return (logits,) + outputs[1:]
- return FlaxTokenClassifierOutput(
- logits=logits,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- )
- @add_start_docstrings(
- """
- Electra model with a token classification head on top.
- Both the discriminator and generator may be loaded into this model.
- """,
- ELECTRA_START_DOCSTRING,
- )
- class FlaxElectraForTokenClassification(FlaxElectraPreTrainedModel):
- module_class = FlaxElectraForTokenClassificationModule
- append_call_sample_docstring(
- FlaxElectraForTokenClassification,
- _CHECKPOINT_FOR_DOC,
- FlaxTokenClassifierOutput,
- _CONFIG_FOR_DOC,
- )
- def identity(x, **kwargs):
- return x
- class FlaxElectraSequenceSummary(nn.Module):
- r"""
- Compute a single vector summary of a sequence's hidden states.
- Args:
- config ([`PretrainedConfig`]):
- The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
- config class of your model for the default values it uses):
- - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction.
- - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes
- (otherwise to `config.hidden_size`).
- - **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output,
- another string or `None` will add no activation.
- - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation.
- - **summary_last_dropout** (`float`) -- Optional dropout probability after the projection and activation.
- """
- config: ElectraConfig
- dtype: jnp.dtype = jnp.float32
- def setup(self):
- self.summary = identity
- if hasattr(self.config, "summary_use_proj") and self.config.summary_use_proj:
- if (
- hasattr(self.config, "summary_proj_to_labels")
- and self.config.summary_proj_to_labels
- and self.config.num_labels > 0
- ):
- num_classes = self.config.num_labels
- else:
- num_classes = self.config.hidden_size
- self.summary = nn.Dense(num_classes, dtype=self.dtype)
- activation_string = getattr(self.config, "summary_activation", None)
- self.activation = ACT2FN[activation_string] if activation_string else lambda x: x # noqa F407
- self.first_dropout = identity
- if hasattr(self.config, "summary_first_dropout") and self.config.summary_first_dropout > 0:
- self.first_dropout = nn.Dropout(self.config.summary_first_dropout)
- self.last_dropout = identity
- if hasattr(self.config, "summary_last_dropout") and self.config.summary_last_dropout > 0:
- self.last_dropout = nn.Dropout(self.config.summary_last_dropout)
- def __call__(self, hidden_states, cls_index=None, deterministic: bool = True):
- """
- Compute a single vector summary of a sequence's hidden states.
- Args:
- hidden_states (`jnp.ndarray` of shape `[batch_size, seq_len, hidden_size]`):
- The hidden states of the last layer.
- cls_index (`jnp.ndarray` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*):
- Used if `summary_type == "cls_index"` and takes the last token of the sequence as classification token.
- Returns:
- `jnp.ndarray`: The summary of the sequence hidden states.
- """
- # NOTE: this always does the "first" type summary (i.e. it takes the first token's hidden state)
- output = hidden_states[:, 0]
- output = self.first_dropout(output, deterministic=deterministic)
- output = self.summary(output)
- output = self.activation(output)
- output = self.last_dropout(output, deterministic=deterministic)
- return output
- class FlaxElectraForMultipleChoiceModule(nn.Module):
- config: ElectraConfig
- dtype: jnp.dtype = jnp.float32
- gradient_checkpointing: bool = False
- def setup(self):
- self.electra = FlaxElectraModule(
- config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
- )
- self.sequence_summary = FlaxElectraSequenceSummary(config=self.config, dtype=self.dtype)
- self.classifier = nn.Dense(1, dtype=self.dtype)
- def __call__(
- self,
- input_ids,
- attention_mask=None,
- token_type_ids=None,
- position_ids=None,
- head_mask=None,
- deterministic: bool = True,
- output_attentions: bool = False,
- output_hidden_states: bool = False,
- return_dict: bool = True,
- ):
- num_choices = input_ids.shape[1]
- input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None
- attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None
- token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None
- position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None
- # Model
- outputs = self.electra(
- input_ids,
- attention_mask,
- token_type_ids,
- position_ids,
- head_mask,
- deterministic=deterministic,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
- hidden_states = outputs[0]
- pooled_output = self.sequence_summary(hidden_states, deterministic=deterministic)
- logits = self.classifier(pooled_output)
- reshaped_logits = logits.reshape(-1, num_choices)
- if not return_dict:
- return (reshaped_logits,) + outputs[1:]
- return FlaxMultipleChoiceModelOutput(
- logits=reshaped_logits,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- )
- @add_start_docstrings(
- """
- ELECTRA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
- softmax) e.g. for RocStories/SWAG tasks.
- """,
- ELECTRA_START_DOCSTRING,
- )
- class FlaxElectraForMultipleChoice(FlaxElectraPreTrainedModel):
- module_class = FlaxElectraForMultipleChoiceModule
- # adapt docstring slightly for FlaxElectraForMultipleChoice
- overwrite_call_docstring(
- FlaxElectraForMultipleChoice, ELECTRA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
- )
- append_call_sample_docstring(
- FlaxElectraForMultipleChoice,
- _CHECKPOINT_FOR_DOC,
- FlaxMultipleChoiceModelOutput,
- _CONFIG_FOR_DOC,
- )
- class FlaxElectraForQuestionAnsweringModule(nn.Module):
- config: ElectraConfig
- dtype: jnp.dtype = jnp.float32
- gradient_checkpointing: bool = False
- def setup(self):
- self.electra = FlaxElectraModule(
- config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
- )
- self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype)
- def __call__(
- self,
- input_ids,
- attention_mask=None,
- token_type_ids=None,
- position_ids=None,
- head_mask=None,
- deterministic: bool = True,
- output_attentions: bool = False,
- output_hidden_states: bool = False,
- return_dict: bool = True,
- ):
- # Model
- outputs = self.electra(
- input_ids,
- attention_mask,
- token_type_ids,
- position_ids,
- head_mask,
- deterministic=deterministic,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
- hidden_states = outputs[0]
- logits = self.qa_outputs(hidden_states)
- start_logits, end_logits = logits.split(self.config.num_labels, axis=-1)
- start_logits = start_logits.squeeze(-1)
- end_logits = end_logits.squeeze(-1)
- if not return_dict:
- return (start_logits, end_logits) + outputs[1:]
- return FlaxQuestionAnsweringModelOutput(
- start_logits=start_logits,
- end_logits=end_logits,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- )
- @add_start_docstrings(
- """
- ELECTRA Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
- layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
- """,
- ELECTRA_START_DOCSTRING,
- )
- class FlaxElectraForQuestionAnswering(FlaxElectraPreTrainedModel):
- module_class = FlaxElectraForQuestionAnsweringModule
- append_call_sample_docstring(
- FlaxElectraForQuestionAnswering,
- _CHECKPOINT_FOR_DOC,
- FlaxQuestionAnsweringModelOutput,
- _CONFIG_FOR_DOC,
- )
- class FlaxElectraClassificationHead(nn.Module):
- """Head for sentence-level classification tasks."""
- config: ElectraConfig
- dtype: jnp.dtype = jnp.float32
- def setup(self):
- self.dense = nn.Dense(self.config.hidden_size, dtype=self.dtype)
- classifier_dropout = (
- self.config.classifier_dropout
- if self.config.classifier_dropout is not None
- else self.config.hidden_dropout_prob
- )
- self.dropout = nn.Dropout(classifier_dropout)
- self.out_proj = nn.Dense(self.config.num_labels, dtype=self.dtype)
- def __call__(self, hidden_states, deterministic: bool = True):
- x = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS])
- x = self.dropout(x, deterministic=deterministic)
- x = self.dense(x)
- x = ACT2FN["gelu"](x) # although BERT uses tanh here, it seems Electra authors used gelu
- x = self.dropout(x, deterministic=deterministic)
- x = self.out_proj(x)
- return x
- class FlaxElectraForSequenceClassificationModule(nn.Module):
- config: ElectraConfig
- dtype: jnp.dtype = jnp.float32
- gradient_checkpointing: bool = False
- def setup(self):
- self.electra = FlaxElectraModule(
- config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
- )
- self.classifier = FlaxElectraClassificationHead(config=self.config, dtype=self.dtype)
- def __call__(
- self,
- input_ids,
- attention_mask=None,
- token_type_ids=None,
- position_ids=None,
- head_mask=None,
- deterministic: bool = True,
- output_attentions: bool = False,
- output_hidden_states: bool = False,
- return_dict: bool = True,
- ):
- # Model
- outputs = self.electra(
- input_ids,
- attention_mask,
- token_type_ids,
- position_ids,
- head_mask,
- deterministic=deterministic,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
- hidden_states = outputs[0]
- logits = self.classifier(hidden_states, deterministic=deterministic)
- if not return_dict:
- return (logits,) + outputs[1:]
- return FlaxSequenceClassifierOutput(
- logits=logits,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- )
- @add_start_docstrings(
- """
- Electra Model transformer with a sequence classification/regression head on top (a linear layer on top of the
- pooled output) e.g. for GLUE tasks.
- """,
- ELECTRA_START_DOCSTRING,
- )
- class FlaxElectraForSequenceClassification(FlaxElectraPreTrainedModel):
- module_class = FlaxElectraForSequenceClassificationModule
- append_call_sample_docstring(
- FlaxElectraForSequenceClassification,
- _CHECKPOINT_FOR_DOC,
- FlaxSequenceClassifierOutput,
- _CONFIG_FOR_DOC,
- )
- class FlaxElectraForCausalLMModule(nn.Module):
- config: ElectraConfig
- dtype: jnp.dtype = jnp.float32
- gradient_checkpointing: bool = False
- def setup(self):
- self.electra = FlaxElectraModule(
- config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing
- )
- self.generator_predictions = FlaxElectraGeneratorPredictions(config=self.config, dtype=self.dtype)
- if self.config.tie_word_embeddings:
- self.generator_lm_head = FlaxElectraTiedDense(self.config.vocab_size, dtype=self.dtype)
- else:
- self.generator_lm_head = nn.Dense(self.config.vocab_size, dtype=self.dtype)
- def __call__(
- self,
- input_ids,
- attention_mask: Optional[jnp.ndarray] = None,
- token_type_ids: Optional[jnp.ndarray] = None,
- position_ids: Optional[jnp.ndarray] = None,
- head_mask: Optional[jnp.ndarray] = None,
- encoder_hidden_states: Optional[jnp.ndarray] = None,
- encoder_attention_mask: Optional[jnp.ndarray] = None,
- init_cache: bool = False,
- deterministic: bool = True,
- output_attentions: bool = False,
- output_hidden_states: bool = False,
- return_dict: bool = True,
- ):
- outputs = self.electra(
- input_ids,
- attention_mask,
- token_type_ids,
- position_ids,
- head_mask,
- encoder_hidden_states=encoder_hidden_states,
- encoder_attention_mask=encoder_attention_mask,
- init_cache=init_cache,
- deterministic=deterministic,
- output_attentions=output_attentions,
- output_hidden_states=output_hidden_states,
- return_dict=return_dict,
- )
- hidden_states = outputs[0]
- prediction_scores = self.generator_predictions(hidden_states)
- if self.config.tie_word_embeddings:
- shared_embedding = self.electra.variables["params"]["embeddings"]["word_embeddings"]["embedding"]
- prediction_scores = self.generator_lm_head(prediction_scores, shared_embedding.T)
- else:
- prediction_scores = self.generator_lm_head(prediction_scores)
- if not return_dict:
- return (prediction_scores,) + outputs[1:]
- return FlaxCausalLMOutputWithCrossAttentions(
- logits=prediction_scores,
- hidden_states=outputs.hidden_states,
- attentions=outputs.attentions,
- cross_attentions=outputs.cross_attentions,
- )
- @add_start_docstrings(
- """
- Electra Model with a language modeling head on top (a linear layer on top of the hidden-states output), e.g. for
- autoregressive tasks.
- """,
- ELECTRA_START_DOCSTRING,
- )
- # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertForCausalLM with Bert->Electra
- class FlaxElectraForCausalLM(FlaxElectraPreTrainedModel):
- module_class = FlaxElectraForCausalLMModule
- def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None):
- # initializing the cache
- batch_size, seq_length = input_ids.shape
- past_key_values = self.init_cache(batch_size, max_length)
- # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
- # But since the decoder uses a causal mask, those positions are masked anyway.
- # Thus, we can create a single static attention_mask here, which is more efficient for compilation
- extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
- if attention_mask is not None:
- position_ids = attention_mask.cumsum(axis=-1) - 1
- extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0))
- else:
- position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
- return {
- "past_key_values": past_key_values,
- "attention_mask": extended_attention_mask,
- "position_ids": position_ids,
- }
- def update_inputs_for_generation(self, model_outputs, model_kwargs):
- model_kwargs["past_key_values"] = model_outputs.past_key_values
- model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1
- return model_kwargs
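- # Hedged generation sketch (assumptions: the generator checkpoint is used as a decoder via the
- # `is_decoder=True` override, and `generate` comes from FlaxGenerationMixin, not this file):
- #     from transformers import AutoTokenizer, FlaxElectraForCausalLM
- #     tokenizer = AutoTokenizer.from_pretrained("google/electra-small-generator")
- #     model = FlaxElectraForCausalLM.from_pretrained("google/electra-small-generator", is_decoder=True)
- #     inputs = tokenizer("Hello", return_tensors="np")
- #     outputs = model.generate(inputs["input_ids"], max_length=20)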
- append_call_sample_docstring(
- FlaxElectraForCausalLM,
- _CHECKPOINT_FOR_DOC,
- FlaxCausalLMOutputWithCrossAttentions,
- _CONFIG_FOR_DOC,
- )
|