| 123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702
70370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761
27712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648 |
- # coding=utf-8
- # Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """PyTorch Hubert model."""
- import warnings
- from typing import Optional, Tuple, Union
- import numpy as np
- import torch
- import torch.utils.checkpoint
- from torch import nn
- from torch.nn import CrossEntropyLoss
- from ...activations import ACT2FN
- from ...integrations.deepspeed import is_deepspeed_zero3_enabled
- from ...integrations.fsdp import is_fsdp_managed_module
- from ...modeling_outputs import BaseModelOutput, CausalLMOutput, SequenceClassifierOutput
- from ...modeling_utils import PreTrainedModel
- from ...utils import (
- add_code_sample_docstrings,
- add_start_docstrings,
- add_start_docstrings_to_model_forward,
- is_flash_attn_2_available,
- is_flash_attn_greater_or_equal_2_10,
- logging,
- replace_return_docstrings,
- )
- from .configuration_hubert import HubertConfig
- if is_flash_attn_2_available():
- from ...modeling_flash_attention_utils import _flash_attention_forward
- logger = logging.get_logger(__name__)
# Offset into the model's `hidden_states` tuple used by downstream heads.
# NOTE(review): presumably index 0 holds the feature-extractor output so heads
# start reading encoder states at index 1 — confirm against the heads' usage.
_HIDDEN_STATES_START_POSITION = 1

# General docstring
_CONFIG_FOR_DOC = "HubertConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/hubert-large-ls960-ft"
# Expected (batch, sequence, hidden_size) shape of the doc example's output.
_EXPECTED_OUTPUT_SHAPE = [1, 292, 768]

# CTC docstring
_CTC_EXPECTED_OUTPUT = "'MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL'"
_CTC_EXPECTED_LOSS = 22.68

# Audio class docstring
_SEQ_CLASS_CHECKPOINT = "superb/hubert-base-superb-ks"
_SEQ_CLASS_EXPECTED_OUTPUT = "'_unknown_'"
_SEQ_CLASS_EXPECTED_LOSS = 8.53
# Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices
def _compute_mask_indices(
    shape: Tuple[int, int],
    mask_prob: float,
    mask_length: int,
    attention_mask: Optional[torch.LongTensor] = None,
    min_masks: int = 0,
) -> np.ndarray:
    """
    Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for
    ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on
    CPU as part of the preprocessing during training.

    Args:
        shape: The shape for which to compute masks. This should be of a tuple of size 2 where
            the first element is the batch size and the second element is the length of the axis to span.
        mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of
            independently generated mask spans of length `mask_length` is computed by
            `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the
            actual percentage will be smaller.
        mask_length: size of the mask
        min_masks: minimum number of masked spans
        attention_mask: A (right-padded) attention mask which independently shortens the feature axis of
            each batch dimension.

    Returns:
        A boolean array of shape `(batch_size, sequence_length)` where `True` marks masked positions.
    """
    batch_size, sequence_length = shape

    if mask_length < 1:
        raise ValueError("`mask_length` has to be bigger than 0.")

    if mask_length > sequence_length:
        raise ValueError(
            f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}"
            f" and `sequence_length`: {sequence_length}`"
        )

    # epsilon is used for probabilistic rounding; a single draw is shared by
    # every batch element so they all round the span count the same way.
    epsilon = np.random.rand(1).item()

    def compute_num_masked_span(input_length):
        """Given input length, compute how many spans should be masked"""
        num_masked_span = int(mask_prob * input_length / mask_length + epsilon)
        num_masked_span = max(num_masked_span, min_masks)

        # make sure num masked span <= sequence_length
        if num_masked_span * mask_length > sequence_length:
            num_masked_span = sequence_length // mask_length

        # make sure num_masked span is also <= input_length - (mask_length - 1)
        if input_length - (mask_length - 1) < num_masked_span:
            num_masked_span = max(input_length - (mask_length - 1), 0)

        return num_masked_span

    # compute number of masked spans in batch; padded rows get shorter budgets
    input_lengths = (
        attention_mask.sum(-1).detach().tolist()
        if attention_mask is not None
        else [sequence_length for _ in range(batch_size)]
    )

    # SpecAugment mask to fill
    spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool)
    spec_aug_mask_idxs = []

    max_num_masked_span = compute_num_masked_span(sequence_length)

    if max_num_masked_span == 0:
        return spec_aug_mask

    for input_length in input_lengths:
        # compute num of masked spans for this input
        num_masked_span = compute_num_masked_span(input_length)

        # get random indices to mask (span start positions, sampled without replacement)
        spec_aug_mask_idx = np.random.choice(
            np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False
        )

        # pick first sampled index that will serve as a dummy index to pad vector
        # to ensure same dimension for all batches due to probabilistic rounding
        # Picking first sample just pads those vectors twice.
        if len(spec_aug_mask_idx) == 0:
            # this case can only happen if `input_length` is strictly smaller then
            # `sequence_length` in which case the last token has to be a padding
            # token which we can use as a dummy mask id
            dummy_mask_idx = sequence_length - 1
        else:
            dummy_mask_idx = spec_aug_mask_idx[0]

        spec_aug_mask_idx = np.concatenate(
            [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx]
        )
        spec_aug_mask_idxs.append(spec_aug_mask_idx)

    spec_aug_mask_idxs = np.array(spec_aug_mask_idxs)

    # expand masked indices to masked spans
    spec_aug_mask_idxs = np.broadcast_to(
        spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length)
    )
    spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length)

    # add offset to the starting indexes so that indexes now create a span
    offsets = np.arange(mask_length)[None, None, :]
    offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape(
        batch_size, max_num_masked_span * mask_length
    )
    spec_aug_mask_idxs = spec_aug_mask_idxs + offsets

    # ensure that we cannot have indices larger than sequence_length
    if spec_aug_mask_idxs.max() > sequence_length - 1:
        spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1

    # scatter indices to mask
    np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1)

    return spec_aug_mask
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->Hubert
class HubertNoLayerNormConvLayer(nn.Module):
    """One feature-extractor stage: 1-D convolution followed by an activation,
    with no normalization layer in between."""

    def __init__(self, config, layer_id=0):
        super().__init__()
        # The first layer consumes the raw single-channel waveform.
        self.in_conv_dim = 1 if layer_id == 0 else config.conv_dim[layer_id - 1]
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        # conv -> activation, nothing else.
        return self.activation(self.conv(hidden_states))
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->Hubert
class HubertLayerNormConvLayer(nn.Module):
    """One feature-extractor stage: 1-D convolution, LayerNorm over the channel
    axis, then an activation."""

    def __init__(self, config, layer_id=0):
        super().__init__()
        # The first layer consumes the raw single-channel waveform.
        self.in_conv_dim = 1 if layer_id == 0 else config.conv_dim[layer_id - 1]
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        # LayerNorm normalizes the last dim, but Conv1d keeps channels at dim -2,
        # so swap channel/time around the norm and swap back afterwards.
        normed = self.layer_norm(hidden_states.transpose(-2, -1)).transpose(-2, -1)
        return self.activation(normed)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->Hubert
class HubertGroupNormConvLayer(nn.Module):
    """One feature-extractor stage: 1-D convolution, per-channel GroupNorm
    (one group per channel), then an activation."""

    def __init__(self, config, layer_id=0):
        super().__init__()
        # The first layer consumes the raw single-channel waveform.
        self.in_conv_dim = 1 if layer_id == 0 else config.conv_dim[layer_id - 1]
        self.out_conv_dim = config.conv_dim[layer_id]

        self.conv = nn.Conv1d(
            self.in_conv_dim,
            self.out_conv_dim,
            kernel_size=config.conv_kernel[layer_id],
            stride=config.conv_stride[layer_id],
            bias=config.conv_bias,
        )
        self.activation = ACT2FN[config.feat_extract_activation]
        # num_groups == num_channels normalizes each channel independently.
        self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True)

    def forward(self, hidden_states):
        # GroupNorm works directly on (batch, channels, time), no transpose needed.
        return self.activation(self.layer_norm(self.conv(hidden_states)))
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->Hubert
class HubertPositionalConvEmbedding(nn.Module):
    """Grouped 1-D convolution over the time axis used as a (relative) positional
    embedding for the encoder's hidden states."""

    def __init__(self, config):
        super().__init__()
        self.conv = nn.Conv1d(
            config.hidden_size,
            config.hidden_size,
            kernel_size=config.num_conv_pos_embeddings,
            padding=config.num_conv_pos_embeddings // 2,
            groups=config.num_conv_pos_embedding_groups,
        )

        # Prefer the parametrizations-based weight_norm when this torch build
        # provides it; fall back to the legacy nn.utils.weight_norm otherwise.
        weight_norm = nn.utils.weight_norm
        if hasattr(nn.utils.parametrizations, "weight_norm"):
            weight_norm = nn.utils.parametrizations.weight_norm

        if is_deepspeed_zero3_enabled():
            import deepspeed

            # Under ZeRO stage 3 the conv weight is partitioned across ranks:
            # gather it (modifier_rank=0) before applying weight_norm, then
            # register the resulting g/v tensors so DeepSpeed keeps tracking them.
            with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0):
                self.conv = weight_norm(self.conv, name="weight", dim=2)
            # The two APIs expose the decomposed weights under different names.
            if hasattr(self.conv, "parametrizations"):
                weight_g = self.conv.parametrizations.weight.original0
                weight_v = self.conv.parametrizations.weight.original1
            else:
                weight_g = self.conv.weight_g
                weight_v = self.conv.weight_v
            deepspeed.zero.register_external_parameter(self, weight_v)
            deepspeed.zero.register_external_parameter(self, weight_g)
        else:
            self.conv = weight_norm(self.conv, name="weight", dim=2)

        # Trims the surplus frame produced by "same" padding with even kernels.
        self.padding = HubertSamePadLayer(config.num_conv_pos_embeddings)
        self.activation = ACT2FN[config.feat_extract_activation]

    def forward(self, hidden_states):
        # Conv1d expects (batch, channels, time); inputs arrive as
        # (batch, time, channels), so transpose in and back out.
        hidden_states = hidden_states.transpose(1, 2)

        hidden_states = self.conv(hidden_states)
        hidden_states = self.padding(hidden_states)
        hidden_states = self.activation(hidden_states)

        hidden_states = hidden_states.transpose(1, 2)
        return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->Hubert
class HubertSamePadLayer(nn.Module):
    """Drops the extra trailing frame that "same" padding produces when the
    positional convolution uses an even kernel size."""

    def __init__(self, num_conv_pos_embeddings):
        super().__init__()
        # Even kernels yield one surplus output frame on the right; odd kernels none.
        self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0

    def forward(self, hidden_states):
        if self.num_pad_remove == 0:
            return hidden_states
        # Trim along the time axis (last dim of (batch, channels, time)).
        return hidden_states[:, :, : -self.num_pad_remove]
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->Hubert
class HubertFeatureEncoder(nn.Module):
    """Construct the features from raw audio waveform"""

    def __init__(self, config):
        super().__init__()
        norm_mode = config.feat_extract_norm
        if norm_mode == "group":
            # Group-normalize only the first layer; the remaining ones are bare.
            conv_layers = [HubertGroupNormConvLayer(config, layer_id=0)]
            conv_layers += [
                HubertNoLayerNormConvLayer(config, layer_id=i + 1)
                for i in range(config.num_feat_extract_layers - 1)
            ]
        elif norm_mode == "layer":
            conv_layers = [
                HubertLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)
            ]
        else:
            raise ValueError(
                f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']"
            )
        self.conv_layers = nn.ModuleList(conv_layers)
        self.gradient_checkpointing = False
        self._requires_grad = True

    def _freeze_parameters(self):
        # Disable gradients for the whole encoder and remember that we did.
        for param in self.parameters():
            param.requires_grad = False
        self._requires_grad = False

    def forward(self, input_values):
        # Add the channel dim: (batch, time) -> (batch, 1, time).
        hidden_states = input_values[:, None]

        # make sure hidden_states require grad for gradient_checkpointing
        if self._requires_grad and self.training:
            hidden_states.requires_grad = True

        checkpointing = self._requires_grad and self.gradient_checkpointing and self.training
        for conv_layer in self.conv_layers:
            if checkpointing:
                hidden_states = self._gradient_checkpointing_func(conv_layer.__call__, hidden_states)
            else:
                hidden_states = conv_layer(hidden_states)

        return hidden_states
class HubertFeatureExtractor(HubertFeatureEncoder):
    """Deprecated alias of `HubertFeatureEncoder`, kept for backward compatibility.

    Instantiating it emits a `FutureWarning`; use `HubertFeatureEncoder` instead.
    """

    def __init__(self, config):
        super().__init__(config)
        # Fix: the warning previously said "depreciated" (a finance term)
        # instead of "deprecated" in the user-facing message.
        warnings.warn(
            f"The class `{self.__class__.__name__}` has been deprecated "
            "and will be removed in Transformers v5. "
            f"Use `{self.__class__.__bases__[0].__name__}` instead.",
            FutureWarning,
        )
class HubertFeatureProjection(nn.Module):
    """Projects the feature encoder's output channels up to the transformer's
    hidden size, with optional LayerNorm and dropout."""

    def __init__(self, config):
        super().__init__()
        self.feat_proj_layer_norm = config.feat_proj_layer_norm
        if self.feat_proj_layer_norm:
            self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps)
        self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size)
        self.dropout = nn.Dropout(config.feat_proj_dropout)

    def forward(self, hidden_states):
        # non-projected hidden states are needed for quantization
        if self.feat_proj_layer_norm:
            hidden_states = self.layer_norm(hidden_states)
        return self.dropout(self.projection(hidden_states))
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Hubert
class HubertAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        is_causal: bool = False,
        config: Optional[HubertConfig] = None,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        self.config = config

        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        # 1/sqrt(head_dim) scaling, pre-applied to the query projection below.
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.is_causal = is_causal

        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # (bsz, seq, embed) -> (bsz, num_heads, seq, head_dim)
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        bsz, tgt_len, _ = hidden_states.size()

        # get query proj (scaling folded in here instead of after the matmul)
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        # `past_key_value[0].shape[2] == key_value_states.shape[1]`
        # is checking that the `sequence_length` of the `past_key_value` is the same as
        # the provided `key_value_states` to support prefix tuning
        if (
            is_cross_attention
            and past_key_value is not None
            and past_key_value[0].shape[2] == key_value_states.shape[1]
        ):
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)

        # Fold heads into the batch dim for the bmm-based attention below.
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.reshape(*proj_shape)
        value_states = value_states.reshape(*proj_shape)

        src_len = key_states.size(1)
        attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))

        if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
            raise ValueError(
                f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
                f" {attn_weights.size()}"
            )

        if attention_mask is not None:
            if attention_mask.size() != (bsz, 1, tgt_len, src_len):
                raise ValueError(
                    f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
                )
            # Additive mask: reshape to per-head layout, add, reshape back.
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        attn_weights = nn.functional.softmax(attn_weights, dim=-1)

        if layer_head_mask is not None:
            if layer_head_mask.size() != (self.num_heads,):
                raise ValueError(
                    f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
                    f" {layer_head_mask.size()}"
                )
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        if output_attentions:
            # this operation is a bit awkward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to be reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
        else:
            attn_weights_reshaped = None

        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)

        attn_output = torch.bmm(attn_probs, value_states)

        if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
        attn_output = attn_output.transpose(1, 2)

        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights_reshaped, past_key_value
# Copied from transformers.models.bart.modeling_bart.BartFlashAttention2 with Bart->Hubert
class HubertFlashAttention2(HubertAttention):
    """
    Hubert flash attention module. This module inherits from `HubertAttention` as the weights of the module stays
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
    flash attention and deal with padding tokens in case the input contains any of them.
    """

    # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
        # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()

    def _reshape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
        # Unlike `_shape`, keeps the layout flash attention expects:
        # (bsz, seq, num_heads, head_dim), i.e. no head/time transpose.
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        # HubertFlashAttention2 attention does not support output_attentions
        if output_attentions:
            raise ValueError("HubertFlashAttention2 attention does not support output_attentions")

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        bsz, q_len, _ = hidden_states.size()

        # get query proj
        query_states = self._reshape(self.q_proj(hidden_states), -1, bsz)
        # get key, value proj
        # `past_key_value[0].shape[2] == key_value_states.shape[1]`
        # is checking that the `sequence_length` of the `past_key_value` is the same as
        # the provided `key_value_states` to support prefix tuning
        if (
            is_cross_attention
            and past_key_value is not None
            and past_key_value[0].shape[2] == key_value_states.shape[1]
        ):
            # reuse k,v, cross_attentions. Cached tensors are stored in the
            # (bsz, heads, seq, dim) layout, so transpose into flash layout.
            key_states = past_key_value[0].transpose(1, 2)
            value_states = past_key_value[1].transpose(1, 2)
        elif is_cross_attention:
            # cross_attentions
            key_states = self._reshape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._reshape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._reshape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._reshape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0].transpose(1, 2), key_states], dim=1)
            value_states = torch.cat([past_key_value[1].transpose(1, 2), value_states], dim=1)
        else:
            # self_attention
            key_states = self._reshape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._reshape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states.transpose(1, 2), value_states.transpose(1, 2))

        # NOTE(review): `kv_seq_len` is computed but never read below in this
        # method, and when `self.is_decoder` is True, `past_key_value` was just
        # reassigned above so this would double-count — confirm intent upstream.
        kv_seq_len = key_states.shape[-2]
        if past_key_value is not None:
            kv_seq_len += past_key_value[0].shape[-2]

        # In PEFT, usually we cast the layer norms in float32 for training stability reasons
        # therefore the input hidden states gets silently casted in float32. Hence, we need
        # cast them back in the correct dtype just to be sure everything works as expected.
        # This might slowdown training & inference so it is recommended to not cast the LayerNorms
        # in fp32. (LlamaRMSNorm handles it correctly)
        input_dtype = query_states.dtype
        if input_dtype == torch.float32:
            if torch.is_autocast_enabled():
                target_dtype = torch.get_autocast_gpu_dtype()
            # Handle the case where the model is quantized
            elif hasattr(self.config, "_pre_quantization_dtype"):
                target_dtype = self.config._pre_quantization_dtype
            else:
                target_dtype = self.q_proj.weight.dtype

            logger.warning_once(
                f"The input hidden states seems to be silently casted in float32, this might be related to"
                f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
                f" {target_dtype}."
            )

            query_states = query_states.to(target_dtype)
            key_states = key_states.to(target_dtype)
            value_states = value_states.to(target_dtype)

        attn_output = _flash_attention_forward(
            query_states,
            key_states,
            value_states,
            attention_mask,
            q_len,
            dropout=self.dropout if self.training else 0.0,
            is_causal=self.is_causal,
            use_top_left_mask=self._flash_attn_uses_top_left_mask,
        )

        # Merge heads back: (bsz, q_len, num_heads * head_dim).
        attn_output = attn_output.reshape(bsz, q_len, -1)
        attn_output = self.out_proj(attn_output)

        if not output_attentions:
            attn_weights = None

        return attn_output, attn_weights, past_key_value
class HubertSdpaAttention(HubertAttention):
    """`HubertAttention` variant that dispatches to `torch.nn.functional.scaled_dot_product_attention`."""

    # Copied from transformers.models.bart.modeling_bart.BartSdpaAttention.forward with Bart->Hubert
    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: Optional[torch.Tensor] = None,
        past_key_value: Optional[Tuple[torch.Tensor]] = None,
        attention_mask: Optional[torch.Tensor] = None,
        layer_head_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Input shape: Batch x Time x Channel"""
        if output_attentions or layer_head_mask is not None:
            # SDPA cannot return per-head attention weights nor apply a head mask, so fall back to the
            # eager implementation inherited from `HubertAttention`.
            # TODO: Improve this warning with e.g. `model.config._attn_implementation = "manual"` once this is implemented.
            logger.warning_once(
                "HubertModel is using HubertSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True` or `layer_head_mask` not None. Falling back to the manual attention"
                ' implementation, but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
            )
            return super().forward(
                hidden_states,
                key_value_states=key_value_states,
                past_key_value=past_key_value,
                attention_mask=attention_mask,
                layer_head_mask=layer_head_mask,
                output_attentions=output_attentions,
            )

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        bsz, tgt_len, _ = hidden_states.size()

        # get query proj
        query_states = self.q_proj(hidden_states)
        # get key, value proj
        # `past_key_value[0].shape[2] == key_value_states.shape[1]`
        # is checking that the `sequence_length` of the `past_key_value` is the same as
        # the provided `key_value_states` to support prefix tuning
        if (
            is_cross_attention
            and past_key_value is not None
            and past_key_value[0].shape[2] == key_value_states.shape[1]
        ):
            # reuse k,v, cross_attentions
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = torch.cat([past_key_value[0], key_states], dim=2)
            value_states = torch.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)

        if self.is_decoder:
            # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)

        query_states = self._shape(query_states, tgt_len, bsz)

        # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
        # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
        # The tgt_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case tgt_len == 1.
        is_causal = True if self.is_causal and attention_mask is None and tgt_len > 1 else False

        # NOTE: SDPA with memory-efficient backend is currently (torch==2.1.2) bugged when using non-contiguous inputs and a custom attn_mask,
        # but we are fine here as `_shape` do call `.contiguous()`. Reference: https://github.com/pytorch/pytorch/issues/112577
        attn_output = torch.nn.functional.scaled_dot_product_attention(
            query_states,
            key_states,
            value_states,
            attn_mask=attention_mask,
            dropout_p=self.dropout if self.training else 0.0,
            is_causal=is_causal,
        )

        if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim):
            raise ValueError(
                f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
                f" {attn_output.size()}"
            )

        attn_output = attn_output.transpose(1, 2)

        # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
        # partitioned across GPUs when using tensor-parallelism.
        attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)

        attn_output = self.out_proj(attn_output)

        # SDPA never materializes the attention-weight matrix, hence `None` in the second slot.
        return attn_output, None, past_key_value
# Maps `config._attn_implementation` to the attention class used by the encoder layers.
HUBERT_ATTENTION_CLASSES = {
    "eager": HubertAttention,
    "sdpa": HubertSdpaAttention,
    "flash_attention_2": HubertFlashAttention2,
}
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->Hubert
class HubertFeedForward(nn.Module):
    """Position-wise feed-forward block: dense -> activation -> dropout -> dense -> dropout."""

    def __init__(self, config):
        super().__init__()
        self.intermediate_dropout = nn.Dropout(config.activation_dropout)
        self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size)
        # A string activation name is resolved through ACT2FN; a callable is used as-is.
        self.intermediate_act_fn = (
            ACT2FN[config.hidden_act] if isinstance(config.hidden_act, str) else config.hidden_act
        )
        self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.output_dropout = nn.Dropout(config.hidden_dropout)

    def forward(self, hidden_states):
        # Expand -> activate -> dropout, then project back to the model dimension.
        intermediate = self.intermediate_dropout(self.intermediate_act_fn(self.intermediate_dense(hidden_states)))
        return self.output_dropout(self.output_dense(intermediate))
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayer with Wav2Vec2->Hubert, WAV2VEC2->HUBERT
class HubertEncoderLayer(nn.Module):
    """Post-layer-norm encoder layer: self-attention and feed-forward, each wrapped in a residual connection."""

    def __init__(self, config):
        super().__init__()
        self.attention = HUBERT_ATTENTION_CLASSES[config._attn_implementation](
            embed_dim=config.hidden_size,
            num_heads=config.num_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=False,
        )
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.feed_forward = HubertFeedForward(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states, attention_mask=None, output_attentions=False):
        residual = hidden_states
        hidden_states, attn_weights, _ = self.attention(
            hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
        )
        # Residual around attention, then normalize (post-LN ordering).
        hidden_states = self.layer_norm(residual + self.dropout(hidden_states))
        # Residual around the feed-forward block, then the final normalization.
        hidden_states = self.final_layer_norm(hidden_states + self.feed_forward(hidden_states))
        return (hidden_states, attn_weights) if output_attentions else (hidden_states,)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2AttnAdapterLayer with Wav2Vec2->Hubert
class HubertAttnAdapterLayer(nn.Module):
    def __init__(self, config):
        """
        Implements adapter modules directly with 3D tensor weight as parameters and without using ModuleList to speed
        up training throughput.
        """
        super().__init__()
        self.input_dim = config.adapter_attn_dim
        self.hidden_dim = config.hidden_size
        self.norm = nn.LayerNorm(self.hidden_dim)
        self.linear_1 = nn.Linear(self.hidden_dim, self.input_dim)
        self.act_fn = nn.ReLU()
        self.linear_2 = nn.Linear(self.input_dim, self.hidden_dim)

    def forward(self, hidden_states: torch.FloatTensor):
        # Bottleneck adapter: LayerNorm -> down-projection -> ReLU -> up-projection.
        for stage in (self.norm, self.linear_1, self.act_fn, self.linear_2):
            hidden_states = stage(hidden_states)
        return hidden_states
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderLayerStableLayerNorm with Wav2Vec2->Hubert, WAV2VEC2->HUBERT
class HubertEncoderLayerStableLayerNorm(nn.Module):
    """Pre-layer-norm encoder layer; optionally followed by a residual attention adapter."""

    def __init__(self, config):
        super().__init__()
        self.attention = HUBERT_ATTENTION_CLASSES[config._attn_implementation](
            embed_dim=config.hidden_size,
            num_heads=config.num_attention_heads,
            dropout=config.attention_dropout,
            is_decoder=False,
        )
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.feed_forward = HubertFeedForward(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

        # The adapter is only instantiated when `adapter_attn_dim` is configured.
        self.adapter_layer = (
            HubertAttnAdapterLayer(config) if getattr(config, "adapter_attn_dim", None) is not None else None
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
    ):
        residual = hidden_states
        # Pre-LN ordering: normalize before attention, add the residual afterwards.
        normed = self.layer_norm(hidden_states)
        attn_out, attn_weights, _ = self.attention(
            normed, attention_mask=attention_mask, output_attentions=output_attentions
        )
        hidden_states = residual + self.dropout(attn_out)
        hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states))

        if self.adapter_layer is not None:
            hidden_states = hidden_states + self.adapter_layer(hidden_states)

        return (hidden_states, attn_weights) if output_attentions else (hidden_states,)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Encoder with Wav2Vec2->Hubert
class HubertEncoder(nn.Module):
    """Stack of `HubertEncoderLayer`s with a convolutional positional embedding (post-layer-norm variant)."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pos_conv_embed = HubertPositionalConvEmbedding(config)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList([HubertEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

    def forward(
        self,
        hidden_states: torch.tensor,
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if attention_mask is not None:
            # make sure padded tokens output 0 (zeroes hidden states in place at padded positions)
            expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
            hidden_states[~expand_attention_mask] = 0
            if self._use_flash_attention_2:
                # 2d mask is passed through the layers
                attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
            else:
                # extend attention_mask: build a 4D additive mask with a large negative value at
                # padded positions so they are ignored by softmax in the attention layers
                attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
                attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
                attention_mask = attention_mask.expand(
                    attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
                )

        position_embeddings = self.pos_conv_embed(hidden_states)
        hidden_states = hidden_states + position_embeddings
        hidden_states = self.layer_norm(hidden_states)
        hidden_states = self.dropout(hidden_states)

        synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)

        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = torch.rand([])

            skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
            if not skip_the_layer or synced_gpus:
                # under fsdp or deepspeed zero3 all gpus must run in sync
                if self.gradient_checkpointing and self.training:
                    layer_outputs = self._gradient_checkpointing_func(
                        layer.__call__,
                        hidden_states,
                        attention_mask,
                        output_attentions,
                    )
                else:
                    layer_outputs = layer(
                        hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
                    )
                hidden_states = layer_outputs[0]

            if skip_the_layer:
                # keep the output tuples aligned even when the layer was dropped
                layer_outputs = (None, None)

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2EncoderStableLayerNorm with Wav2Vec2->Hubert
class HubertEncoderStableLayerNorm(nn.Module):
    """Stack of pre-layer-norm encoder layers; the final LayerNorm is applied after the whole stack."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.pos_conv_embed = HubertPositionalConvEmbedding(config)
        self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout)
        self.layers = nn.ModuleList(
            [HubertEncoderLayerStableLayerNorm(config) for _ in range(config.num_hidden_layers)]
        )
        self.gradient_checkpointing = False
        self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        output_attentions=False,
        output_hidden_states=False,
        return_dict=True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        if attention_mask is not None:
            # make sure padded tokens are not attended to
            expand_attention_mask = attention_mask.unsqueeze(-1).repeat(1, 1, hidden_states.shape[2])
            hidden_states = hidden_states * expand_attention_mask.to(dtype=hidden_states.dtype)
            if self._use_flash_attention_2:
                # 2d mask is passed through the layers
                attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
            else:
                # extend attention_mask: build a 4D additive mask with a large negative value at
                # padded positions so they are ignored by softmax in the attention layers
                attention_mask = 1.0 - attention_mask[:, None, None, :].to(dtype=hidden_states.dtype)
                attention_mask = attention_mask * torch.finfo(hidden_states.dtype).min
                attention_mask = attention_mask.expand(
                    attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1]
                )

        position_embeddings = self.pos_conv_embed(hidden_states)
        hidden_states = hidden_states + position_embeddings
        hidden_states = self.dropout(hidden_states)

        synced_gpus = is_deepspeed_zero3_enabled() or is_fsdp_managed_module(self)

        for layer in self.layers:
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            dropout_probability = torch.rand([])

            skip_the_layer = True if self.training and (dropout_probability < self.config.layerdrop) else False
            if not skip_the_layer or synced_gpus:
                # under fsdp or deepspeed zero3 all gpus must run in sync
                # XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication
                if self.gradient_checkpointing and self.training:
                    layer_outputs = self._gradient_checkpointing_func(
                        layer.__call__,
                        hidden_states,
                        attention_mask,
                        output_attentions,
                    )
                else:
                    layer_outputs = layer(
                        hidden_states, attention_mask=attention_mask, output_attentions=output_attentions
                    )
                hidden_states = layer_outputs[0]

            if skip_the_layer:
                # keep the output tuples aligned even when the layer was dropped
                layer_outputs = (None, None)

            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)

        # Final normalization after the stack (stable / pre-LN variant).
        hidden_states = self.layer_norm(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )
class HubertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = HubertConfig
    base_model_prefix = "hubert"
    main_input_name = "input_values"
    supports_gradient_checkpointing = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        elif isinstance(module, nn.Conv1d):
            if is_deepspeed_zero3_enabled():
                import deepspeed

                # Under ZeRO-3, parameters are partitioned across ranks; gather them before initializing.
                if hasattr(module, "weight_v") and hasattr(module, "weight_g"):
                    with deepspeed.zero.GatheredParameters([module.weight_v, module.weight_g], modifier_rank=0):
                        nn.init.kaiming_normal_(module.weight.data)
                else:
                    with deepspeed.zero.GatheredParameters(module.weight, modifier_rank=0):
                        nn.init.kaiming_normal_(module.weight.data)
            else:
                nn.init.kaiming_normal_(module.weight.data)

        if isinstance(module, (nn.Linear, nn.Conv1d)) and module.bias is not None:
            module.bias.data.zero_()

    def _get_feat_extract_output_lengths(self, input_lengths: Union[torch.LongTensor, int]):
        """
        Computes the output length of the convolutional layers
        """

        def _conv_out_length(input_length, kernel_size, stride):
            # 1D convolutional layer output length formula taken
            # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
            return torch.div(input_length - kernel_size, stride, rounding_mode="floor") + 1

        for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride):
            input_lengths = _conv_out_length(input_lengths, kernel_size, stride)

        return input_lengths

    def _get_feature_vector_attention_mask(self, feature_vector_length: int, attention_mask: torch.LongTensor):
        # Downsamples the raw-audio attention mask to the feature-frame resolution of the conv stack
        # and returns a boolean mask over feature frames.
        output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)
        batch_size = attention_mask.shape[0]

        attention_mask = torch.zeros(
            (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device
        )
        # these two operations makes sure that all values before the output lengths idxs are attended to
        attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
        attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        return attention_mask
# Shared class-level docstring injected into the Hubert model classes via `add_start_docstrings`.
HUBERT_START_DOCSTRING = r"""
    Hubert was proposed in [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden
    Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia,
    Ruslan Salakhutdinov, Abdelrahman Mohamed.

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its model (such as downloading or saving etc.).

    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`HubertConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
# Shared forward-method docstring injected via `add_start_docstrings_to_model_forward`.
HUBERT_INPUTS_DOCSTRING = r"""
    Args:
        input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file
            into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install
            soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and
            conversion into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2Processor.__call__`] for details.
        attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,
            1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)

            <Tip warning={true}>

            `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==
            True`. For all models whose processor has `config.return_attention_mask == False`, such as
            [hubert-base](https://huggingface.co/facebook/hubert-base-ls960), `attention_mask` should **not** be passed
            to avoid degraded performance when doing batched inference. For such models `input_values` should simply be
            padded with 0 and passed without `attention_mask`. Be aware that these models also yield slightly different
            results depending on whether `input_values` is padded or not.

            </Tip>

        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare Hubert Model transformer outputting raw hidden-states without any specific head on top.",
    HUBERT_START_DOCSTRING,
)
class HubertModel(HubertPreTrainedModel):
    """Feature encoder + feature projection + transformer encoder, without any task head."""

    def __init__(self, config: HubertConfig):
        super().__init__(config)
        self.config = config
        self.feature_extractor = HubertFeatureEncoder(config)
        self.feature_projection = HubertFeatureProjection(config)

        # Learned vector used to replace masked positions during SpecAugment; only needed when masking is enabled.
        if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
            self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_())

        if config.do_stable_layer_norm:
            self.encoder = HubertEncoderStableLayerNorm(config)
        else:
            self.encoder = HubertEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model._mask_hidden_states
    def _mask_hidden_states(
        self,
        hidden_states: torch.FloatTensor,
        mask_time_indices: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
    ):
        """
        Masks extracted features along time axis and/or along feature axis according to
        [SpecAugment](https://arxiv.org/abs/1904.08779).

        Note: mutates `hidden_states` in place at the masked positions.
        """

        # `config.apply_spec_augment` can set masking to False
        if not getattr(self.config, "apply_spec_augment", True):
            return hidden_states

        # generate indices & apply SpecAugment along time axis
        batch_size, sequence_length, hidden_size = hidden_states.size()

        if mask_time_indices is not None:
            # apply SpecAugment along time axis with given mask_time_indices
            hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)
        elif self.config.mask_time_prob > 0 and self.training:
            mask_time_indices = _compute_mask_indices(
                (batch_size, sequence_length),
                mask_prob=self.config.mask_time_prob,
                mask_length=self.config.mask_time_length,
                attention_mask=attention_mask,
                min_masks=self.config.mask_time_min_masks,
            )
            mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool)
            hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype)

        if self.config.mask_feature_prob > 0 and self.training:
            # generate indices & apply SpecAugment along feature axis
            mask_feature_indices = _compute_mask_indices(
                (batch_size, hidden_size),
                mask_prob=self.config.mask_feature_prob,
                mask_length=self.config.mask_feature_length,
                min_masks=self.config.mask_feature_min_masks,
            )
            mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool)
            mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1)
            hidden_states[mask_feature_indices] = 0

        return hidden_states

    @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        mask_time_indices: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutput]:
        """

        Returns:

        Example:

        ```python
        >>> from transformers import AutoProcessor, HubertModel
        >>> from datasets import load_dataset
        >>> import soundfile as sf

        >>> processor = AutoProcessor.from_pretrained("facebook/hubert-large-ls960-ft")
        >>> model = HubertModel.from_pretrained("facebook/hubert-large-ls960-ft")


        >>> def map_to_array(batch):
        ...     speech, _ = sf.read(batch["file"])
        ...     batch["speech"] = speech
        ...     return batch


        >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        >>> ds = ds.map(map_to_array)

        >>> input_values = processor(ds["speech"][0], return_tensors="pt").input_values  # Batch size 1
        >>> hidden_states = model(input_values).last_hidden_state
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        extract_features = self.feature_extractor(input_values)
        extract_features = extract_features.transpose(1, 2)

        if attention_mask is not None:
            # compute reduced attention_mask corresponding to feature vectors
            attention_mask = self._get_feature_vector_attention_mask(extract_features.shape[1], attention_mask)

        hidden_states = self.feature_projection(extract_features)
        hidden_states = self._mask_hidden_states(hidden_states, mask_time_indices=mask_time_indices)

        encoder_outputs = self.encoder(
            hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = encoder_outputs[0]

        if not return_dict:
            return (hidden_states,) + encoder_outputs[1:]

        return BaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )
@add_start_docstrings(
    """Hubert Model with a `language modeling` head on top for Connectionist Temporal Classification (CTC).""",
    HUBERT_START_DOCSTRING,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->Hubert, wav2vec2->hubert, WAV_2_VEC_2->HUBERT
class HubertForCTC(HubertPreTrainedModel):
    def __init__(self, config, target_lang: Optional[str] = None):
        super().__init__(config)

        self.hubert = HubertModel(config)
        self.dropout = nn.Dropout(config.final_dropout)

        # Language of the adapter weights to load via `tie_weights` (see below); `None` disables adapters.
        self.target_lang = target_lang

        if config.vocab_size is None:
            raise ValueError(
                f"You are trying to instantiate {self.__class__} with a configuration that "
                "does not define the vocabulary size of the language model head. Please "
                "instantiate the model as follows: `HubertForCTC.from_pretrained(..., vocab_size=vocab_size)`. "
                "or define `vocab_size` of your model's configuration."
            )
        output_hidden_size = (
            config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size
        )
        self.lm_head = nn.Linear(output_hidden_size, config.vocab_size)

        # Initialize weights and apply final processing
        self.post_init()

    def tie_weights(self):
        """
        This method overwrites [`~PreTrainedModel.tie_weights`] so that adapter weights can be correctly loaded when
        passing `target_lang=...` to `from_pretrained(...)`.

        This method is **not** supposed to be called by the user and is prone to be changed in the future.
        """

        # Note that `tie_weights` is usually used to tie input and output embedding weights. The method is re-purposed to
        # correctly load adapter layers for Hubert so that we do not have to introduce a new API to
        # [`PreTrainedModel`]. While slightly hacky, Hubert never has to tie input and output embeddings, so that it is
        # ok to repurpose this function here.
        target_lang = self.target_lang

        if target_lang is not None and getattr(self.config, "adapter_attn_dim", None) is None:
            raise ValueError(f"Cannot pass `target_lang`: {target_lang} if `config.adapter_attn_dim` is not defined.")
        elif target_lang is None and getattr(self.config, "adapter_attn_dim", None) is not None:
            logger.info("By default `target_lang` is set to 'eng'.")
        elif target_lang is not None:
            self.load_adapter(target_lang, force_load=True)

    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameter will
        not be updated during training.
        """
        self.hubert.feature_extractor._freeze_parameters()

    def freeze_base_model(self):
        """
        Calling this function will disable the gradient computation for the base model so that its parameters will not
        be updated during training. Only the classification head will be updated.
        """
        for param in self.hubert.parameters():
            param.requires_grad = False

    @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=CausalLMOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_CTC_EXPECTED_OUTPUT,
        expected_loss=_CTC_EXPECTED_LOSS,
    )
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[Tuple, CausalLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*):
            Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to
            the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`.
            All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ...,
            config.vocab_size - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if labels is not None and labels.max() >= self.config.vocab_size:
            raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}")

        outputs = self.hubert(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs[0]
        hidden_states = self.dropout(hidden_states)

        logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # retrieve loss input_lengths from attention_mask
            attention_mask = (
                attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long)
            )
            input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long)

            # assuming that padded tokens are filled with -100
            # when not being attended to
            labels_mask = labels >= 0
            target_lengths = labels_mask.sum(-1)
            flattened_targets = labels.masked_select(labels_mask)

            # ctc_loss doesn't support fp16
            log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1)

            # cuDNN's CTC kernel has stricter requirements, so disable it and use the native implementation.
            with torch.backends.cudnn.flags(enabled=False):
                loss = nn.functional.ctc_loss(
                    log_probs,
                    flattened_targets,
                    input_lengths,
                    target_lengths,
                    blank=self.config.pad_token_id,
                    reduction=self.config.ctc_loss_reduction,
                    zero_infinity=self.config.ctc_zero_infinity,
                )

        if not return_dict:
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutput(
            loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions
        )
@add_start_docstrings(
    """
    Hubert Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like
    SUPERB Keyword Spotting.
    """,
    HUBERT_START_DOCSTRING,
)
# Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification with Wav2Vec2->Hubert, wav2vec2->hubert, WAV_2_VEC_2->HUBERT
class HubertForSequenceClassification(HubertPreTrainedModel):
    def __init__(self, config):
        """Build the Hubert encoder plus a projector and linear classification head."""
        super().__init__(config)

        # This head pools the encoder output directly; adapter layers are not supported.
        if hasattr(config, "add_adapter") and config.add_adapter:
            raise ValueError(
                "Sequence classification does not support the use of Hubert adapters (config.add_adapter=True)"
            )
        self.hubert = HubertModel(config)
        num_layers = config.num_hidden_layers + 1  # transformer layers + input embeddings
        if config.use_weighted_layer_sum:
            # One learnable scalar weight per hidden-state layer, initialized uniformly.
            self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers)
        self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size)
        self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def freeze_feature_extractor(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
        not be updated during training.

        Deprecated alias for `freeze_feature_encoder`; will be removed in Transformers v5.
        """
        warnings.warn(
            "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5. "
            "Please use the equivalent `freeze_feature_encoder` method instead.",
            FutureWarning,
        )
        self.freeze_feature_encoder()

    def freeze_feature_encoder(self):
        """
        Calling this function will disable the gradient computation for the feature encoder so that its parameters will
        not be updated during training.
        """
        self.hubert.feature_extractor._freeze_parameters()

    def freeze_base_model(self):
        """
        Calling this function will disable the gradient computation for the base model so that its parameters will not
        be updated during training. Only the classification head will be updated.
        """
        for param in self.hubert.parameters():
            param.requires_grad = False

    @add_start_docstrings_to_model_forward(HUBERT_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_SEQ_CLASS_CHECKPOINT,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        modality="audio",
        expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
        expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
    )
    def forward(
        self,
        input_values: Optional[torch.Tensor],
        attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[Tuple, SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. A classification loss (Cross-Entropy) is computed.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        # The weighted-layer-sum head consumes every intermediate hidden state, so
        # force them on in that mode regardless of what the caller requested.
        output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states

        outputs = self.hubert(
            input_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if self.config.use_weighted_layer_sum:
            # Combine all layers' hidden states with softmax-normalized learned weights.
            hidden_states = outputs[_HIDDEN_STATES_START_POSITION]
            hidden_states = torch.stack(hidden_states, dim=1)  # new layer axis at dim=1
            norm_weights = nn.functional.softmax(self.layer_weights, dim=-1)
            hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1)
        else:
            hidden_states = outputs[0]

        hidden_states = self.projector(hidden_states)
        if attention_mask is None:
            # No padding information: plain mean over the time axis.
            pooled_output = hidden_states.mean(dim=1)
        else:
            # Mean-pool only over frames that correspond to real (non-padded) input:
            # zero out padded frames in place, then divide by the true frame count.
            padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask)
            hidden_states[~padding_mask] = 0.0
            pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1)

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))

        if not return_dict:
            # Tuple output: (loss?, logits, then hidden_states/attentions if any).
            output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
|