modeling_clap.py

# coding=utf-8
# Copyright 2023 The LAION-AI Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch CLAP model."""

import collections
import math
from dataclasses import dataclass
from typing import Any, List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
from torch import nn

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    BaseModelOutputWithPooling,
    BaseModelOutputWithPoolingAndCrossAttentions,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, meshgrid, prune_linear_layer
from ...utils import (
    ModelOutput,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
    torch_int,
)
from .configuration_clap import ClapAudioConfig, ClapConfig, ClapTextConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "laion/clap-htsat-fused"


# Adapted from: https://github.com/LAION-AI/CLAP/blob/6ad05a971ba0622f6acee8c41993e0d02bbed639/src/open_clip/utils.py#L191
def interpolate(hidden_states, ratio):
    """
    Interpolate data in time domain. This is used to compensate the resolution reduction in downsampling of a CNN.

    Args:
        hidden_states (`torch.FloatTensor` of shape (batch_size, time_length, classes_num)):
            Input hidden states
        ratio (`int`):
            The ratio of the length of the output to the length of the input.
    """
    (batch_size, time_length, classes_num) = hidden_states.shape
    upsampled = hidden_states[:, :, None, :].repeat(1, 1, ratio, 1)
    upsampled = upsampled.reshape(batch_size, time_length * ratio, classes_num)
    return upsampled
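
# Illustrative usage (not part of the original file; shapes are example values): every time frame
# is repeated `ratio` times along the time axis.
#
#     >>> hidden_states = torch.randn(2, 32, 527)    # (batch_size, time_length, classes_num)
#     >>> interpolate(hidden_states, ratio=8).shape
#     torch.Size([2, 256, 527])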


# Adapted from https://github.com/LAION-AI/CLAP/blob/6ad05a971ba0622f6acee8c41993e0d02bbed639/src/open_clip/htsat.py#L249
def window_partition(hidden_states, window_size):
    """
    Returns the resized hidden states. The output shape should be `(batch_size * num_windows, window_size, window_size,
    num_channels)`

    Args:
        hidden_states (`torch.FloatTensor` of shape `(batch_size, height, width, num_channels)`):
            Input hidden states
        window_size (`int`):
            Window size
    """
    batch_size, height, width, num_channels = hidden_states.shape

    hidden_states = hidden_states.view(
        batch_size, height // window_size, window_size, width // window_size, window_size, num_channels
    )
    windows = hidden_states.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels)
    return windows
  73. # Adapted from https://github.com/LAION-AI/CLAP/blob/6ad05a971ba0622f6acee8c41993e0d02bbed639/src/open_clip/htsat.py#L263
  74. def window_reverse(windows, window_size, height, width):
  75. """
  76. Merges windows to produce higher resolution features.
  77. Args:
  78. windows (`torch.FloatTensor` of shape `(num_windows * batch_size, window_size, window_size, num_channels)`):
  79. Input windows
  80. window_size (`int`):
  81. Window size
  82. height (`int`):
  83. Height of the resized audio
  84. width (`int`):
  85. Width of the resized audio
  86. """
  87. num_channels = windows.shape[-1]
  88. windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels)
  89. windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels)
  90. return windows
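
# Illustrative round trip (not part of the original file; shapes are example values):
# `window_partition` splits the feature map into non-overlapping windows and `window_reverse`
# reassembles them.
#
#     >>> hidden_states = torch.randn(1, 8, 8, 96)    # (batch_size, height, width, num_channels)
#     >>> windows = window_partition(hidden_states, window_size=4)
#     >>> windows.shape                               # batch_size * num_windows = 1 * 4
#     torch.Size([4, 4, 4, 96])
#     >>> torch.equal(window_reverse(windows, 4, 8, 8), hidden_states)
#     True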


# Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
    """
    Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
    are ignored. This is modified from fairseq's `utils.make_positions`.

    Args:
        input_ids (`torch.Tensor`):
            Input token indices.
        padding_idx (`int`):
            Index of the padding token.
        past_key_values_length (`int`, *optional*, defaults to 0):
            Length of any cached past key values, used to offset the position numbers.

    Returns: torch.Tensor
    """
    # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
    return incremental_indices.long() + padding_idx
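
# Illustrative example (not part of the original file): with padding_idx=1, padding positions keep
# the padding index while real tokens are numbered from padding_idx + 1 onwards.
#
#     >>> input_ids = torch.tensor([[1, 5, 7, 1]])    # 1 is the padding index
#     >>> create_position_ids_from_input_ids(input_ids, padding_idx=1)
#     tensor([[1, 2, 3, 1]])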


# contrastive loss function, adapted from
# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html#CLIP-loss-function
def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    labels = torch.arange(len(logits), device=logits.device)
    return nn.functional.cross_entropy(logits, labels)
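
# Illustrative example (not part of the original file): the logits are a (batch_size, batch_size)
# similarity matrix and the target for row i is simply i, i.e. each text should best match the
# audio at the same batch index.
#
#     >>> logits = torch.eye(4) * 100.0               # a perfectly aligned batch of 4 pairs
#     >>> contrastive_loss(logits) < 1e-4
#     tensor(True)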


@dataclass
# Copied from transformers.models.clip.modeling_clip.CLIPTextModelOutput with CLIP->Clap
class ClapTextModelOutput(ModelOutput):
    """
    Base class for text model's outputs that also contains a pooling of the last hidden states.

    Args:
        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
            The text embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    text_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
class ClapAudioModelOutput(ModelOutput):
    """
    ClapAudio model output to mimic the output of the original implementation.

    Args:
        audio_embeds (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
            The Audio embeddings obtained by applying the projection layer to the pooler_output.
        last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
            Sequence of hidden-states at the output of the last layer of the model.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
            one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
    """

    audio_embeds: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    attentions: Optional[Tuple[torch.FloatTensor, ...]] = None


@dataclass
# Copied from transformers.models.clip.modeling_clip.CLIPOutput with CLIP->Clap, vision->audio, Vision->Audio, image->audio
class ClapOutput(ModelOutput):
    """
    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
            Contrastive loss for audio-text similarity.
        logits_per_audio (`torch.FloatTensor` of shape `(audio_batch_size, text_batch_size)`):
            The scaled dot product scores between `audio_embeds` and `text_embeds`. This represents the audio-text
            similarity scores.
        logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, audio_batch_size)`):
            The scaled dot product scores between `text_embeds` and `audio_embeds`. This represents the text-audio
            similarity scores.
        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The text embeddings obtained by applying the projection layer to the pooled output of [`ClapTextModel`].
        audio_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
            The audio embeddings obtained by applying the projection layer to the pooled output of [`ClapAudioModel`].
        text_model_output (`BaseModelOutputWithPooling`):
            The output of the [`ClapTextModel`].
        audio_model_output (`BaseModelOutputWithPooling`):
            The output of the [`ClapAudioModel`].
    """

    loss: Optional[torch.FloatTensor] = None
    logits_per_audio: torch.FloatTensor = None
    logits_per_text: torch.FloatTensor = None
    text_embeds: torch.FloatTensor = None
    audio_embeds: torch.FloatTensor = None
    text_model_output: BaseModelOutputWithPooling = None
    audio_model_output: BaseModelOutputWithPooling = None

    def to_tuple(self) -> Tuple[Any]:
        return tuple(
            self[k] if k not in ["text_model_output", "audio_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )


# Adapted from transformers.models.swin.modeling_swin.SwinDropPath
class ClapDropPath(nn.Module):
    """
    Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is a slightly
    refactored version of the `SwinDropPath` implementation.
    """

    def __init__(self, drop_prob=None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states):
        if self.drop_prob == 0.0 or not self.training:
            return hidden_states

        keep_prob = 1 - self.drop_prob
        # work with diff dim tensors, not just 2D ConvNets
        shape = (hidden_states.shape[0],) + (1,) * (hidden_states.ndim - 1)

        random_tensor = keep_prob + torch.rand(shape, dtype=hidden_states.dtype, device=hidden_states.device)
        random_tensor.floor_()  # binarize
        output = hidden_states.div(keep_prob) * random_tensor
        return output
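
# Illustrative behaviour (not part of the original file): with drop_prob=0.2 during training, each
# sample in the batch has its entire residual branch zeroed with probability 0.2, and surviving
# samples are scaled by 1 / 0.8 so the expected activation magnitude is unchanged. At evaluation
# time the module is a no-op.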


# Adapted from https://github.com/LAION-AI/CLAP/blob/6ad05a971ba0622f6acee8c41993e0d02bbed639/src/open_clip/feature_fusion.py#L133
class ClapAudioAFFBlock(nn.Module):
    r"""
    Attentional Feature Fusion (AFF) block from CLAP. Since CLAP always operates in 2D mode, the 1D version is not
    implemented.
    """

    def __init__(self, config: ClapAudioConfig):
        super().__init__()
        channels = config.patch_embeds_hidden_size
        downsize_ratio = config.aff_block_r
        inter_channels = int(channels // downsize_ratio)

        self.local_att = nn.Sequential(
            nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(inter_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(channels),
        )
        self.global_att = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(inter_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(channels),
        )

        self.sigmoid = nn.Sigmoid()

    def forward(self, hidden_states, residual):
        attention_input = hidden_states + residual

        fused_layer_output = self.local_att(attention_input) + self.global_att(attention_input)
        fused_layer_output = self.sigmoid(fused_layer_output)

        output = 2 * hidden_states * fused_layer_output + 2 * residual * (1 - fused_layer_output)
        return output
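
# Illustrative behaviour (not part of the original file): `local_att` produces a per-position map of
# shape (batch, channels, height, width) while `global_att` produces a broadcastable
# (batch, channels, 1, 1) map; after the sigmoid, the block blends its two inputs as
# 2 * hidden_states * w + 2 * residual * (1 - w), so weights near 1 favour `hidden_states` and
# weights near 0 favour `residual`.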


class ClapAudioPatchEmbed(nn.Module):
    """
    This module converts the hidden states reshaped as an image to patch embeddings ready to be passed to the
    Transformer block.
    """

    def __init__(self, config: ClapAudioConfig):
        super().__init__()
        img_size = (config.spec_size, config.spec_size) if isinstance(config.spec_size, int) else config.spec_size
        patch_size = (
            (config.patch_size, config.patch_size) if isinstance(config.patch_size, int) else config.patch_size
        )
        patch_stride = (
            (config.patch_stride, config.patch_stride) if isinstance(config.patch_stride, int) else config.patch_stride
        )

        self.img_size = img_size
        self.patch_stride = patch_stride

        self.grid_size = (img_size[0] // patch_stride[0], img_size[1] // patch_stride[1])
        self.num_patches = self.grid_size[0] * self.grid_size[1]

        self.flatten = config.flatten_patch_embeds
        self.enable_fusion = config.enable_fusion

        padding = ((patch_size[0] - patch_stride[0]) // 2, (patch_size[1] - patch_stride[1]) // 2)

        scale_factor = 4 if (self.enable_fusion) and (config.fusion_type == "channel_map") else 1

        self.proj = nn.Conv2d(
            config.patch_embed_input_channels * scale_factor,
            config.patch_embeds_hidden_size,
            kernel_size=patch_size,
            stride=patch_stride,
            padding=padding,
        )

        self.norm = nn.LayerNorm(config.patch_embeds_hidden_size) if config.enable_patch_layer_norm else nn.Identity()
        if self.enable_fusion:
            self.fusion_model = ClapAudioAFFBlock(config)
            self.mel_conv2d = nn.Conv2d(
                config.patch_embed_input_channels,
                config.patch_embeds_hidden_size,
                kernel_size=(patch_size[0], patch_size[1] * 3),
                stride=(patch_stride[0], patch_stride[1] * 3),
                padding=padding,
            )

    def forward(self, hidden_states, is_longer_idx=None):
        if self.enable_fusion:
            # retrieve the last mel as we have transposed the input
            global_hidden_states = hidden_states[:, 0:1, :, :]

            # global processing
            batch_size, num_channels, height, width = global_hidden_states.shape

            if height != self.img_size[0] or width != self.img_size[1]:
                raise ValueError(
                    f"Input audio size ({height}*{width}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
                )

            global_hidden_states = self.proj(global_hidden_states)
            output_width = global_hidden_states.size(-1)
            if len(is_longer_idx) > 0:
                # local processing
                local_hidden_states = hidden_states[is_longer_idx, 1:, :, :].contiguous()
                batch_size, num_channels, height, width = local_hidden_states.shape
                local_hidden_states = local_hidden_states.view(batch_size * num_channels, 1, height, width)

                local_hidden_states = self.mel_conv2d(local_hidden_states)

                _, features, height, width = local_hidden_states.shape
                local_hidden_states = local_hidden_states.view(batch_size, num_channels, features, height, width)
                local_hidden_states = local_hidden_states.permute((0, 2, 3, 1, 4)).contiguous().flatten(3)

                local_width = local_hidden_states.size(-1)
                local_hidden_states = torch.nn.functional.pad(
                    local_hidden_states, (0, output_width - local_width), "constant", 0
                )

                global_hidden_states[is_longer_idx] = self.fusion_model(
                    global_hidden_states[is_longer_idx], local_hidden_states
                )
            hidden_states = global_hidden_states
        else:
            _, _, height, width = hidden_states.shape
            if height != self.img_size[0] or width != self.img_size[1]:
                raise ValueError(
                    f"Input audio size ({height}*{width}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
                )
            hidden_states = self.proj(hidden_states)

        if self.flatten:
            hidden_states = hidden_states.flatten(2).transpose(1, 2)
        hidden_states = self.norm(hidden_states)
        return hidden_states


# Copied from transformers.models.swin.modeling_swin.SwinSelfAttention with Swin->ClapAudio
class ClapAudioSelfAttention(nn.Module):
    def __init__(self, config, dim, num_heads, window_size):
        super().__init__()
        if dim % num_heads != 0:
            raise ValueError(
                f"The hidden size ({dim}) is not a multiple of the number of attention heads ({num_heads})"
            )

        self.num_attention_heads = num_heads
        self.attention_head_size = int(dim / num_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.window_size = (
            window_size if isinstance(window_size, collections.abc.Iterable) else (window_size, window_size)
        )

        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * self.window_size[0] - 1) * (2 * self.window_size[1] - 1), num_heads)
        )

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(meshgrid([coords_h, coords_w], indexing="ij"))
        coords_flatten = torch.flatten(coords, 1)
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()
        relative_coords[:, :, 0] += self.window_size[0] - 1
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)
        self.register_buffer("relative_position_index", relative_position_index)

        self.query = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
        self.key = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)
        self.value = nn.Linear(self.all_head_size, self.all_head_size, bias=config.qkv_bias)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        batch_size, dim, num_channels = hidden_states.shape
        mixed_query_layer = self.query(hidden_states)

        key_layer = self.transpose_for_scores(self.key(hidden_states))
        value_layer = self.transpose_for_scores(self.value(hidden_states))
        query_layer = self.transpose_for_scores(mixed_query_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))

        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)]
        relative_position_bias = relative_position_bias.view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
        )

        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()
        attention_scores = attention_scores + relative_position_bias.unsqueeze(0)

        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the ClapAudioModel forward() function)
            mask_shape = attention_mask.shape[0]
            attention_scores = attention_scores.view(
                batch_size // mask_shape, mask_shape, self.num_attention_heads, dim, dim
            )
            attention_scores = attention_scores + attention_mask.unsqueeze(1).unsqueeze(0)
            attention_scores = attention_scores.view(-1, self.num_attention_heads, dim, dim)

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs
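
# Illustrative example (not part of the original file): for a window_size of (8, 8) there are
# 64 tokens per window, the bias table holds (2*8 - 1) * (2*8 - 1) = 225 learnable entries per
# head, and `relative_position_index` has shape (64, 64) -- one entry per pair of tokens -- so
# the gathered bias added to the attention scores has shape (num_heads, 64, 64).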


# Copied from transformers.models.swin.modeling_swin.SwinSelfOutput with Swin->ClapAudio
class ClapAudioSelfOutput(nn.Module):
    def __init__(self, config, dim):
        super().__init__()
        self.dense = nn.Linear(dim, dim)
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states


# Copied from transformers.models.swin.modeling_swin.SwinAttention with Swin->ClapAudio
class ClapAudioAttention(nn.Module):
    def __init__(self, config, dim, num_heads, window_size):
        super().__init__()
        self.self = ClapAudioSelfAttention(config, dim, num_heads, window_size)
        self.output = ClapAudioSelfOutput(config, dim)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.self(hidden_states, attention_mask, head_mask, output_attentions)
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


# Copied from transformers.models.swin.modeling_swin.SwinIntermediate with Swin->ClapAudio
class ClapAudioIntermediate(nn.Module):
    def __init__(self, config, dim):
        super().__init__()
        self.dense = nn.Linear(dim, int(config.mlp_ratio * dim))
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


# Copied from transformers.models.swin.modeling_swin.SwinOutput with Swin->ClapAudio
class ClapAudioOutput(nn.Module):
    def __init__(self, config, dim):
        super().__init__()
        self.dense = nn.Linear(int(config.mlp_ratio * dim), dim)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states


# Copied from transformers.models.swin.modeling_swin.SwinLayer with SwinDropPath->ClapDropPath, Swin->ClapAudio
class ClapAudioLayer(nn.Module):
    def __init__(self, config, dim, input_resolution, num_heads, shift_size=0):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.shift_size = shift_size
        self.window_size = config.window_size
        self.input_resolution = input_resolution
        self.layernorm_before = nn.LayerNorm(dim, eps=config.layer_norm_eps)
        self.attention = ClapAudioAttention(config, dim, num_heads, window_size=self.window_size)
        self.drop_path = ClapDropPath(config.drop_path_rate) if config.drop_path_rate > 0.0 else nn.Identity()
        self.layernorm_after = nn.LayerNorm(dim, eps=config.layer_norm_eps)
        self.intermediate = ClapAudioIntermediate(config, dim)
        self.output = ClapAudioOutput(config, dim)

    def set_shift_and_window_size(self, input_resolution):
        if min(input_resolution) <= self.window_size:
            # if window size is larger than input resolution, we don't partition windows
            self.shift_size = torch_int(0)
            self.window_size = (
                torch.min(torch.tensor(input_resolution)) if torch.jit.is_tracing() else min(input_resolution)
            )

    def get_attn_mask(self, height, width, dtype, device):
        if self.shift_size > 0:
            # calculate attention mask for SW-MSA
            img_mask = torch.zeros((1, height, width, 1), dtype=dtype, device=device)
            height_slices = (
                slice(0, -self.window_size),
                slice(-self.window_size, -self.shift_size),
                slice(-self.shift_size, None),
            )
            width_slices = (
                slice(0, -self.window_size),
                slice(-self.window_size, -self.shift_size),
                slice(-self.shift_size, None),
            )
            count = 0
            for height_slice in height_slices:
                for width_slice in width_slices:
                    img_mask[:, height_slice, width_slice, :] = count
                    count += 1

            mask_windows = window_partition(img_mask, self.window_size)
            mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
            attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
        else:
            attn_mask = None
        return attn_mask

    def maybe_pad(self, hidden_states, height, width):
        pad_right = (self.window_size - width % self.window_size) % self.window_size
        pad_bottom = (self.window_size - height % self.window_size) % self.window_size
        pad_values = (0, 0, 0, pad_right, 0, pad_bottom)
        hidden_states = nn.functional.pad(hidden_states, pad_values)
        return hidden_states, pad_values

    def forward(
        self,
        hidden_states: torch.Tensor,
        input_dimensions: Tuple[int, int],
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        always_partition: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        if not always_partition:
            self.set_shift_and_window_size(input_dimensions)
        else:
            pass
        height, width = input_dimensions
        batch_size, _, channels = hidden_states.size()
        shortcut = hidden_states

        hidden_states = self.layernorm_before(hidden_states)
        hidden_states = hidden_states.view(batch_size, height, width, channels)

        # pad hidden_states to multiples of window size
        hidden_states, pad_values = self.maybe_pad(hidden_states, height, width)

        _, height_pad, width_pad, _ = hidden_states.shape
        # cyclic shift
        if self.shift_size > 0:
            shifted_hidden_states = torch.roll(hidden_states, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        else:
            shifted_hidden_states = hidden_states

        # partition windows
        hidden_states_windows = window_partition(shifted_hidden_states, self.window_size)
        hidden_states_windows = hidden_states_windows.view(-1, self.window_size * self.window_size, channels)
        attn_mask = self.get_attn_mask(
            height_pad, width_pad, dtype=hidden_states.dtype, device=hidden_states_windows.device
        )

        attention_outputs = self.attention(
            hidden_states_windows, attn_mask, head_mask, output_attentions=output_attentions
        )

        attention_output = attention_outputs[0]

        attention_windows = attention_output.view(-1, self.window_size, self.window_size, channels)
        shifted_windows = window_reverse(attention_windows, self.window_size, height_pad, width_pad)

        # reverse cyclic shift
        if self.shift_size > 0:
            attention_windows = torch.roll(shifted_windows, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            attention_windows = shifted_windows

        was_padded = pad_values[3] > 0 or pad_values[5] > 0
        if was_padded:
            attention_windows = attention_windows[:, :height, :width, :].contiguous()

        attention_windows = attention_windows.view(batch_size, height * width, channels)

        hidden_states = shortcut + self.drop_path(attention_windows)

        layer_output = self.layernorm_after(hidden_states)
        layer_output = self.intermediate(layer_output)
        layer_output = hidden_states + self.output(layer_output)

        layer_outputs = (layer_output, attention_outputs[1]) if output_attentions else (layer_output,)
        return layer_outputs
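
# Illustrative behaviour (not part of the original file): blocks alternate between regular and
# shifted window attention -- even-indexed blocks use shift_size=0 and odd-indexed blocks use
# shift_size=window_size // 2 (see `ClapAudioStage` below). The cyclic `torch.roll` together with
# the mask from `get_attn_mask` keeps tokens that wrap around the border from attending to each other.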


# Copied from transformers.models.swin.modeling_swin.SwinStage with Swin->ClapAudio
class ClapAudioStage(nn.Module):
    def __init__(self, config, dim, input_resolution, depth, num_heads, drop_path, downsample):
        super().__init__()
        self.config = config
        self.dim = dim
        self.blocks = nn.ModuleList(
            [
                ClapAudioLayer(
                    config=config,
                    dim=dim,
                    input_resolution=input_resolution,
                    num_heads=num_heads,
                    shift_size=0 if (i % 2 == 0) else config.window_size // 2,
                )
                for i in range(depth)
            ]
        )

        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(input_resolution, dim=dim, norm_layer=nn.LayerNorm)
        else:
            self.downsample = None

        self.pointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        input_dimensions: Tuple[int, int],
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        always_partition: Optional[bool] = False,
    ) -> Tuple[torch.Tensor]:
        height, width = input_dimensions
        for i, layer_module in enumerate(self.blocks):
            layer_head_mask = head_mask[i] if head_mask is not None else None

            layer_outputs = layer_module(
                hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition
            )

            hidden_states = layer_outputs[0]

        hidden_states_before_downsampling = hidden_states
        if self.downsample is not None:
            height_downsampled, width_downsampled = (height + 1) // 2, (width + 1) // 2
            output_dimensions = (height, width, height_downsampled, width_downsampled)
            hidden_states = self.downsample(hidden_states_before_downsampling, input_dimensions)
        else:
            output_dimensions = (height, width, height, width)

        stage_outputs = (hidden_states, hidden_states_before_downsampling, output_dimensions)

        if output_attentions:
            stage_outputs += layer_outputs[1:]
        return stage_outputs


# Copied from transformers.models.swin.modeling_swin.SwinPatchMerging with Swin->ClapAudio
class ClapAudioPatchMerging(nn.Module):
    """
    Patch Merging Layer.

    Args:
        input_resolution (`Tuple[int]`):
            Resolution of input feature.
        dim (`int`):
            Number of input channels.
        norm_layer (`nn.Module`, *optional*, defaults to `nn.LayerNorm`):
            Normalization layer class.
    """

    def __init__(self, input_resolution: Tuple[int], dim: int, norm_layer: nn.Module = nn.LayerNorm) -> None:
        super().__init__()
        self.input_resolution = input_resolution
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def maybe_pad(self, input_feature, height, width):
        should_pad = (height % 2 == 1) or (width % 2 == 1)
        if should_pad:
            pad_values = (0, 0, 0, width % 2, 0, height % 2)
            input_feature = nn.functional.pad(input_feature, pad_values)
        return input_feature

    def forward(self, input_feature: torch.Tensor, input_dimensions: Tuple[int, int]) -> torch.Tensor:
        height, width = input_dimensions
        # `dim` is height * width
        batch_size, dim, num_channels = input_feature.shape

        input_feature = input_feature.view(batch_size, height, width, num_channels)
        # pad input to be divisible by width and height, if needed
        input_feature = self.maybe_pad(input_feature, height, width)
        # [batch_size, height/2, width/2, num_channels]
        input_feature_0 = input_feature[:, 0::2, 0::2, :]
        # [batch_size, height/2, width/2, num_channels]
        input_feature_1 = input_feature[:, 1::2, 0::2, :]
        # [batch_size, height/2, width/2, num_channels]
        input_feature_2 = input_feature[:, 0::2, 1::2, :]
        # [batch_size, height/2, width/2, num_channels]
        input_feature_3 = input_feature[:, 1::2, 1::2, :]
        # [batch_size, height/2, width/2, 4*num_channels]
        input_feature = torch.cat([input_feature_0, input_feature_1, input_feature_2, input_feature_3], -1)
        input_feature = input_feature.view(batch_size, -1, 4 * num_channels)  # [batch_size, height/2*width/2, 4*C]

        input_feature = self.norm(input_feature)
        input_feature = self.reduction(input_feature)

        return input_feature
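
# Illustrative shape example (not part of the original file; values are made up):
#
#     >>> merging = ClapAudioPatchMerging(input_resolution=(8, 8), dim=96)
#     >>> features = torch.randn(1, 64, 96)           # (batch_size, height * width, num_channels)
#     >>> merging(features, (8, 8)).shape             # spatial resolution halved, channels doubled
#     torch.Size([1, 16, 192])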


class ClapAudioEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.num_layers = len(config.depths)

        self.config = config
        self.patch_embed = ClapAudioPatchEmbed(config)
        self.enable_fusion = config.enable_fusion
        self.patch_stride = self.patch_embed.patch_stride
        self.spec_size = config.spec_size
        self.freq_ratio = config.spec_size // config.num_mel_bins

        self.num_features = int(config.patch_embeds_hidden_size * 2 ** (self.num_layers - 1))

        drop_path_rate = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        grid_size = self.patch_embed.grid_size
        self.input_resolutions = [(grid_size[0] // (2**i), grid_size[1] // (2**i)) for i in range(self.num_layers)]

        self.layers = nn.ModuleList(
            [
                ClapAudioStage(
                    config=config,
                    dim=int(config.patch_embeds_hidden_size * 2**i_layer),
                    input_resolution=self.input_resolutions[i_layer],
                    depth=config.depths[i_layer],
                    num_heads=config.num_attention_heads[i_layer],
                    drop_path=drop_path_rate[sum(config.depths[:i_layer]) : sum(config.depths[: i_layer + 1])],
                    downsample=ClapAudioPatchMerging if (i_layer < self.num_layers - 1) else None,
                )
                for i_layer in range(self.num_layers)
            ]
        )

        self.gradient_checkpointing = False

        self.batch_norm = nn.BatchNorm2d(config.num_mel_bins)
        self.norm = nn.LayerNorm(self.num_features)
        self.depths = config.depths
        self.avgpool = nn.AdaptiveAvgPool1d(1)

    def reshape_mel2img(self, normalized_input_features):
        """
        The input is 4 normalized log mel spectrograms. It is reshaped to the common shape of images. Each channel
        should represent 1 of the 4 crops of the spectrogram. For more details, refer to the [`ClapFeatureExtractor`].
        """
        _, _, time_length, freq_length = normalized_input_features.shape

        spec_width = int(self.spec_size * self.freq_ratio)
        spec_height = self.spec_size // self.freq_ratio

        if time_length > spec_width or freq_length > spec_height:
            raise ValueError("the wav size should be less than or equal to the swin input size")

        # to avoid bicubic zero error
        if time_length < spec_width:
            normalized_input_features = nn.functional.interpolate(
                normalized_input_features, (spec_width, freq_length), mode="bicubic", align_corners=True
            )
        if freq_length < spec_height:
            normalized_input_features = nn.functional.interpolate(
                normalized_input_features, (time_length, spec_height), mode="bicubic", align_corners=True
            )

        batch, channels, time, freq = normalized_input_features.shape

        # batch_size, channels, spec_width, spec_height --> batch_size, channels, spec_height * freq_ratio, spec_width // freq_ratio
        normalized_input_features = normalized_input_features.reshape(
            batch, channels * self.freq_ratio, time // self.freq_ratio, freq
        )
        normalized_input_features = normalized_input_features.permute(0, 1, 3, 2).contiguous()
        normalized_input_features = normalized_input_features.reshape(
            batch, channels, freq * self.freq_ratio, time // self.freq_ratio
        )

        return normalized_input_features
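
    # Illustrative shape example (not part of the original file): with `spec_size=256` and
    # `num_mel_bins=64` (so `freq_ratio=4`), input features of shape (1, 1, 1024, 64) are folded as
    # (1, 1, 1024, 64) -> (1, 4, 256, 64) -> (1, 4, 64, 256) -> (1, 1, 256, 256), i.e. the long time
    # axis is folded into the frequency axis to form the square "image" expected by the Swin encoder.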

    def forward(
        self,
        input_features,
        is_longer: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        output_hidden_states_before_downsampling: Optional[bool] = False,
        always_partition: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple, ClapAudioModelOutput]:
        input_features = input_features.transpose(1, 3)
        normalized_input_features = self.batch_norm(input_features)
        normalized_input_features = normalized_input_features.transpose(1, 3)

        is_longer_list_idx = None
        if self.enable_fusion:
            is_longer_list = is_longer.to(input_features.device)
            is_longer_list_idx = torch.where(is_longer_list == 1)[0]

        hidden_states = self.reshape_mel2img(normalized_input_features)

        frames_num = hidden_states.shape[2]

        hidden_states = self.patch_embed(hidden_states, is_longer_list_idx)

        all_hidden_states = () if output_hidden_states else None
        all_reshaped_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None

        input_dimensions = self.input_resolutions[0]

        if output_hidden_states:
            batch_size, _, hidden_size = hidden_states.shape
            # rearrange batch_size (height width) channels -> batch_size channel height width
            reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
            reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
            all_hidden_states += (hidden_states,)
            all_reshaped_hidden_states += (reshaped_hidden_state,)

        for i, layer_module in enumerate(self.layers):
            layer_head_mask = head_mask[i] if head_mask is not None else None

            input_dimensions = self.input_resolutions[i]

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__, hidden_states, input_dimensions, layer_head_mask, output_attentions
                )
            else:
                layer_outputs = layer_module(
                    hidden_states, input_dimensions, layer_head_mask, output_attentions, always_partition
                )

            hidden_states = layer_outputs[0]

            hidden_states_before_downsampling = layer_outputs[1]
            output_dimensions = layer_outputs[2]

            input_dimensions = (output_dimensions[-2], output_dimensions[-1])

            if output_hidden_states and output_hidden_states_before_downsampling:
                batch_size, _, hidden_size = hidden_states_before_downsampling.shape
                # rearrange batch_size (height width) channels -> batch_size channel height width
                # here we use the original (not downsampled) height and width
                reshaped_hidden_state = hidden_states_before_downsampling.view(
                    batch_size, *(output_dimensions[0], output_dimensions[1]), hidden_size
                )
                reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
                all_hidden_states += (hidden_states_before_downsampling,)
                all_reshaped_hidden_states += (reshaped_hidden_state,)
            elif output_hidden_states and not output_hidden_states_before_downsampling:
                batch_size, _, hidden_size = hidden_states.shape
                # rearrange batch_size (height width) channels -> batch_size channel height width
                reshaped_hidden_state = hidden_states.view(batch_size, *input_dimensions, hidden_size)
                reshaped_hidden_state = reshaped_hidden_state.permute(0, 3, 1, 2)
                all_hidden_states += (hidden_states,)
                all_reshaped_hidden_states += (reshaped_hidden_state,)

            if output_attentions:
                all_self_attentions += layer_outputs[3:]

        last_hidden_state = self.norm(hidden_states)

        batch_size, _, n_channels = last_hidden_state.shape

        freq_shape = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[0]
        temporal_shape = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[1]

        last_hidden_state = (
            last_hidden_state.permute(0, 2, 1).contiguous().reshape(batch_size, n_channels, freq_shape, temporal_shape)
        )

        batch_size, n_channels, n_frequencies, n_temp = last_hidden_state.shape
        # group 2D CNN
        c_freq_bin = n_frequencies // self.freq_ratio
        last_hidden_state = last_hidden_state.reshape(
            batch_size, n_channels, n_frequencies // c_freq_bin, c_freq_bin, n_temp
        )
        last_hidden_state = (
            last_hidden_state.permute(0, 1, 3, 2, 4).contiguous().reshape(batch_size, n_channels, c_freq_bin, -1)
        )
        latent_output = self.avgpool(torch.flatten(last_hidden_state, 2))
        latent_output = torch.flatten(latent_output, 1)

        if not return_dict:
            return tuple(
                v
                for v in [
                    last_hidden_state,
                    latent_output,
                    all_reshaped_hidden_states,
                    all_self_attentions,
                ]
                if v is not None
            )

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=latent_output,
            hidden_states=all_reshaped_hidden_states,
            attentions=all_self_attentions,
        )


CLAP_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`ClapConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

CLAP_TEXT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
            it.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

CLAP_AUDIO_INPUTS_DOCSTRING = r"""
    Args:
        input_features (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Input audio features. This should be returned by the [`ClapFeatureExtractor`] class that you can also
            retrieve from [`AutoFeatureExtractor`]. See [`ClapFeatureExtractor.__call__`] for details.
        is_longer (`torch.FloatTensor` of shape `(batch_size, 1)`, *optional*):
            Whether the audio clip is longer than `max_length`. If `True`, a feature fusion will be enabled to enhance
            the features.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
  885. CLAP_INPUTS_DOCSTRING = r"""
  886. Args:
  887. input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
  888. Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
  889. it.
  890. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
  891. [`PreTrainedTokenizer.__call__`] for details.
  892. [What are input IDs?](../glossary#input-ids)
  893. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
  894. Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
  895. - 1 for tokens that are **not masked**,
  896. - 0 for tokens that are **masked**.
  897. [What are attention masks?](../glossary#attention-mask)
  898. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
  899. Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
  900. config.max_position_embeddings - 1]`.
  901. [What are position IDs?](../glossary#position-ids)
  902. input_features (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Input audio features. This should be returned by the [`ClapFeatureExtractor`] class that you can also
            retrieve from [`AutoFeatureExtractor`]. See [`ClapFeatureExtractor.__call__`] for details.
  905. return_loss (`bool`, *optional*):
  906. Whether or not to return the contrastive loss.
  907. output_attentions (`bool`, *optional*):
  908. Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
  909. tensors for more detail.
  910. output_hidden_states (`bool`, *optional*):
  911. Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
  912. more detail.
  913. return_dict (`bool`, *optional*):
  914. Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
  915. """
  916. class ClapProjectionLayer(nn.Module):
  917. def __init__(self, config: Union[ClapAudioConfig, ClapTextConfig]):
  918. super().__init__()
  919. self.config = config
  920. hidden_size = config.hidden_size
  921. projection_dim = config.projection_dim
  922. self.linear1 = nn.Linear(hidden_size, projection_dim)
  923. self.activation = ACT2FN[config.projection_hidden_act]
  924. self.linear2 = nn.Linear(projection_dim, projection_dim)
  925. def forward(self, hidden_states):
  926. hidden_states = self.linear1(hidden_states)
  927. hidden_states = self.activation(hidden_states)
  928. hidden_states = self.linear2(hidden_states)
  929. return hidden_states
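# Illustrative sketch for ClapProjectionLayer above (the config values are assumptions, not taken from any particular
# checkpoint): the layer is a two-layer MLP that maps the encoder's pooled output from `hidden_size` to
# `projection_dim`, e.g. with hidden_size=768 and projection_dim=512:
#
#     >>> pooled = torch.randn(2, 768)                    # (batch_size, hidden_size)
#     >>> projection = ClapProjectionLayer(text_config)   # text_config.hidden_size=768, projection_dim=512
#     >>> projection(pooled).shape
#     torch.Size([2, 512])                                # (batch_size, projection_dim)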
  930. # Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->ClapText, persistent=False->persistent=True
  931. class ClapTextEmbeddings(nn.Module):
  932. """
  933. Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
  934. """
  935. # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__
  936. def __init__(self, config):
  937. super().__init__()
  938. self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
  939. self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
  940. self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
  941. # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
  942. # any TensorFlow checkpoint file
  943. self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
  944. self.dropout = nn.Dropout(config.hidden_dropout_prob)
  945. # position_ids (1, len position emb) is contiguous in memory and exported when serialized
  946. self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
  947. self.register_buffer(
  948. "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=True
  949. )
  950. self.register_buffer(
  951. "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=True
  952. )
  953. # End copy
  954. self.padding_idx = config.pad_token_id
  955. self.position_embeddings = nn.Embedding(
  956. config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
  957. )
  958. def forward(
  959. self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
  960. ):
  961. if position_ids is None:
  962. if input_ids is not None:
  963. # Create the position ids from the input token ids. Any padded tokens remain padded.
  964. position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length)
  965. else:
  966. position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
  967. if input_ids is not None:
  968. input_shape = input_ids.size()
  969. else:
  970. input_shape = inputs_embeds.size()[:-1]
  971. seq_length = input_shape[1]
        # If token_type_ids is not provided, use the buffer registered in the constructor (all zeros), expanded to the
        # current sequence length. This keeps the model traceable without having to pass token_type_ids explicitly and
        # resolves issue #5664.
  975. if token_type_ids is None:
  976. if hasattr(self, "token_type_ids"):
  977. buffered_token_type_ids = self.token_type_ids[:, :seq_length]
  978. buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
  979. token_type_ids = buffered_token_type_ids_expanded
  980. else:
  981. token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
  982. if inputs_embeds is None:
  983. inputs_embeds = self.word_embeddings(input_ids)
  984. token_type_embeddings = self.token_type_embeddings(token_type_ids)
  985. embeddings = inputs_embeds + token_type_embeddings
  986. if self.position_embedding_type == "absolute":
  987. position_embeddings = self.position_embeddings(position_ids)
  988. embeddings += position_embeddings
  989. embeddings = self.LayerNorm(embeddings)
  990. embeddings = self.dropout(embeddings)
  991. return embeddings
  992. def create_position_ids_from_inputs_embeds(self, inputs_embeds):
  993. """
  994. We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
  995. Args:
  996. inputs_embeds: torch.Tensor
  997. Returns: torch.Tensor
  998. """
  999. input_shape = inputs_embeds.size()[:-1]
  1000. sequence_length = input_shape[1]
  1001. position_ids = torch.arange(
  1002. self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
  1003. )
  1004. return position_ids.unsqueeze(0).expand(input_shape)
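    # Illustrative sketch (assumes padding_idx=1, the default for the RoBERTa-style tokenizer that CLAP's text encoder
    # follows): position ids for real tokens start at padding_idx + 1, while padded positions keep padding_idx, e.g.
    #
    #     >>> input_ids = torch.tensor([[0, 31414, 232, 2, 1, 1]])  # last two tokens are padding
    #     >>> create_position_ids_from_input_ids(input_ids, padding_idx=1)
    #     tensor([[2, 3, 4, 5, 1, 1]])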
  1005. # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->ClapText
  1006. class ClapTextSelfAttention(nn.Module):
  1007. def __init__(self, config, position_embedding_type=None):
  1008. super().__init__()
  1009. if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
  1010. raise ValueError(
  1011. f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
  1012. f"heads ({config.num_attention_heads})"
  1013. )
  1014. self.num_attention_heads = config.num_attention_heads
  1015. self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
  1016. self.all_head_size = self.num_attention_heads * self.attention_head_size
  1017. self.query = nn.Linear(config.hidden_size, self.all_head_size)
  1018. self.key = nn.Linear(config.hidden_size, self.all_head_size)
  1019. self.value = nn.Linear(config.hidden_size, self.all_head_size)
  1020. self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
  1021. self.position_embedding_type = position_embedding_type or getattr(
  1022. config, "position_embedding_type", "absolute"
  1023. )
  1024. if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
  1025. self.max_position_embeddings = config.max_position_embeddings
  1026. self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
  1027. self.is_decoder = config.is_decoder
  1028. def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
  1029. new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
  1030. x = x.view(new_x_shape)
  1031. return x.permute(0, 2, 1, 3)
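    # Shape sketch (illustrative values only): with hidden_size=768 and num_attention_heads=12, transpose_for_scores
    # reshapes (batch_size, seq_len, 768) into (batch_size, 12, seq_len, 64) so attention is computed per head.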
  1032. def forward(
  1033. self,
  1034. hidden_states: torch.Tensor,
  1035. attention_mask: Optional[torch.FloatTensor] = None,
  1036. head_mask: Optional[torch.FloatTensor] = None,
  1037. encoder_hidden_states: Optional[torch.FloatTensor] = None,
  1038. encoder_attention_mask: Optional[torch.FloatTensor] = None,
  1039. past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
  1040. output_attentions: Optional[bool] = False,
  1041. ) -> Tuple[torch.Tensor]:
  1042. mixed_query_layer = self.query(hidden_states)
  1043. # If this is instantiated as a cross-attention module, the keys
  1044. # and values come from an encoder; the attention mask needs to be
  1045. # such that the encoder's padding tokens are not attended to.
  1046. is_cross_attention = encoder_hidden_states is not None
  1047. if is_cross_attention and past_key_value is not None:
  1048. # reuse k,v, cross_attentions
  1049. key_layer = past_key_value[0]
  1050. value_layer = past_key_value[1]
  1051. attention_mask = encoder_attention_mask
  1052. elif is_cross_attention:
  1053. key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
  1054. value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
  1055. attention_mask = encoder_attention_mask
  1056. elif past_key_value is not None:
  1057. key_layer = self.transpose_for_scores(self.key(hidden_states))
  1058. value_layer = self.transpose_for_scores(self.value(hidden_states))
  1059. key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
  1060. value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
  1061. else:
  1062. key_layer = self.transpose_for_scores(self.key(hidden_states))
  1063. value_layer = self.transpose_for_scores(self.value(hidden_states))
  1064. query_layer = self.transpose_for_scores(mixed_query_layer)
  1065. use_cache = past_key_value is not None
  1066. if self.is_decoder:
  1067. # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
  1068. # Further calls to cross_attention layer can then reuse all cross-attention
  1069. # key/value_states (first "if" case)
  1070. # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
  1071. # all previous decoder key/value_states. Further calls to uni-directional self-attention
  1072. # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
  1073. # if encoder bi-directional self-attention `past_key_value` is always `None`
  1074. past_key_value = (key_layer, value_layer)
  1075. # Take the dot product between "query" and "key" to get the raw attention scores.
  1076. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
  1077. if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
  1078. query_length, key_length = query_layer.shape[2], key_layer.shape[2]
  1079. if use_cache:
  1080. position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view(
  1081. -1, 1
  1082. )
  1083. else:
  1084. position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
  1085. position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
  1086. distance = position_ids_l - position_ids_r
  1087. positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
  1088. positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
  1089. if self.position_embedding_type == "relative_key":
  1090. relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
  1091. attention_scores = attention_scores + relative_position_scores
  1092. elif self.position_embedding_type == "relative_key_query":
  1093. relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
  1094. relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
  1095. attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
  1096. attention_scores = attention_scores / math.sqrt(self.attention_head_size)
  1097. if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the ClapTextModel forward() function)
  1099. attention_scores = attention_scores + attention_mask
  1100. # Normalize the attention scores to probabilities.
  1101. attention_probs = nn.functional.softmax(attention_scores, dim=-1)
  1102. # This is actually dropping out entire tokens to attend to, which might
  1103. # seem a bit unusual, but is taken from the original Transformer paper.
  1104. attention_probs = self.dropout(attention_probs)
  1105. # Mask heads if we want to
  1106. if head_mask is not None:
  1107. attention_probs = attention_probs * head_mask
  1108. context_layer = torch.matmul(attention_probs, value_layer)
  1109. context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
  1110. new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
  1111. context_layer = context_layer.view(new_context_layer_shape)
  1112. outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
  1113. if self.is_decoder:
  1114. outputs = outputs + (past_key_value,)
  1115. return outputs
  1116. # Copied from transformers.models.bert.modeling_bert.BertSelfOutput
  1117. class ClapTextSelfOutput(nn.Module):
  1118. def __init__(self, config):
  1119. super().__init__()
  1120. self.dense = nn.Linear(config.hidden_size, config.hidden_size)
  1121. self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
  1122. self.dropout = nn.Dropout(config.hidden_dropout_prob)
  1123. def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
  1124. hidden_states = self.dense(hidden_states)
  1125. hidden_states = self.dropout(hidden_states)
  1126. hidden_states = self.LayerNorm(hidden_states + input_tensor)
  1127. return hidden_states
  1128. CLAP_TEXT_SELF_ATTENTION_CLASSES = {
  1129. "eager": ClapTextSelfAttention,
  1130. }
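# Note: only the "eager" (pure PyTorch) attention implementation is registered here, so `config._attn_implementation`
# is expected to resolve to "eager" for the CLAP text encoder.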
  1131. # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->ClapText,BERT->CLAP_TEXT
  1132. class ClapTextAttention(nn.Module):
  1133. def __init__(self, config, position_embedding_type=None):
  1134. super().__init__()
  1135. self.self = CLAP_TEXT_SELF_ATTENTION_CLASSES[config._attn_implementation](
  1136. config, position_embedding_type=position_embedding_type
  1137. )
  1138. self.output = ClapTextSelfOutput(config)
  1139. self.pruned_heads = set()
  1140. def prune_heads(self, heads):
  1141. if len(heads) == 0:
  1142. return
  1143. heads, index = find_pruneable_heads_and_indices(
  1144. heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
  1145. )
  1146. # Prune linear layers
  1147. self.self.query = prune_linear_layer(self.self.query, index)
  1148. self.self.key = prune_linear_layer(self.self.key, index)
  1149. self.self.value = prune_linear_layer(self.self.value, index)
  1150. self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
  1151. # Update hyper params and store pruned heads
  1152. self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
  1153. self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
  1154. self.pruned_heads = self.pruned_heads.union(heads)
  1155. def forward(
  1156. self,
  1157. hidden_states: torch.Tensor,
  1158. attention_mask: Optional[torch.FloatTensor] = None,
  1159. head_mask: Optional[torch.FloatTensor] = None,
  1160. encoder_hidden_states: Optional[torch.FloatTensor] = None,
  1161. encoder_attention_mask: Optional[torch.FloatTensor] = None,
  1162. past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
  1163. output_attentions: Optional[bool] = False,
  1164. ) -> Tuple[torch.Tensor]:
  1165. self_outputs = self.self(
  1166. hidden_states,
  1167. attention_mask,
  1168. head_mask,
  1169. encoder_hidden_states,
  1170. encoder_attention_mask,
  1171. past_key_value,
  1172. output_attentions,
  1173. )
  1174. attention_output = self.output(self_outputs[0], hidden_states)
  1175. outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
  1176. return outputs
  1177. # Copied from transformers.models.bert.modeling_bert.BertIntermediate
  1178. class ClapTextIntermediate(nn.Module):
  1179. def __init__(self, config):
  1180. super().__init__()
  1181. self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
  1182. if isinstance(config.hidden_act, str):
  1183. self.intermediate_act_fn = ACT2FN[config.hidden_act]
  1184. else:
  1185. self.intermediate_act_fn = config.hidden_act
  1186. def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
  1187. hidden_states = self.dense(hidden_states)
  1188. hidden_states = self.intermediate_act_fn(hidden_states)
  1189. return hidden_states
  1190. # Copied from transformers.models.bert.modeling_bert.BertOutput
  1191. class ClapTextOutput(nn.Module):
  1192. def __init__(self, config):
  1193. super().__init__()
  1194. self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
  1195. self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
  1196. self.dropout = nn.Dropout(config.hidden_dropout_prob)
  1197. def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
  1198. hidden_states = self.dense(hidden_states)
  1199. hidden_states = self.dropout(hidden_states)
  1200. hidden_states = self.LayerNorm(hidden_states + input_tensor)
  1201. return hidden_states
  1202. # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->ClapText
  1203. class ClapTextLayer(nn.Module):
  1204. def __init__(self, config):
  1205. super().__init__()
  1206. self.chunk_size_feed_forward = config.chunk_size_feed_forward
  1207. self.seq_len_dim = 1
  1208. self.attention = ClapTextAttention(config)
  1209. self.is_decoder = config.is_decoder
  1210. self.add_cross_attention = config.add_cross_attention
  1211. if self.add_cross_attention:
  1212. if not self.is_decoder:
  1213. raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
  1214. self.crossattention = ClapTextAttention(config, position_embedding_type="absolute")
  1215. self.intermediate = ClapTextIntermediate(config)
  1216. self.output = ClapTextOutput(config)
  1217. def forward(
  1218. self,
  1219. hidden_states: torch.Tensor,
  1220. attention_mask: Optional[torch.FloatTensor] = None,
  1221. head_mask: Optional[torch.FloatTensor] = None,
  1222. encoder_hidden_states: Optional[torch.FloatTensor] = None,
  1223. encoder_attention_mask: Optional[torch.FloatTensor] = None,
  1224. past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
  1225. output_attentions: Optional[bool] = False,
  1226. ) -> Tuple[torch.Tensor]:
  1227. # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
  1228. self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
  1229. self_attention_outputs = self.attention(
  1230. hidden_states,
  1231. attention_mask,
  1232. head_mask,
  1233. output_attentions=output_attentions,
  1234. past_key_value=self_attn_past_key_value,
  1235. )
  1236. attention_output = self_attention_outputs[0]
  1237. # if decoder, the last output is tuple of self-attn cache
  1238. if self.is_decoder:
  1239. outputs = self_attention_outputs[1:-1]
  1240. present_key_value = self_attention_outputs[-1]
  1241. else:
  1242. outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
  1243. cross_attn_present_key_value = None
  1244. if self.is_decoder and encoder_hidden_states is not None:
  1245. if not hasattr(self, "crossattention"):
  1246. raise ValueError(
  1247. f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
  1248. " by setting `config.add_cross_attention=True`"
  1249. )
  1250. # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
  1251. cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
  1252. cross_attention_outputs = self.crossattention(
  1253. attention_output,
  1254. attention_mask,
  1255. head_mask,
  1256. encoder_hidden_states,
  1257. encoder_attention_mask,
  1258. cross_attn_past_key_value,
  1259. output_attentions,
  1260. )
  1261. attention_output = cross_attention_outputs[0]
  1262. outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
  1263. # add cross-attn cache to positions 3,4 of present_key_value tuple
  1264. cross_attn_present_key_value = cross_attention_outputs[-1]
  1265. present_key_value = present_key_value + cross_attn_present_key_value
  1266. layer_output = apply_chunking_to_forward(
  1267. self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
  1268. )
  1269. outputs = (layer_output,) + outputs
  1270. # if decoder, return the attn key/values as the last output
  1271. if self.is_decoder:
  1272. outputs = outputs + (present_key_value,)
  1273. return outputs
  1274. def feed_forward_chunk(self, attention_output):
  1275. intermediate_output = self.intermediate(attention_output)
  1276. layer_output = self.output(intermediate_output, attention_output)
  1277. return layer_output
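    # Illustrative note: with config.chunk_size_feed_forward == 0, apply_chunking_to_forward simply calls
    # feed_forward_chunk on the full attention_output; with a chunk size > 0 it splits the tensor along the sequence
    # dimension (self.seq_len_dim == 1), applies the feed-forward block chunk by chunk and concatenates the results,
    # trading speed for lower peak memory. A rough sketch of the chunked path:
    #
    #     >>> chunks = attention_output.split(chunk_size, dim=1)
    #     >>> layer_output = torch.cat([self.feed_forward_chunk(chunk) for chunk in chunks], dim=1)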
  1278. # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->ClapText
  1279. class ClapTextEncoder(nn.Module):
  1280. def __init__(self, config):
  1281. super().__init__()
  1282. self.config = config
  1283. self.layer = nn.ModuleList([ClapTextLayer(config) for _ in range(config.num_hidden_layers)])
  1284. self.gradient_checkpointing = False
  1285. def forward(
  1286. self,
  1287. hidden_states: torch.Tensor,
  1288. attention_mask: Optional[torch.FloatTensor] = None,
  1289. head_mask: Optional[torch.FloatTensor] = None,
  1290. encoder_hidden_states: Optional[torch.FloatTensor] = None,
  1291. encoder_attention_mask: Optional[torch.FloatTensor] = None,
  1292. past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
  1293. use_cache: Optional[bool] = None,
  1294. output_attentions: Optional[bool] = False,
  1295. output_hidden_states: Optional[bool] = False,
  1296. return_dict: Optional[bool] = True,
  1297. ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
  1298. all_hidden_states = () if output_hidden_states else None
  1299. all_self_attentions = () if output_attentions else None
  1300. all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
  1301. if self.gradient_checkpointing and self.training:
  1302. if use_cache:
  1303. logger.warning_once(
  1304. "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
  1305. )
  1306. use_cache = False
  1307. next_decoder_cache = () if use_cache else None
  1308. for i, layer_module in enumerate(self.layer):
  1309. if output_hidden_states:
  1310. all_hidden_states = all_hidden_states + (hidden_states,)
  1311. layer_head_mask = head_mask[i] if head_mask is not None else None
  1312. past_key_value = past_key_values[i] if past_key_values is not None else None
  1313. if self.gradient_checkpointing and self.training:
  1314. layer_outputs = self._gradient_checkpointing_func(
  1315. layer_module.__call__,
  1316. hidden_states,
  1317. attention_mask,
  1318. layer_head_mask,
  1319. encoder_hidden_states,
  1320. encoder_attention_mask,
  1321. past_key_value,
  1322. output_attentions,
  1323. )
  1324. else:
  1325. layer_outputs = layer_module(
  1326. hidden_states,
  1327. attention_mask,
  1328. layer_head_mask,
  1329. encoder_hidden_states,
  1330. encoder_attention_mask,
  1331. past_key_value,
  1332. output_attentions,
  1333. )
  1334. hidden_states = layer_outputs[0]
  1335. if use_cache:
  1336. next_decoder_cache += (layer_outputs[-1],)
  1337. if output_attentions:
  1338. all_self_attentions = all_self_attentions + (layer_outputs[1],)
  1339. if self.config.add_cross_attention:
  1340. all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
  1341. if output_hidden_states:
  1342. all_hidden_states = all_hidden_states + (hidden_states,)
  1343. if not return_dict:
  1344. return tuple(
  1345. v
  1346. for v in [
  1347. hidden_states,
  1348. next_decoder_cache,
  1349. all_hidden_states,
  1350. all_self_attentions,
  1351. all_cross_attentions,
  1352. ]
  1353. if v is not None
  1354. )
  1355. return BaseModelOutputWithPastAndCrossAttentions(
  1356. last_hidden_state=hidden_states,
  1357. past_key_values=next_decoder_cache,
  1358. hidden_states=all_hidden_states,
  1359. attentions=all_self_attentions,
  1360. cross_attentions=all_cross_attentions,
  1361. )
  1362. # Copied from transformers.models.bert.modeling_bert.BertPooler
  1363. class ClapTextPooler(nn.Module):
  1364. def __init__(self, config):
  1365. super().__init__()
  1366. self.dense = nn.Linear(config.hidden_size, config.hidden_size)
  1367. self.activation = nn.Tanh()
  1368. def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
  1369. # We "pool" the model by simply taking the hidden state corresponding
  1370. # to the first token.
  1371. first_token_tensor = hidden_states[:, 0]
  1372. pooled_output = self.dense(first_token_tensor)
  1373. pooled_output = self.activation(pooled_output)
  1374. return pooled_output
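    # Note: for CLAP's RoBERTa-style text encoder, the "first token" pooled here is the beginning-of-sequence token
    # (`<s>`), playing the same role as BERT's [CLS] token.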
  1375. class ClapPreTrainedModel(PreTrainedModel):
  1376. """
  1377. An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
  1378. models.
  1379. """
  1380. config_class = ClapConfig
  1381. base_model_prefix = "clap"
  1382. supports_gradient_checkpointing = False
  1383. def _init_weights(self, module):
  1384. """Initialize the weights"""
  1385. factor = self.config.initializer_factor
  1386. if isinstance(module, ClapTextEmbeddings):
  1387. module.position_embeddings.weight.data.normal_(mean=0.0, std=factor * 0.02)
  1388. module.token_type_embeddings.weight.data.normal_(mean=0.0, std=factor * 0.02)
  1389. elif isinstance(module, ClapModel):
  1390. nn.init.normal_(module.logit_scale_a, std=factor * 0.02)
  1391. nn.init.normal_(module.logit_scale_t, std=factor * 0.02)
  1392. elif isinstance(module, nn.Embedding):
  1393. module.weight.data.normal_(mean=0.0, std=factor * 0.02)
  1394. elif isinstance(module, nn.LayerNorm):
  1395. module.bias.data.zero_()
  1396. module.weight.data.fill_(1.0)
  1397. elif isinstance(module, (nn.Conv2d, nn.Linear)):
  1398. in_proj_std = (self.config.hidden_size**-0.5) * ((2 * self.config.num_hidden_layers) ** -0.5) * factor
  1399. nn.init.normal_(module.weight, std=in_proj_std)
  1400. if module.bias is not None:
  1401. module.bias.data.zero_()
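    # Illustrative note: for Conv2d/Linear modules the init std above is
    # hidden_size**-0.5 * (2 * num_hidden_layers)**-0.5 * initializer_factor; e.g. with hidden_size=768,
    # num_hidden_layers=12 and initializer_factor=1.0 this is roughly 0.0074.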
  1402. class ClapAudioModel(ClapPreTrainedModel):
  1403. config_class = ClapAudioConfig
  1404. main_input_name = "input_features"
  1405. def __init__(self, config: ClapAudioConfig):
  1406. super().__init__(config)
  1407. self.audio_encoder = ClapAudioEncoder(config)
  1408. # Initialize weights and apply final processing
  1409. self.post_init()
  1410. def get_input_embeddings(self) -> nn.Module:
  1411. return self.audio_encoder.patch_embed.proj
  1412. @add_start_docstrings_to_model_forward(CLAP_AUDIO_INPUTS_DOCSTRING)
  1413. @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ClapAudioConfig)
  1414. def forward(
  1415. self,
  1416. input_features: Optional[torch.FloatTensor] = None,
  1417. is_longer: Optional[torch.BoolTensor] = None,
  1418. output_attentions: Optional[bool] = None,
  1419. output_hidden_states: Optional[bool] = None,
  1420. return_dict: Optional[bool] = None,
  1421. ) -> Union[Tuple, BaseModelOutputWithPooling]:
  1422. r"""
  1423. Returns:
  1424. Examples:
  1425. ```python
  1426. >>> from datasets import load_dataset
  1427. >>> from transformers import AutoProcessor, ClapAudioModel
  1428. >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
  1429. >>> audio_sample = dataset["train"]["audio"][0]["array"]
  1430. >>> model = ClapAudioModel.from_pretrained("laion/clap-htsat-fused")
  1431. >>> processor = AutoProcessor.from_pretrained("laion/clap-htsat-fused")
  1432. >>> inputs = processor(audios=audio_sample, return_tensors="pt")
  1433. >>> outputs = model(**inputs)
  1434. >>> last_hidden_state = outputs.last_hidden_state
  1435. ```"""
  1436. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  1437. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
  1438. output_hidden_states = (
  1439. output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
  1440. )
  1441. return self.audio_encoder(
  1442. input_features=input_features,
  1443. is_longer=is_longer,
  1444. output_attentions=output_attentions,
  1445. output_hidden_states=output_hidden_states,
  1446. return_dict=return_dict,
  1447. )
  1448. class ClapTextModel(ClapPreTrainedModel):
  1449. """
  1450. The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
  1451. cross-attention is added between the self-attention layers, following the architecture described in *Attention is
  1452. all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz
  1453. Kaiser and Illia Polosukhin.
  1454. To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
  1455. to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and
  1456. `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
  1457. .. _*Attention is all you need*: https://arxiv.org/abs/1706.03762
  1458. """
  1459. config_class = ClapTextConfig
  1460. def __init__(self, config, add_pooling_layer=True):
  1461. super().__init__(config)
  1462. self.config = config
  1463. self.embeddings = ClapTextEmbeddings(config)
  1464. self.encoder = ClapTextEncoder(config)
  1465. self.pooler = ClapTextPooler(config) if add_pooling_layer else None
  1466. # Initialize weights and apply final processing
  1467. self.post_init()
  1468. def get_input_embeddings(self):
  1469. return self.embeddings.word_embeddings
  1470. def set_input_embeddings(self, value):
  1471. self.embeddings.word_embeddings = value
  1472. def forward(
  1473. self,
  1474. input_ids: Optional[torch.Tensor] = None,
  1475. attention_mask: Optional[torch.Tensor] = None,
  1476. token_type_ids: Optional[torch.Tensor] = None,
  1477. position_ids: Optional[torch.Tensor] = None,
  1478. head_mask: Optional[torch.Tensor] = None,
  1479. inputs_embeds: Optional[torch.Tensor] = None,
  1480. encoder_hidden_states: Optional[torch.Tensor] = None,
  1481. encoder_attention_mask: Optional[torch.Tensor] = None,
  1482. past_key_values: Optional[List[torch.FloatTensor]] = None,
  1483. use_cache: Optional[bool] = None,
  1484. output_attentions: Optional[bool] = None,
  1485. output_hidden_states: Optional[bool] = None,
  1486. return_dict: Optional[bool] = None,
  1487. ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
  1488. r"""
  1489. encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
  1490. Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
  1491. the model is configured as a decoder.
  1492. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
  1493. Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
  1494. the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
  1495. - 1 for tokens that are **not masked**,
  1496. - 0 for tokens that are **masked**.
  1497. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
  1498. Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
  1499. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
  1500. don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
  1501. `decoder_input_ids` of shape `(batch_size, sequence_length)`.
  1502. use_cache (`bool`, *optional*):
  1503. If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
  1504. `past_key_values`).
  1505. """
  1506. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
  1507. output_hidden_states = (
  1508. output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
  1509. )
  1510. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  1511. if self.config.is_decoder:
  1512. use_cache = use_cache if use_cache is not None else self.config.use_cache
  1513. else:
  1514. use_cache = False
  1515. if input_ids is not None and inputs_embeds is not None:
  1516. raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
  1517. elif input_ids is not None:
  1518. self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
  1519. input_shape = input_ids.size()
  1520. elif inputs_embeds is not None:
  1521. input_shape = inputs_embeds.size()[:-1]
  1522. else:
  1523. raise ValueError("You have to specify either input_ids or inputs_embeds")
  1524. batch_size, seq_length = input_shape
  1525. device = input_ids.device if input_ids is not None else inputs_embeds.device
  1526. # past_key_values_length
  1527. past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
  1528. if attention_mask is None:
            attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device)
  1530. if token_type_ids is None:
  1531. if hasattr(self.embeddings, "token_type_ids"):
  1532. buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
  1533. buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
  1534. token_type_ids = buffered_token_type_ids_expanded
  1535. else:
  1536. token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
  1537. # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
  1538. # ourselves in which case we just need to make it broadcastable to all heads.
  1539. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
  1540. # If a 2D or 3D attention mask is provided for the cross-attention
  1541. # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
  1542. if self.config.is_decoder and encoder_hidden_states is not None:
  1543. encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
  1544. encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
  1545. if encoder_attention_mask is None:
  1546. encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
  1547. encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
  1548. else:
  1549. encoder_extended_attention_mask = None
  1550. # Prepare head mask if needed
  1551. # 1.0 in head_mask indicate we keep the head
  1552. # attention_probs has shape bsz x n_heads x N x N
  1553. # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
  1554. # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
  1555. head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
  1556. embedding_output = self.embeddings(
  1557. input_ids=input_ids,
  1558. position_ids=position_ids,
  1559. token_type_ids=token_type_ids,
  1560. inputs_embeds=inputs_embeds,
  1561. past_key_values_length=past_key_values_length,
  1562. )
  1563. encoder_outputs = self.encoder(
  1564. embedding_output,
  1565. attention_mask=extended_attention_mask,
  1566. head_mask=head_mask,
  1567. encoder_hidden_states=encoder_hidden_states,
  1568. encoder_attention_mask=encoder_extended_attention_mask,
  1569. past_key_values=past_key_values,
  1570. use_cache=use_cache,
  1571. output_attentions=output_attentions,
  1572. output_hidden_states=output_hidden_states,
  1573. return_dict=return_dict,
  1574. )
  1575. sequence_output = encoder_outputs[0]
  1576. pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
  1577. if not return_dict:
  1578. return (sequence_output, pooled_output) + encoder_outputs[1:]
  1579. return BaseModelOutputWithPoolingAndCrossAttentions(
  1580. last_hidden_state=sequence_output,
  1581. pooler_output=pooled_output,
  1582. past_key_values=encoder_outputs.past_key_values,
  1583. hidden_states=encoder_outputs.hidden_states,
  1584. attentions=encoder_outputs.attentions,
  1585. cross_attentions=encoder_outputs.cross_attentions,
  1586. )
  1587. @add_start_docstrings(CLAP_START_DOCSTRING)
  1588. class ClapModel(ClapPreTrainedModel):
  1589. config_class = ClapConfig
  1590. def __init__(self, config: ClapConfig):
  1591. super().__init__(config)
  1592. if not isinstance(config.text_config, ClapTextConfig):
  1593. raise TypeError(
  1594. "config.text_config is expected to be of type ClapTextConfig but is of type"
  1595. f" {type(config.text_config)}."
  1596. )
  1597. if not isinstance(config.audio_config, ClapAudioConfig):
  1598. raise TypeError(
  1599. "config.audio_config is expected to be of type ClapAudioConfig but is of type"
  1600. f" {type(config.audio_config)}."
  1601. )
  1602. text_config = config.text_config
  1603. audio_config = config.audio_config
  1604. self.logit_scale_a = nn.Parameter(torch.tensor(math.log(config.logit_scale_init_value)))
  1605. self.logit_scale_t = nn.Parameter(torch.tensor(math.log(config.logit_scale_init_value)))
  1606. self.projection_dim = config.projection_dim
  1607. self.text_model = ClapTextModel(text_config)
  1608. self.text_projection = ClapProjectionLayer(text_config)
  1609. self.audio_model = ClapAudioModel(audio_config)
  1610. self.audio_projection = ClapProjectionLayer(audio_config)
  1611. # Initialize weights and apply final processing
  1612. self.post_init()
  1613. @add_start_docstrings_to_model_forward(CLAP_TEXT_INPUTS_DOCSTRING)
  1614. def get_text_features(
  1615. self,
  1616. input_ids: Optional[torch.Tensor] = None,
  1617. attention_mask: Optional[torch.Tensor] = None,
  1618. position_ids: Optional[torch.Tensor] = None,
  1619. output_attentions: Optional[bool] = None,
  1620. output_hidden_states: Optional[bool] = None,
  1621. return_dict: Optional[bool] = None,
  1622. ) -> torch.FloatTensor:
  1623. r"""
  1624. Returns:
            text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
                applying the projection layer to the pooled output of [`ClapTextModel`].
  1627. Examples:
  1628. ```python
  1629. >>> from transformers import AutoTokenizer, ClapModel
  1630. >>> model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
  1631. >>> tokenizer = AutoTokenizer.from_pretrained("laion/clap-htsat-unfused")
  1632. >>> inputs = tokenizer(["the sound of a cat", "the sound of a dog"], padding=True, return_tensors="pt")
  1633. >>> text_features = model.get_text_features(**inputs)
  1634. ```"""
  1635. # Use CLAP model's config for some fields (if specified) instead of those of audio & text components.
  1636. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
  1637. output_hidden_states = (
  1638. output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
  1639. )
  1640. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  1641. text_outputs = self.text_model(
  1642. input_ids=input_ids,
  1643. attention_mask=attention_mask,
  1644. position_ids=position_ids,
  1645. output_attentions=output_attentions,
  1646. output_hidden_states=output_hidden_states,
  1647. return_dict=return_dict,
  1648. )
        pooled_output = text_outputs[1] if not return_dict else text_outputs.pooler_output
  1650. text_features = self.text_projection(pooled_output)
  1651. text_features = F.normalize(text_features, dim=-1)
  1652. return text_features
  1653. @add_start_docstrings_to_model_forward(CLAP_AUDIO_INPUTS_DOCSTRING)
  1654. def get_audio_features(
  1655. self,
  1656. input_features: Optional[torch.Tensor] = None,
  1657. is_longer: Optional[torch.Tensor] = None,
  1658. attention_mask: Optional[torch.Tensor] = None,
  1659. output_attentions: Optional[bool] = None,
  1660. output_hidden_states: Optional[bool] = None,
  1661. return_dict: Optional[bool] = None,
  1662. ) -> torch.FloatTensor:
  1663. r"""
  1664. Returns:
            audio_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The audio embeddings obtained by
                applying the projection layer to the pooled output of [`ClapAudioModel`].
  1667. Examples:
  1668. ```python
  1669. >>> from transformers import AutoFeatureExtractor, ClapModel
  1670. >>> import torch
  1671. >>> model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
  1672. >>> feature_extractor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused")
  1673. >>> random_audio = torch.rand((16_000))
  1674. >>> inputs = feature_extractor(random_audio, return_tensors="pt")
  1675. >>> audio_features = model.get_audio_features(**inputs)
  1676. ```"""
  1677. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
  1678. output_hidden_states = (
  1679. output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
  1680. )
  1681. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        audio_outputs = self.audio_model(
            input_features=input_features,
            is_longer=is_longer,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
  1687. pooled_output = audio_outputs[1] if not return_dict else audio_outputs.pooler_output
  1688. audio_features = self.audio_projection(pooled_output)
  1689. audio_features = F.normalize(audio_features, dim=-1)
  1690. return audio_features
  1691. @add_start_docstrings_to_model_forward(CLAP_INPUTS_DOCSTRING)
  1692. @replace_return_docstrings(output_type=ClapOutput, config_class=ClapConfig)
  1693. def forward(
  1694. self,
  1695. input_ids: Optional[torch.LongTensor] = None,
  1696. input_features: Optional[torch.FloatTensor] = None,
  1697. is_longer: Optional[torch.BoolTensor] = None,
  1698. attention_mask: Optional[torch.Tensor] = None,
  1699. position_ids: Optional[torch.LongTensor] = None,
  1700. return_loss: Optional[bool] = None,
  1701. output_attentions: Optional[bool] = None,
  1702. output_hidden_states: Optional[bool] = None,
  1703. return_dict: Optional[bool] = None,
  1704. ) -> Union[Tuple, ClapOutput]:
  1705. r"""
  1706. Returns:
  1707. Examples:
  1708. ```python
  1709. >>> from datasets import load_dataset
  1710. >>> from transformers import AutoProcessor, ClapModel
  1711. >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
  1712. >>> audio_sample = dataset["train"]["audio"][0]["array"]
  1713. >>> model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
  1714. >>> processor = AutoProcessor.from_pretrained("laion/clap-htsat-unfused")
        >>> input_text = ["Sound of a dog", "Sound of a vacuum cleaner"]
  1716. >>> inputs = processor(text=input_text, audios=audio_sample, return_tensors="pt", padding=True)
  1717. >>> outputs = model(**inputs)
  1718. >>> logits_per_audio = outputs.logits_per_audio # this is the audio-text similarity score
  1719. >>> probs = logits_per_audio.softmax(dim=-1) # we can take the softmax to get the label probabilities
  1720. ```"""
  1721. # Use CLAP model's config for some fields (if specified) instead of those of audio & text components.
  1722. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
  1723. output_hidden_states = (
  1724. output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
  1725. )
  1726. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  1727. audio_outputs = self.audio_model(
  1728. input_features=input_features,
  1729. is_longer=is_longer,
  1730. output_attentions=output_attentions,
  1731. output_hidden_states=output_hidden_states,
  1732. return_dict=return_dict,
  1733. )
  1734. text_outputs = self.text_model(
  1735. input_ids=input_ids,
  1736. attention_mask=attention_mask,
  1737. position_ids=position_ids,
  1738. output_attentions=output_attentions,
  1739. output_hidden_states=output_hidden_states,
  1740. return_dict=return_dict,
  1741. )
  1742. audio_embeds = audio_outputs[1] if not return_dict else audio_outputs.pooler_output
  1743. audio_embeds = self.audio_projection(audio_embeds)
  1744. text_embeds = text_outputs[1] if not return_dict else text_outputs.pooler_output
  1745. text_embeds = self.text_projection(text_embeds)
  1746. # normalized features
  1747. audio_embeds = audio_embeds / audio_embeds.norm(p=2, dim=-1, keepdim=True)
  1748. text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
  1749. # cosine similarity as logits
  1750. logit_scale_text = self.logit_scale_t.exp()
  1751. logit_scale_audio = self.logit_scale_a.exp()
  1752. logits_per_text = torch.matmul(text_embeds, audio_embeds.t()) * logit_scale_text
  1753. logits_per_audio = torch.matmul(audio_embeds, text_embeds.t()) * logit_scale_audio
  1754. loss = None
  1755. if return_loss:
  1756. caption_loss = contrastive_loss(logits_per_text)
  1757. audio_loss = contrastive_loss(logits_per_audio.t())
  1758. loss = (caption_loss + audio_loss) / 2.0
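        # Illustrative sketch: `contrastive_loss` is a cross-entropy over the similarity matrix in which the matching
        # (text_i, audio_i) pairs sit on the diagonal, roughly
        #
        #     >>> labels = torch.arange(logits_per_text.size(0), device=logits_per_text.device)
        #     >>> caption_loss = nn.functional.cross_entropy(logits_per_text, labels)
        #
        # and the final loss averages the text->audio and audio->text directions.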
  1759. if not return_dict:
  1760. output = (logits_per_audio, logits_per_text, text_embeds, audio_embeds, text_outputs, audio_outputs)
  1761. return ((loss,) + output) if loss is not None else output
  1762. return ClapOutput(
  1763. loss=loss,
  1764. logits_per_audio=logits_per_audio,
  1765. logits_per_text=logits_per_text,
  1766. text_embeds=text_embeds,
  1767. audio_embeds=audio_embeds,
  1768. text_model_output=text_outputs,
  1769. audio_model_output=audio_outputs,
  1770. )
  1771. @add_start_docstrings(
  1772. """
  1773. CLAP Text Model with a projection layer on top (a linear layer on top of the pooled output).
  1774. """,
  1775. CLAP_START_DOCSTRING,
  1776. )
  1777. class ClapTextModelWithProjection(ClapPreTrainedModel):
  1778. config_class = ClapTextConfig
  1779. def __init__(self, config: ClapTextConfig):
  1780. super().__init__(config)
  1781. self.text_model = ClapTextModel(config)
  1782. self.text_projection = ClapProjectionLayer(config)
  1783. # Initialize weights and apply final processing
  1784. self.post_init()
  1785. def get_input_embeddings(self) -> nn.Module:
  1786. return self.text_model.embeddings.word_embeddings
  1787. def set_input_embeddings(self, value):
  1788. self.text_model.embeddings.word_embeddings = value
  1789. @add_start_docstrings_to_model_forward(CLAP_TEXT_INPUTS_DOCSTRING)
  1790. @replace_return_docstrings(output_type=ClapTextModelOutput, config_class=ClapTextConfig)
  1791. def forward(
  1792. self,
  1793. input_ids: Optional[torch.Tensor] = None,
  1794. attention_mask: Optional[torch.Tensor] = None,
  1795. position_ids: Optional[torch.Tensor] = None,
  1796. output_attentions: Optional[bool] = None,
  1797. output_hidden_states: Optional[bool] = None,
  1798. return_dict: Optional[bool] = None,
  1799. ) -> Union[Tuple, ClapTextModelOutput]:
  1800. r"""
  1801. Returns:
  1802. Examples:
  1803. ```python
  1804. >>> from transformers import AutoTokenizer, ClapTextModelWithProjection
  1805. >>> model = ClapTextModelWithProjection.from_pretrained("laion/clap-htsat-unfused")
  1806. >>> tokenizer = AutoTokenizer.from_pretrained("laion/clap-htsat-unfused")
  1807. >>> inputs = tokenizer(["a sound of a cat", "a sound of a dog"], padding=True, return_tensors="pt")
  1808. >>> outputs = model(**inputs)
  1809. >>> text_embeds = outputs.text_embeds
  1810. ```"""
  1811. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  1812. text_outputs = self.text_model(
  1813. input_ids=input_ids,
  1814. attention_mask=attention_mask,
  1815. position_ids=position_ids,
  1816. output_attentions=output_attentions,
  1817. output_hidden_states=output_hidden_states,
  1818. return_dict=return_dict,
  1819. )
  1820. pooled_output = text_outputs[1] if not return_dict else text_outputs.pooler_output
  1821. text_embeds = self.text_projection(pooled_output)
  1822. if not return_dict:
  1823. outputs = (text_embeds, text_outputs[0]) + text_outputs[2:]
  1824. return tuple(output for output in outputs if output is not None)
  1825. return ClapTextModelOutput(
  1826. text_embeds=text_embeds,
  1827. last_hidden_state=text_outputs.last_hidden_state,
  1828. hidden_states=text_outputs.hidden_states,
  1829. attentions=text_outputs.attentions,
  1830. )
  1831. @add_start_docstrings(
  1832. """
  1833. CLAP Audio Model with a projection layer on top (a linear layer on top of the pooled output).
  1834. """,
  1835. CLAP_START_DOCSTRING,
  1836. )
  1837. class ClapAudioModelWithProjection(ClapPreTrainedModel):
  1838. config_class = ClapAudioConfig
  1839. main_input_name = "input_features"
  1840. def __init__(self, config: ClapAudioConfig):
  1841. super().__init__(config)
  1842. self.audio_model = ClapAudioModel(config)
  1843. self.audio_projection = ClapProjectionLayer(config)
  1844. # Initialize weights and apply final processing
  1845. self.post_init()
  1846. def get_input_embeddings(self) -> nn.Module:
  1847. return self.audio_model.audio_encoder.patch_embed.proj
  1848. @add_start_docstrings_to_model_forward(CLAP_AUDIO_INPUTS_DOCSTRING)
  1849. @replace_return_docstrings(output_type=ClapAudioModelOutput, config_class=ClapAudioConfig)
  1850. def forward(
  1851. self,
  1852. input_features: Optional[torch.FloatTensor] = None,
  1853. is_longer: Optional[torch.BoolTensor] = None,
  1854. output_attentions: Optional[bool] = None,
  1855. output_hidden_states: Optional[bool] = None,
  1856. return_dict: Optional[bool] = None,
  1857. ) -> Union[Tuple, ClapAudioModelOutput]:
  1858. r"""
  1859. Returns:
  1860. Examples:
  1861. ```python
  1862. >>> from datasets import load_dataset
  1863. >>> from transformers import ClapAudioModelWithProjection, ClapProcessor
  1864. >>> model = ClapAudioModelWithProjection.from_pretrained("laion/clap-htsat-fused")
  1865. >>> processor = ClapProcessor.from_pretrained("laion/clap-htsat-fused")
  1866. >>> dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
  1867. >>> audio_sample = dataset["train"]["audio"][0]["array"]
  1868. >>> inputs = processor(audios=audio_sample, return_tensors="pt")
  1869. >>> outputs = model(**inputs)
  1870. >>> audio_embeds = outputs.audio_embeds
  1871. ```"""
  1872. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  1873. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
  1874. output_hidden_states = (
  1875. output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
  1876. )
  1877. audio_outputs = self.audio_model(
  1878. input_features=input_features,
  1879. is_longer=is_longer,
  1880. output_attentions=output_attentions,
  1881. output_hidden_states=output_hidden_states,
  1882. return_dict=return_dict,
  1883. )
  1884. pooled_output = audio_outputs[1] if not return_dict else audio_outputs.pooler_output
  1885. audio_embeds = self.audio_projection(pooled_output)
  1886. if not return_dict:
  1887. outputs = (audio_embeds, audio_outputs[0]) + audio_outputs[2:]
  1888. return tuple(output for output in outputs if output is not None)
  1889. return ClapAudioModelOutput(
  1890. audio_embeds=audio_embeds,
  1891. last_hidden_state=audio_outputs.last_hidden_state,
  1892. attentions=audio_outputs.attentions,
  1893. hidden_states=audio_outputs.hidden_states,
  1894. )